filename
stringlengths 4-198
| content
stringlengths 25-939k
| environment
list
| variablearg
list
| constarg
list
| variableargjson
stringclasses 1 value
| constargjson
stringlengths 2-3.9k
| lang
stringclasses 3 values
| constargcount
float64 0-129 ⌀
| variableargcount
float64 0-0 ⌀
| sentence
stringclasses 1 value
|
---|---|---|---|---|---|---|---|---|---|---|
fetch_video_list.py
|
# -*- coding: utf8 -*-
import os
import sys
import json
import sqlite3
import googleapiclient.discovery
import googleapiclient.errors
import google_auth_oauthlib.flow
import google_auth_oauthlib.helpers
class VideoInfo():
'''
Attributes: video_id, title, upload_date
'''
def __init__(self, video_id: str, title: str, upload_date: str) -> None:
'''
args: video_id (str), title (str), upload_date (str);
Store video info in a VideoInfo object
'''
self.video_id = video_id
self.title = title
self.upload_date = upload_date
def retrieve_video_info(video_dict: dict):
'''
video_dict: a single video's information dictionary obtained from the YouTube playlist items API
Return a VideoInfo object
'''
video_info = VideoInfo(
video_id=video_dict["snippet"]["resourceId"]["videoId"],
title=video_dict["snippet"]["title"],
upload_date=video_dict["snippet"]["publishedAt"]
)
return video_info
def fetch_uploaded_list(channel_id: str):
'''
Args: channel_id (str)
Fetch the uploaded video list from YouTube for a given channel
'''
# set api credentials
scopes = ['https://www.googleapis.com/auth/youtube.force-ssl']
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "0"
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "secrets.json"
refresh_token_file = "refresh_token.json"
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(client_secrets_file, scopes)
# if no refresh token, request brand new token set
if not os.path.isfile('refresh_token.json'):
credentials = flow.run_console()
with open('refresh_token.json', 'w') as f:
json.dump(credentials.refresh_token, f)
# if there is refresh token, use it to get new access token
else:
with open(client_secrets_file) as f:
client_info = json.load(f)
client_id = client_info["installed"]["client_id"]
with open(refresh_token_file) as f:
refresh_token = json.load(f)
flow.oauth2session.refresh_token(flow.client_config['token_uri'], refresh_token=refresh_token, client_id=client_id, client_secret=flow.client_config['client_secret'])
credentials = google_auth_oauthlib.helpers.credentials_from_session(flow.oauth2session, flow.client_config)
# create api client
youtube = googleapiclient.discovery.build(api_service_name, api_version, credentials=credentials)
try:
# connect to db and create table for video list if not existing
conn = sqlite3.connect('archive.db')
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS video_list (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, video_id TEXT NOT NULL UNIQUE, title TEXT NOT NULL, date TEXT NOT NULL, channel_id TEXT NOT NULL, upload_idx INTEGER NOT NULL DEFAULT 0)')
# retrieve local video list
cur.execute('SELECT id, video_id FROM video_list WHERE channel_id=? ORDER BY upload_idx DESC', (channel_id, ))
existing_video_list = cur.fetchall()
# get channel detail => upload video playlist
request = youtube.channels().list(
id = channel_id,
part = "contentDetails",
maxResults = 1
)
response = request.execute()
uploads_id = response['items'][0]['contentDetails']['relatedPlaylists']['uploads']
# get detail of videos inside upload playlist
request = youtube.playlistItems().list(
part="snippet",
playlistId=uploads_id,
maxResults=50,
)
# keep asking for the next page while there are more results that have not been fetched yet
all_new_fetched = False
while request:
response = request.execute()
for single_video in response["items"]:
if single_video['snippet']['resourceId']['kind'] == 'youtube#video':
single_video_info = retrieve_video_info(single_video)
# if the returned video info matches existing video info, stop
if single_video_info.video_id in [i[1] for i in existing_video_list]:
all_new_fetched = True
break
cur.execute('INSERT INTO video_list (video_id, title, date, channel_id) VALUES (?, ?, ?, ?)', (single_video_info.video_id, single_video_info.title, single_video_info.upload_date, channel_id))
if all_new_fetched:
break
request = youtube.playlistItems().list_next(request, response)
cur.execute('SELECT id FROM video_list WHERE channel_id=? ORDER BY date', (channel_id, ))
id_by_date = cur.fetchall()
for upload_idx in range(len(id_by_date)):
cur.execute('UPDATE video_list SET upload_idx=? WHERE id=?', (upload_idx+1, id_by_date[upload_idx][0]))
except:
raise
finally:
cur.close()
conn.commit()
conn.close()
def fetch_all():
try:
conn = sqlite3.connect('archive.db')
cur = conn.cursor()
# get channel list
cur.execute('SELECT channel_id FROM channel_list')
searched_channel_list = cur.fetchall()
if not searched_channel_list:
raise NameError('No valid channel data')
channel_list = [i[0] for i in searched_channel_list]
for channel_id in channel_list:
fetch_uploaded_list(channel_id)
return 0
except:
raise
finally:
cur.close()
conn.commit()
conn.close()
def main():
channel_id = sys.argv[1]
fetch_uploaded_list(channel_id)
if __name__ == "__main__":
main()
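# Illustrative usage sketch (not part of the original script): it assumes a
# populated secrets.json next to the script and an archive.db database; the
# channel id below is a made-up placeholder.
#
#   python fetch_video_list.py UCxxxxxxxxxxxxxxxxxxxxxx
#
# fetch_all() can be called instead to refresh every channel stored in the
# channel_list table.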
|
[] |
[] |
[
"OAUTHLIB_INSECURE_TRANSPORT"
] |
[]
|
["OAUTHLIB_INSECURE_TRANSPORT"]
|
python
| 1 | 0 | |
internal/praefect/datastore/glsql/postgres_test.go
|
// +build postgres
package glsql
import (
"bytes"
"context"
"database/sql"
"errors"
"os"
"strconv"
"strings"
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly/internal/praefect/config"
"gitlab.com/gitlab-org/gitaly/internal/testhelper"
)
func TestOpenDB(t *testing.T) {
getEnvFromGDK(t)
dbCfg := config.DB{
Host: os.Getenv("PGHOST"),
Port: func() int {
pgPort := os.Getenv("PGPORT")
port, err := strconv.Atoi(pgPort)
require.NoError(t, err, "failed to parse PGPORT %q", pgPort)
return port
}(),
DBName: "postgres",
SSLMode: "disable",
}
t.Run("failed to ping because of incorrect config", func(t *testing.T) {
badCfg := dbCfg
badCfg.Host = "not-existing.com"
_, err := OpenDB(badCfg)
require.Error(t, err, "opening of DB with incorrect configuration must fail")
})
t.Run("connected with proper config", func(t *testing.T) {
db, err := OpenDB(dbCfg)
require.NoError(t, err, "opening of DB with correct configuration must not fail")
require.NoError(t, db.Close())
})
}
func TestTxQuery_MultipleOperationsSuccess(t *testing.T) {
db := getDB(t)
defer createBasicTable(t, db, "work_unit_test")()
ctx, cancel := testhelper.Context()
defer cancel()
const actions = 3
txq := NewTxQuery(context.TODO(), nil, db.DB)
defer func() {
var err error
txq.Done(&err)
require.NoError(t, err)
db.RequireRowsInTable(t, "work_unit_test", actions)
}()
for i := 0; i < actions; i++ {
require.True(
t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test VALUES (DEFAULT)")
return err
}),
"expects row to be inserted",
)
}
}
func TestTxQuery_FailedOperationInTheMiddle(t *testing.T) {
db := getDB(t)
defer createBasicTable(t, db, "work_unit_test")()
ctx, cancel := testhelper.Context()
defer cancel()
txq := NewTxQuery(ctx, nil, db.DB)
defer func() {
var err error
txq.Done(&err)
require.EqualError(t, err, `pq: syntax error at or near "BAD"`, "expects error because of the incorrect SQL statement")
db.RequireRowsInTable(t, "work_unit_test", 0)
}()
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test(id) VALUES (DEFAULT)")
return err
}),
"expects row to be inserted",
)
require.False(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "BAD OPERATION")
return err
}),
"the SQL statement is not valid, expects to be reported as failed",
)
require.False(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
t.Fatal("this func should not be called")
return nil
}),
"because of previously failed SQL operation next statement expected not to be run",
)
}
func TestTxQuery_ContextHandled(t *testing.T) {
db := getDB(t)
defer createBasicTable(t, db, "work_unit_test")()
ctx, cancel := testhelper.Context()
defer cancel()
txq := NewTxQuery(ctx, nil, db.DB)
defer func() {
var err error
txq.Done(&err)
require.EqualError(t, err, "context canceled")
db.RequireRowsInTable(t, "work_unit_test", 0)
}()
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test(id) VALUES (DEFAULT)")
return err
}),
"expects row to be inserted",
)
cancel() // explicit context cancellation to simulate situation when it is expired or cancelled
require.False(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test(id) VALUES (DEFAULT)")
return err
}),
"expects failed operation because of cancelled context",
)
}
func TestTxQuery_FailedToCommit(t *testing.T) {
db := getDB(t)
defer createBasicTable(t, db, "work_unit_test")()
ctx, cancel := testhelper.Context()
defer cancel()
txq := NewTxQuery(ctx, nil, db.DB)
defer func() {
var err error
txq.Done(&err)
require.EqualError(t, err, sql.ErrTxDone.Error(), "expects failed COMMIT because of previously executed COMMIT statement")
db.RequireRowsInTable(t, "work_unit_test", 1)
}()
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test(id) VALUES (DEFAULT)")
return err
}),
"expects row to be inserted",
)
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
require.NoError(t, tx.Commit()) // COMMIT to get error on the next attempt to COMMIT from Done method
return nil
}),
"expects COMMIT without issues",
)
}
func TestTxQuery_FailedToRollbackWithFailedOperation(t *testing.T) {
db := getDB(t)
defer createBasicTable(t, db, "work_unit_test")()
ctx, cancel := testhelper.Context()
defer cancel()
outBuffer := &bytes.Buffer{}
logger := logrus.New()
logger.Out = outBuffer
logger.Level = logrus.ErrorLevel
logger.Formatter = &logrus.JSONFormatter{
DisableTimestamp: true,
PrettyPrint: false,
}
txq := NewTxQuery(ctx, logger, db.DB)
defer func() {
var err error
txq.Done(&err)
require.EqualError(t, err, "some unexpected error")
require.Equal(t,
`{"error":"sql: transaction has already been committed or rolled back","level":"error","msg":"rollback failed"}`,
strings.TrimSpace(outBuffer.String()),
"failed COMMIT/ROLLBACK operation must be logged in case of another error during transaction usage",
)
db.RequireRowsInTable(t, "work_unit_test", 1)
}()
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test(id) VALUES (DEFAULT)")
return err
}),
"expects row to be inserted",
)
require.False(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
require.NoError(t, tx.Commit(), "expects successful COMMIT") // COMMIT to get error on the next attempt to COMMIT
return errors.New("some unexpected error")
}),
"expects failed operation because of explicit error returned",
)
}
func TestTxQuery_FailedToCommitWithFailedOperation(t *testing.T) {
db := getDB(t)
defer createBasicTable(t, db, "work_unit_test")()
ctx, cancel := testhelper.Context()
defer cancel()
outBuffer := &bytes.Buffer{}
logger := logrus.New()
logger.Out = outBuffer
logger.Level = logrus.ErrorLevel
logger.Formatter = &logrus.JSONFormatter{
DisableTimestamp: true,
PrettyPrint: false,
}
txq := NewTxQuery(ctx, logger, db.DB)
defer func() {
err := errors.New("some processing error")
txq.Done(&err)
require.EqualError(t, err, "some processing error")
require.Equal(
t,
`{"error":"sql: transaction has already been committed or rolled back","level":"error","msg":"commit failed"}`,
strings.TrimSpace(outBuffer.String()),
"failed COMMIT/ROLLBACK operation must be logged in case of another error during transaction usage",
)
db.RequireRowsInTable(t, "work_unit_test", 1)
}()
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, "INSERT INTO work_unit_test(id) VALUES (DEFAULT)")
return err
}),
"expects row to be inserted",
)
require.True(t,
txq.Exec(ctx, func(ctx context.Context, tx *sql.Tx) error {
require.NoError(t, tx.Commit()) // COMMIT to get error on the next attempt to COMMIT
return nil
}),
"expects COMMIT without issues",
)
}
func createBasicTable(t *testing.T, db DB, tname string) func() {
t.Helper()
_, err := db.Exec("CREATE TABLE " + tname + "(id BIGSERIAL PRIMARY KEY, col TEXT)")
require.NoError(t, err)
return func() {
_, err := db.Exec("DROP TABLE IF EXISTS " + tname)
require.NoError(t, err)
}
}
func TestUint64Provider(t *testing.T) {
var provider Uint64Provider
dst1 := provider.To()
require.Equal(t, []interface{}{new(uint64)}, dst1, "must be a single value holder")
val1 := dst1[0].(*uint64)
*val1 = uint64(100)
dst2 := provider.To()
require.Equal(t, []interface{}{new(uint64)}, dst2, "must be a single value holder")
val2 := dst2[0].(*uint64)
*val2 = uint64(200)
require.Equal(t, []uint64{100, 200}, provider.Values())
dst3 := provider.To()
val3 := dst3[0].(*uint64)
*val3 = uint64(300)
require.Equal(t, []uint64{100, 200, 300}, provider.Values())
}
func TestScanAll(t *testing.T) {
db := getDB(t)
var ids Uint64Provider
notEmptyRows, err := db.Query("SELECT id FROM (VALUES (1), (200), (300500)) AS t(id)")
require.NoError(t, err)
require.NoError(t, ScanAll(notEmptyRows, &ids))
require.Equal(t, []uint64{1, 200, 300500}, ids.Values())
var nothing Uint64Provider
emptyRows, err := db.Query("SELECT id FROM (VALUES (1), (200), (300500)) AS t(id) WHERE id < 0")
require.NoError(t, err)
require.NoError(t, ScanAll(emptyRows, &nothing))
require.Equal(t, ([]uint64)(nil), nothing.Values())
}
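// Illustrative note (not part of the original file): because of the
// `// +build postgres` constraint above, these tests only compile when the
// `postgres` build tag is supplied; the command below is a sketch and the
// exact package path may differ:
//
//   go test -tags postgres ./internal/praefect/datastore/glsql/...
//
// PGHOST and PGPORT must point at a reachable PostgreSQL instance, as read
// in TestOpenDB via os.Getenv.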
|
[
"\"PGHOST\"",
"\"PGPORT\""
] |
[] |
[
"PGHOST",
"PGPORT"
] |
[]
|
["PGHOST", "PGPORT"]
|
go
| 2 | 0 | |
main.go
|
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"time"
flag "github.com/spf13/pflag"
"helm.sh/helm/v3/pkg/kube"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/fluxcd/pkg/runtime/client"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/leaderelection"
"github.com/fluxcd/pkg/runtime/logger"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/pprof"
"github.com/fluxcd/pkg/runtime/probes"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
v2 "github.com/fluxcd/helm-controller/api/v2beta1"
"github.com/fluxcd/helm-controller/controllers"
// +kubebuilder:scaffold:imports
)
const controllerName = "helm-controller"
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(sourcev1.AddToScheme(scheme))
utilruntime.Must(v2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
var (
metricsAddr string
eventsAddr string
healthAddr string
concurrent int
requeueDependency time.Duration
watchAllNamespaces bool
httpRetry int
clientOptions client.Options
logOptions logger.Options
leaderElectionOptions leaderelection.Options
)
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&eventsAddr, "events-addr", "", "The address of the events receiver.")
flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
flag.IntVar(&concurrent, "concurrent", 4, "The number of concurrent HelmRelease reconciles.")
flag.DurationVar(&requeueDependency, "requeue-dependency", 30*time.Second, "The interval at which failing dependencies are reevaluated.")
flag.BoolVar(&watchAllNamespaces, "watch-all-namespaces", true,
"Watch for custom resources in all namespaces, if set to false it will only watch the runtime namespace.")
flag.IntVar(&httpRetry, "http-retry", 9, "The maximum number of retries when failing to fetch artifacts over HTTP.")
clientOptions.BindFlags(flag.CommandLine)
logOptions.BindFlags(flag.CommandLine)
leaderElectionOptions.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(logger.NewLogger(logOptions))
var eventRecorder *events.Recorder
if eventsAddr != "" {
if er, err := events.NewRecorder(eventsAddr, controllerName); err != nil {
setupLog.Error(err, "unable to create event recorder")
os.Exit(1)
} else {
eventRecorder = er
}
}
metricsRecorder := metrics.NewRecorder()
crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...)
watchNamespace := ""
if !watchAllNamespaces {
watchNamespace = os.Getenv("RUNTIME_NAMESPACE")
}
// set the managedFields owner for resources reconciled from Helm charts
kube.ManagedFieldsManager = controllerName
restConfig := client.GetConfigOrDie(clientOptions)
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
HealthProbeBindAddress: healthAddr,
Port: 9443,
LeaderElection: leaderElectionOptions.Enable,
LeaderElectionReleaseOnCancel: leaderElectionOptions.ReleaseOnCancel,
LeaseDuration: &leaderElectionOptions.LeaseDuration,
RenewDeadline: &leaderElectionOptions.RenewDeadline,
RetryPeriod: &leaderElectionOptions.RetryPeriod,
LeaderElectionID: fmt.Sprintf("%s-leader-election", controllerName),
Namespace: watchNamespace,
Logger: ctrl.Log,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
probes.SetupChecks(mgr, setupLog)
pprof.SetupHandlers(mgr, setupLog)
if err = (&controllers.HelmReleaseReconciler{
Client: mgr.GetClient(),
Config: mgr.GetConfig(),
Scheme: mgr.GetScheme(),
EventRecorder: mgr.GetEventRecorderFor(controllerName),
ExternalEventRecorder: eventRecorder,
MetricsRecorder: metricsRecorder,
}).SetupWithManager(mgr, controllers.HelmReleaseReconcilerOptions{
MaxConcurrentReconciles: concurrent,
DependencyRequeueInterval: requeueDependency,
HTTPRetry: httpRetry,
}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", v2.HelmReleaseKind)
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
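// Illustrative invocation sketch (not part of the original file); the flag
// names come from the flag definitions above, the values are examples only:
//
//   helm-controller --metrics-addr=:8080 --health-addr=:9440 \
//     --concurrent=4 --watch-all-namespaces=true
//
// When --watch-all-namespaces=false, the controller reads the
// RUNTIME_NAMESPACE environment variable to scope the manager to a single
// namespace.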
|
[
"\"RUNTIME_NAMESPACE\""
] |
[] |
[
"RUNTIME_NAMESPACE"
] |
[]
|
["RUNTIME_NAMESPACE"]
|
go
| 1 | 0 | |
seed.py
|
import csv
import os
import requests
import sys
import yaml
from dotenv import load_dotenv
from google.auth.transport.requests import Request
from google.oauth2 import id_token
load_dotenv()
instrument_names = os.getenv("INSTRUMENT_NAMES", "ENV_VAR_NOT_SET").split(",")
bus_client_id = os.getenv("BUS_CLIENT_ID", "ENV_VAR_NOT_SET")
bus_url = os.getenv("BUS_URL", "ENV_VAR_NOT_SET")
host_url = os.getenv("HOST_URL", "ENV_VAR_NOT_SET")
rest_api_url = os.getenv("REST_API_URL", "http://localhost:90")
server_park = os.getenv("SERVER_PARK", "gusty")
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[
0] + "/key.json"
def delete_uacs(bus_url, bus_client_id, instrument_name):
token = id_token.fetch_id_token(Request(), bus_client_id)
requests.delete(
f"{bus_url}/uacs/admin/instrument/{instrument_name}",
headers={"Authorization": f"Bearer {token}"},
)
def generate_uacs(bus_url, bus_client_id, instrument_name):
token = id_token.fetch_id_token(Request(), bus_client_id)
return requests.post(
f"{bus_url}/uacs/instrument/{instrument_name}",
headers={"Authorization": f"Bearer {token}"},
).json()
def get_instrument_id(rest_api_url, server_park, instrument_name):
return (
requests.get(
f"{rest_api_url}/api/v1/serverparks/{server_park}/instruments/{instrument_name}/id"
)
.json()
)
def chunk_seed_data(seed_data, chunk_size):
return [seed_data[x:x + chunk_size] for x in range(0, len(seed_data), chunk_size)]
seed_data = []
for instrument_name in instrument_names:
delete_uacs(bus_url, bus_client_id, instrument_name)
instrument_seed_data = generate_uacs(bus_url, bus_client_id, instrument_name)
instrument_id = get_instrument_id(rest_api_url, server_park, instrument_name)
for uac, seed_info in instrument_seed_data.items():
seed_data.append({
"uac": uac,
"case_id": seed_info.get("case_id"),
"instrument_name": instrument_name,
"instrument_id": instrument_id
})
sorted_seed_data = sorted(seed_data, key=lambda k: k["uac"])
split_seed_data = chunk_seed_data(sorted_seed_data, 10000)
with open("values-template.yaml", "r") as values_template:
helm_values = yaml.load(values_template, Loader=yaml.SafeLoader)
helm_values["master"] = {"environment": {"HOST_URL": host_url, "SERVER_PARK": server_park}}
helm_values["worker"]["environment"] = {"HOST_URL": host_url, "SERVER_PARK": server_park}
for index, seed_data_block in enumerate(split_seed_data):
helm_values["loadtest"]["mount_external_secret"]["files"][f"seed-data{index}"] = [f"seed-data{index}.csv"]
with open(f"seed-data{index}.csv", "w", newline="") as seed_data_csv:
seed_data_fieldnames = ["uac", "case_id", "instrument_name", "instrument_id"]
csv_writer = csv.DictWriter(seed_data_csv, fieldnames=seed_data_fieldnames)
csv_writer.writeheader()
for seed_data_row in seed_data_block:
csv_writer.writerow(seed_data_row)
with open("values.yaml", "w") as values_file:
yaml.dump(helm_values, values_file)
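# Illustrative .env sketch (not part of the original script); the values are
# placeholders, only the variable names come from the os.getenv calls above:
#
#   INSTRUMENT_NAMES=DST2101A,DST2102B
#   BUS_CLIENT_ID=example-client-id
#   BUS_URL=https://bus.example.com
#   HOST_URL=https://host.example.com
#   REST_API_URL=http://localhost:90
#   SERVER_PARK=gusty
#
# A key.json service-account file is also expected next to the script, since
# GOOGLE_APPLICATION_CREDENTIALS is pointed at it above.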
|
[] |
[] |
[
"BUS_URL",
"BUS_CLIENT_ID",
"REST_API_URL",
"GOOGLE_APPLICATION_CREDENTIALS",
"HOST_URL",
"SERVER_PARK",
"INSTRUMENT_NAMES"
] |
[]
|
["BUS_URL", "BUS_CLIENT_ID", "REST_API_URL", "GOOGLE_APPLICATION_CREDENTIALS", "HOST_URL", "SERVER_PARK", "INSTRUMENT_NAMES"]
|
python
| 7 | 0 | |
tests/runner/kube_testplatform.go
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package runner
import (
"fmt"
"os"
"strconv"
"log"
kube "github.com/dapr/dapr/tests/platforms/kubernetes"
)
const (
defaultImageRegistry = "docker.io/dapriotest"
defaultImageTag = "latest"
disableTelemetryConfig = "disable-telemetry"
defaultSidecarCPULimit = "4.0"
defaultSidecarMemoryLimit = "512Mi"
defaultSidecarCPURequest = "0.5"
defaultSidecarMemoryRequest = "250Mi"
)
// KubeTestPlatform includes K8s client for testing cluster and kubernetes testing apps.
type KubeTestPlatform struct {
AppResources *TestResources
ComponentResources *TestResources
KubeClient *kube.KubeClient
}
// NewKubeTestPlatform creates KubeTestPlatform instance.
func NewKubeTestPlatform() *KubeTestPlatform {
return &KubeTestPlatform{
AppResources: new(TestResources),
ComponentResources: new(TestResources),
}
}
func (c *KubeTestPlatform) setup() (err error) {
// TODO: KubeClient will be properly configured by go test arguments
c.KubeClient, err = kube.NewKubeClient("", "")
return
}
func (c *KubeTestPlatform) tearDown() error {
if err := c.AppResources.tearDown(); err != nil {
fmt.Fprintf(os.Stderr, "failed to tear down AppResources. got: %q", err)
}
if err := c.ComponentResources.tearDown(); err != nil {
fmt.Fprintf(os.Stderr, "failed to tear down ComponentResources. got: %q", err)
}
// TODO: clean up kube cluster
return nil
}
// addComponents adds component to disposable Resource queues.
func (c *KubeTestPlatform) addComponents(comps []kube.ComponentDescription) error {
if c.KubeClient == nil {
return fmt.Errorf("kubernetes cluster needs to be setup")
}
for _, comp := range comps {
c.ComponentResources.Add(kube.NewDaprComponent(c.KubeClient, kube.DaprTestNamespace, comp))
}
// setup component resources
if err := c.ComponentResources.setup(); err != nil {
return err
}
return nil
}
// addApps adds test apps to disposable App Resource queues.
func (c *KubeTestPlatform) addApps(apps []kube.AppDescription) error {
if c.KubeClient == nil {
return fmt.Errorf("kubernetes cluster needs to be setup before calling BuildAppResources")
}
dt := c.disableTelemetry()
for _, app := range apps {
if app.RegistryName == "" {
app.RegistryName = c.imageRegistry()
}
if app.ImageName == "" {
return fmt.Errorf("%s app doesn't have imagename property", app.AppName)
}
app.ImageName = fmt.Sprintf("%s:%s", app.ImageName, c.imageTag())
if dt {
app.Config = disableTelemetryConfig
}
app.DaprCPULimit = c.cpuLimit()
app.DaprCPURequest = c.cpuRequest()
app.DaprMemoryLimit = c.memoryLimit()
app.DaprMemoryRequest = c.memoryRequest()
log.Printf("Adding app %v", app)
c.AppResources.Add(kube.NewAppManager(c.KubeClient, kube.DaprTestNamespace, app))
}
// installApps installs the apps in AppResource queue sequentially
if err := c.AppResources.setup(); err != nil {
return err
}
return nil
}
func (c *KubeTestPlatform) imageRegistry() string {
reg := os.Getenv("DAPR_TEST_REGISTRY")
if reg == "" {
return defaultImageRegistry
}
return reg
}
func (c *KubeTestPlatform) imageTag() string {
tag := os.Getenv("DAPR_TEST_TAG")
if tag == "" {
return defaultImageTag
}
return tag
}
func (c *KubeTestPlatform) disableTelemetry() bool {
disableVal := os.Getenv("DAPR_DISABLE_TELEMETRY")
disable, err := strconv.ParseBool(disableVal)
if err != nil {
return false
}
return disable
}
func (c *KubeTestPlatform) cpuLimit() string {
cpu := os.Getenv("DAPR_SIDECAR_CPU_LIMIT")
if cpu != "" {
return cpu
}
return defaultSidecarCPULimit
}
func (c *KubeTestPlatform) cpuRequest() string {
cpu := os.Getenv("DAPR_SIDECAR_CPU_REQUEST")
if cpu != "" {
return cpu
}
return defaultSidecarCPURequest
}
func (c *KubeTestPlatform) memoryRequest() string {
mem := os.Getenv("DAPR_SIDECAR_MEMORY_REQUEST")
if mem != "" {
return mem
}
return defaultSidecarMemoryRequest
}
func (c *KubeTestPlatform) memoryLimit() string {
mem := os.Getenv("DAPR_SIDECAR_MEMORY_LIMIT")
if mem != "" {
return mem
}
return defaultSidecarMemoryLimit
}
// AcquireAppExternalURL returns the external url for 'name'.
func (c *KubeTestPlatform) AcquireAppExternalURL(name string) string {
app := c.AppResources.FindActiveResource(name)
return app.(*kube.AppManager).AcquireExternalURL()
}
// GetAppHostDetails returns the name and IP address of the host (pod) running 'name'.
func (c *KubeTestPlatform) GetAppHostDetails(name string) (string, string, error) {
app := c.AppResources.FindActiveResource(name)
hostname, hostIP, err := app.(*kube.AppManager).GetHostDetails()
if err != nil {
return "", "", err
}
return hostname, hostIP, nil
}
// Scale changes the number of replicas of the app
func (c *KubeTestPlatform) Scale(name string, replicas int32) error {
app := c.AppResources.FindActiveResource(name)
appManager := app.(*kube.AppManager)
if err := appManager.ScaleDeploymentReplica(replicas); err != nil {
return err
}
_, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone)
return err
}
// Restart restarts all instances for the app.
func (c *KubeTestPlatform) Restart(name string) error {
// To mimic the restart behavior, scale to 0 and then scale to the original replicas.
app := c.AppResources.FindActiveResource(name)
originalReplicas := app.(*kube.AppManager).App().Replicas
if err := c.Scale(name, 0); err != nil {
return err
}
return c.Scale(name, originalReplicas)
}
// PortForwardToApp opens a new connection to the app on the target port and returns the local port or error.
func (c *KubeTestPlatform) PortForwardToApp(appName string, targetPorts ...int) ([]int, error) {
app := c.AppResources.FindActiveResource(appName)
appManager := app.(*kube.AppManager)
_, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone)
if err != nil {
return nil, err
}
if targetPorts == nil {
return nil, fmt.Errorf("cannot open connection with no target ports")
}
return appManager.DoPortForwarding("", targetPorts...)
}
// GetAppUsage returns the CPU and memory usage of the dapr container for a given app.
func (c *KubeTestPlatform) GetAppUsage(appName string) (*AppUsage, error) {
app := c.AppResources.FindActiveResource(appName)
appManager := app.(*kube.AppManager)
cpu, mem, err := appManager.GetAppCPUAndMemory()
if err != nil {
return nil, err
}
return &AppUsage{
CPU: fmt.Sprintf("%vm", cpu),
Memory: fmt.Sprintf("%.2fMb", mem),
}, nil
}
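// Illustrative environment sketch (not part of the original file); the
// variable names come from the os.Getenv calls above, the values are
// examples only:
//
//   export DAPR_TEST_REGISTRY=docker.io/dapriotest
//   export DAPR_TEST_TAG=latest
//   export DAPR_DISABLE_TELEMETRY=false
//   export DAPR_SIDECAR_CPU_LIMIT=4.0
//   export DAPR_SIDECAR_MEMORY_LIMIT=512Mi
//
// Unset variables fall back to the default* constants declared at the top of
// this file.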
|
[
"\"DAPR_TEST_REGISTRY\"",
"\"DAPR_TEST_TAG\"",
"\"DAPR_DISABLE_TELEMETRY\"",
"\"DAPR_SIDECAR_CPU_LIMIT\"",
"\"DAPR_SIDECAR_CPU_REQUEST\"",
"\"DAPR_SIDECAR_MEMORY_REQUEST\"",
"\"DAPR_SIDECAR_MEMORY_LIMIT\""
] |
[] |
[
"DAPR_SIDECAR_CPU_REQUEST",
"DAPR_TEST_REGISTRY",
"DAPR_SIDECAR_CPU_LIMIT",
"DAPR_DISABLE_TELEMETRY",
"DAPR_SIDECAR_MEMORY_REQUEST",
"DAPR_TEST_TAG",
"DAPR_SIDECAR_MEMORY_LIMIT"
] |
[]
|
["DAPR_SIDECAR_CPU_REQUEST", "DAPR_TEST_REGISTRY", "DAPR_SIDECAR_CPU_LIMIT", "DAPR_DISABLE_TELEMETRY", "DAPR_SIDECAR_MEMORY_REQUEST", "DAPR_TEST_TAG", "DAPR_SIDECAR_MEMORY_LIMIT"]
|
go
| 7 | 0 | |
twarc/client.py
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import types
import logging
import datetime
import requests
import ssl
import time
from requests.exceptions import ConnectionError
from requests.packages.urllib3.exceptions import ProtocolError
from .decorators import *
from requests_oauthlib import OAuth1, OAuth1Session, OAuth2Session
from oauthlib.oauth2 import BackendApplicationClient
if sys.version_info[:2] <= (2, 7):
# Python 2
get_input = raw_input
str_type = unicode
import ConfigParser as configparser
from urlparse import parse_qs
else:
# Python 3
get_input = input
str_type = str
import configparser
from urllib.parse import parse_qs
log = logging.getLogger('twarc')
class Twarc(object):
"""
Twarc allows you to retrieve data from the Twitter API. Each method
is an iterator that runs to completion, and handles rate limiting so
that it will go to sleep when Twitter tells it to, and wake back up
when it is able to retrieve data from the API again.
"""
def __init__(self, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None,
connection_errors=0, http_errors=0, config=None,
profile="", protected=False, tweet_mode="extended",
app_auth=False, validate_keys=True):
"""
Instantiate a Twarc instance. If keys aren't set we'll try to
discover them in the environment or a supplied profile. If no
profile is indicated the first section of the config files will
be used.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
self.connection_errors = connection_errors
self.http_errors = http_errors
self.profile = profile
self.client = None
self.last_response = None
self.tweet_mode = tweet_mode
self.protected = protected
self.app_auth = app_auth
if config:
self.config = config
else:
self.config = self.default_config()
self.get_keys()
if validate_keys:
self.validate_keys()
@filter_protected
def search(self, q, max_id=None, since_id=None, lang=None,
result_type='recent', geocode=None, max_pages=None):
"""
Pass in a query with optional max_id, since_id, lang, geocode, or
max_pages, and get back an iterator for decoded tweets. Defaults to
recent (i.e. not mixed, the API default, or popular) tweets.
"""
url = "https://api.twitter.com/1.1/search/tweets.json"
params = {
"count": 100,
"q": q,
"include_ext_alt_text": 'true',
"include_entities": "true"
}
if lang is not None:
params['lang'] = lang
if geocode is not None:
params['geocode'] = geocode
if since_id:
# Make the since_id inclusive, so we can avoid retrieving
# an empty page of results in some cases
params['since_id'] = str(int(since_id) - 1)
if result_type in ['mixed', 'recent', 'popular']:
params['result_type'] = result_type
else:
params['result_type'] = 'recent'
retrieved_pages = 0
reached_end = False
while True:
# note: max_id changes as results are retrieved
if max_id:
params['max_id'] = max_id
resp = self.get(url, params=params)
retrieved_pages += 1
statuses = resp.json()["statuses"]
if len(statuses) == 0:
log.info("no new tweets matching %s", params)
break
for status in statuses:
# We've certainly reached the end of new results
if since_id is not None and status['id_str'] == str(since_id):
reached_end = True
break
yield status
if reached_end:
log.info("no new tweets matching %s", params)
break
if max_pages is not None and retrieved_pages == max_pages:
log.info("reached max page limit for %s", params)
break
max_id = str(int(status["id_str"]) - 1)
def premium_search(self, q, product, environment, from_date=None,
to_date=None, max_results=None, sandbox=False, limit=0):
"""
Search using the Premium Search API. You will need to pass in a query,
a product (30day or fullarchive) and an environment to use. Optionally
you can pass in a from_date and to_date to limit the search using
datetime objects. If you would like to set max_results you can, or
you can accept the maximum results (500). If using a sandbox
environment you will want to set sandbox=True to lower the max_results
to 100. The limit option will cause your search to finish after it has
returned more than that number of tweets (0 means no limit).
"""
if not self.app_auth:
raise RuntimeError(
"This endpoint is only available with application authentication. "
"Pass app_auth=True in Python or --app-auth on the command line."
)
if from_date and not isinstance(from_date, datetime.date):
raise RuntimeError("from_date must be a datetime.date or datetime.datetime object")
if to_date and not isinstance(to_date, datetime.date):
raise RuntimeError("to_date must be a datetime.date or datetime.datetime object")
if product not in ['30day', 'fullarchive']:
raise RuntimeError(
'Invalid Premium Search API product: {}'.format(product)
)
# set default max_results based on whether its sandboxed
if max_results is None:
if sandbox:
max_results = 100
else:
max_results = 500
url = 'https://api.twitter.com/1.1/tweets/search/{}/{}.json'.format(
product,
environment
)
params = {
"query": q,
"fromDate": from_date.strftime('%Y%m%d%H%M') if from_date else None,
"toDate": to_date.strftime('%Y%m%d%H%M') if to_date else None,
"maxResults": max_results
}
count = 0
stop = False
while not stop:
resp = self.get(url, params=params)
if resp.status_code == 200:
data = resp.json()
for tweet in data['results']:
count += 1
yield tweet
if limit != 0 and count >= limit:
stop = True
break
if 'next' in data:
params['next'] = data['next']
else:
stop = True
def timeline(self, user_id=None, screen_name=None, max_id=None,
since_id=None, max_pages=None):
"""
Returns a collection of the most recent tweets posted
by the user indicated by the user_id or screen_name parameter.
Provide a user_id or screen_name.
"""
if user_id and screen_name:
raise ValueError('only user_id or screen_name may be passed')
# Strip if screen_name is prefixed with '@'
if screen_name:
screen_name = screen_name.lstrip('@')
id = screen_name or str(user_id)
id_type = "screen_name" if screen_name else "user_id"
log.info("starting user timeline for user %s", id)
if screen_name or user_id:
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
else:
url = "https://api.twitter.com/1.1/statuses/home_timeline.json"
params = {"count": 200, id_type: id, "include_ext_alt_text": "true"}
retrieved_pages = 0
reached_end = False
while True:
if since_id:
# Make the since_id inclusive, so we can avoid retrieving
# an empty page of results in some cases
params['since_id'] = str(int(since_id) - 1)
if max_id:
params['max_id'] = max_id
try:
resp = self.get(url, params=params, allow_404=True)
retrieved_pages += 1
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.warn("no timeline available for %s", id)
break
elif e.response.status_code == 401:
log.warn("protected account %s", id)
break
raise e
statuses = resp.json()
if len(statuses) == 0:
log.info("no new tweets matching %s", params)
break
for status in statuses:
# We've certainly reached the end of new results
if since_id is not None and status['id_str'] == str(since_id):
reached_end = True
break
# If you request an invalid user_id, you may still get
# results so need to check.
if not user_id or id == status.get("user",
{}).get("id_str"):
yield status
if reached_end:
log.info("no new tweets matching %s", params)
break
if max_pages is not None and retrieved_pages == max_pages:
log.info("reached max page limit for %s", params)
break
max_id = str(int(status["id_str"]) - 1)
def user_lookup(self, ids, id_type="user_id"):
"""
A generator that returns users for the supplied user_ids or
screen_names, given either as a sequence or an iterator. Use id_type
to indicate which you are supplying (user_id or screen_name).
"""
if id_type not in ['user_id', 'screen_name']:
raise RuntimeError("id_type must be user_id or screen_name")
if not isinstance(ids, types.GeneratorType):
ids = iter(ids)
# TODO: this is similar to hydrate, maybe they could share code?
lookup_ids = []
def do_lookup():
ids_str = ",".join(lookup_ids)
log.info("looking up users %s", ids_str)
url = 'https://api.twitter.com/1.1/users/lookup.json'
params = {id_type: ids_str}
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.warning("no users matching %s", ids_str)
raise e
return resp.json()
for id in ids:
lookup_ids.append(id.strip())
if len(lookup_ids) == 100:
for u in do_lookup():
yield u
lookup_ids = []
if len(lookup_ids) > 0:
for u in do_lookup():
yield u
def follower_ids(self, user, max_pages=None):
"""
Returns Twitter user id lists for the specified user's followers.
A user can be specified using their screen_name or user_id.
"""
user = str(user)
user = user.lstrip('@')
url = 'https://api.twitter.com/1.1/followers/ids.json'
if re.match(r'^\d+$', user):
params = {'user_id': user, 'cursor': -1}
else:
params = {'screen_name': user, 'cursor': -1}
retrieved_pages = 0
while params['cursor'] != 0:
try:
resp = self.get(url, params=params, allow_404=True)
retrieved_pages += 1
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.info("no users matching %s", screen_name)
raise e
user_ids = resp.json()
for user_id in user_ids['ids']:
yield str_type(user_id)
params['cursor'] = user_ids['next_cursor']
if max_pages is not None and retrieved_pages == max_pages:
log.info("reached max follower page limit for %s", params)
break
def friend_ids(self, user, max_pages=None):
"""
Returns Twitter user id lists for the specified user's friends. A user
can be specified using their screen_name or user_id.
"""
user = str(user)
user = user.lstrip('@')
url = 'https://api.twitter.com/1.1/friends/ids.json'
if re.match(r'^\d+$', user):
params = {'user_id': user, 'cursor': -1}
else:
params = {'screen_name': user, 'cursor': -1}
retrieved_pages = 0
while params['cursor'] != 0:
try:
resp = self.get(url, params=params, allow_404=True)
retrieved_pages += 1
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.error("no users matching %s", user)
raise e
user_ids = resp.json()
for user_id in user_ids['ids']:
yield str_type(user_id)
params['cursor'] = user_ids['next_cursor']
if max_pages is not None and retrieved_pages == max_pages:
log.info("reached max friend page limit for %s", params)
break
@filter_protected
def filter(self, track=None, follow=None, locations=None, lang=[],
event=None, record_keepalive=False):
"""
Returns an iterator for tweets that match a given filter track from
the livestream of tweets happening right now.
If a threading.Event is provided for event and the event is set,
the filter will be interrupted.
"""
if locations is not None:
if type(locations) == list:
locations = ','.join(locations)
locations = locations.replace('\\', '')
url = 'https://stream.twitter.com/1.1/statuses/filter.json'
params = {
"stall_warning": True,
"include_ext_alt_text": True
}
if track:
params["track"] = track
if follow:
params["follow"] = follow
if locations:
params["locations"] = locations
if lang:
# should be a list, but just in case
if isinstance(lang, list):
params['language'] = ','.join(lang)
else:
params['language'] = lang
headers = {'accept-encoding': 'deflate, gzip'}
errors = 0
while True:
try:
log.info("connecting to filter stream for %s", params)
resp = self.post(url, params, headers=headers, stream=True)
errors = 0
for line in resp.iter_lines(chunk_size=1024):
if event and event.is_set():
log.info("stopping filter")
# Explicitly close response
resp.close()
return
if not line:
log.info("keep-alive")
if record_keepalive:
yield "keep-alive"
continue
try:
yield json.loads(line.decode())
except Exception as e:
log.error("json parse error: %s - %s", e, line)
except requests.exceptions.HTTPError as e:
errors += 1
log.error("caught http error %s on %s try", e, errors)
if self.http_errors and errors == self.http_errors:
log.warning("too many errors")
raise e
if e.response.status_code == 420:
if interruptible_sleep(errors * 60, event):
log.info("stopping filter")
return
else:
if interruptible_sleep(errors * 5, event):
log.info("stopping filter")
return
except Exception as e:
errors += 1
log.error("caught exception %s on %s try", e, errors)
if self.http_errors and errors == self.http_errors:
log.warning("too many exceptions")
raise e
log.error(e)
if interruptible_sleep(errors, event):
log.info("stopping filter")
return
def sample(self, event=None, record_keepalive=False):
"""
Returns a small random sample of all public statuses. The Tweets
returned by the default access level are the same, so if two different
clients connect to this endpoint, they will see the same Tweets.
If a threading.Event is provided for event and the event is set,
the sample will be interrupted.
"""
url = 'https://stream.twitter.com/1.1/statuses/sample.json'
params = {"stall_warning": True}
headers = {'accept-encoding': 'deflate, gzip'}
errors = 0
while True:
try:
log.info("connecting to sample stream")
resp = self.post(url, params, headers=headers, stream=True)
errors = 0
for line in resp.iter_lines(chunk_size=512):
if event and event.is_set():
log.info("stopping sample")
# Explicitly close response
resp.close()
return
if line == "":
log.info("keep-alive")
if record_keepalive:
yield "keep-alive"
continue
try:
yield json.loads(line.decode())
except Exception as e:
log.error("json parse error: %s - %s", e, line)
except requests.exceptions.HTTPError as e:
errors += 1
log.error("caught http error %s on %s try", e, errors)
if self.http_errors and errors == self.http_errors:
log.warning("too many errors")
raise e
if e.response.status_code == 420:
if interruptible_sleep(errors * 60, event):
log.info("stopping filter")
return
else:
if interruptible_sleep(errors * 5, event):
log.info("stopping filter")
return
except Exception as e:
errors += 1
log.error("caught exception %s on %s try", e, errors)
if self.http_errors and errors == self.http_errors:
log.warning("too many errors")
raise e
if interruptible_sleep(errors, event):
log.info("stopping filter")
return
def dehydrate(self, iterator):
"""
Pass in an iterator of tweets' JSON and get back an iterator of the
IDs of each tweet.
"""
for line in iterator:
try:
yield json.loads(line)['id_str']
except Exception as e:
log.error("uhoh: %s\n" % e)
def hydrate(self, iterator, trim_user=False):
"""
Pass in an iterator of tweet ids and get back an iterator for the
decoded JSON for each corresponding tweet.
"""
ids = []
url = "https://api.twitter.com/1.1/statuses/lookup.json"
# lookup 100 tweets at a time
for tweet_id in iterator:
tweet_id = str(tweet_id)
tweet_id = tweet_id.strip() # remove new line if present
ids.append(tweet_id)
if len(ids) == 100:
log.info("hydrating %s ids", len(ids))
resp = self.post(url, data={
"id": ','.join(ids),
"include_ext_alt_text": 'true',
"include_entities": 'true',
"trim_user": trim_user
})
tweets = resp.json()
tweets.sort(key=lambda t: t['id_str'])
for tweet in tweets:
yield tweet
ids = []
# hydrate any remaining ones
if len(ids) > 0:
log.info("hydrating %s", ids)
resp = self.post(url, data={
"id": ','.join(ids),
"include_ext_alt_text": 'true',
"include_entities": 'true',
"trim_user": trim_user
})
for tweet in resp.json():
yield tweet
def tweet(self, tweet_id):
try:
return next(self.hydrate([tweet_id]))
except StopIteration:
return []
def retweets(self, tweet_ids):
"""
Retrieves up to the last 100 retweets for the provided iterator of tweet_ids.
"""
if not isinstance(tweet_ids, types.GeneratorType):
tweet_ids = iter(tweet_ids)
for tweet_id in tweet_ids:
if hasattr(tweet_id, 'strip'):
tweet_id = tweet_id.strip()
log.info("retrieving retweets of %s", tweet_id)
url = "https://api.twitter.com/1.1/statuses/retweets/""{}.json".format(
tweet_id)
try:
resp = self.get(url, params={"count": 100}, allow_404=True)
for tweet in resp.json():
yield tweet
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.info("can't get tweets for non-existent tweet: %s", tweet_id)
def trends_available(self):
"""
Returns a list of regions for which Twitter tracks trends.
"""
url = 'https://api.twitter.com/1.1/trends/available.json'
try:
resp = self.get(url)
except requests.exceptions.HTTPError as e:
raise e
return resp.json()
def trends_place(self, woeid, exclude=None):
"""
Returns recent Twitter trends for the specified WOEID. If
exclude == 'hashtags', Twitter will remove hashtag trends from the
response.
"""
url = 'https://api.twitter.com/1.1/trends/place.json'
params = {'id': woeid}
if exclude:
params['exclude'] = exclude
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.info("no region matching WOEID %s", woeid)
raise e
return resp.json()
def trends_closest(self, lat, lon):
"""
Returns the closest regions for the supplied lat/lon.
"""
url = 'https://api.twitter.com/1.1/trends/closest.json'
params = {'lat': lat, 'long': lon}
try:
resp = self.get(url, params=params)
except requests.exceptions.HTTPError as e:
raise e
return resp.json()
def replies(self, tweet, recursive=False, prune=()):
"""
replies returns a generator of tweets that are replies for a given
tweet. It includes the original tweet. If you would like to fetch the
replies to the replies use recursive=True which will do a depth-first
recursive walk of the replies. It also walks up the reply chain if you
supply a tweet that is itself a reply to another tweet. You can
optionally supply a tuple of tweet ids to ignore during this traversal
using the prune parameter.
"""
yield tweet
# get replies to the tweet
screen_name = tweet['user']['screen_name']
tweet_id = tweet['id_str']
log.info("looking for replies to: %s", tweet_id)
for reply in self.search("to:%s" % screen_name, since_id=tweet_id):
if reply['in_reply_to_status_id_str'] != tweet_id:
continue
if reply['id_str'] in prune:
log.info("ignoring pruned tweet id %s", reply['id_str'])
continue
log.info("found reply: %s", reply["id_str"])
if recursive:
if reply['id_str'] not in prune:
prune = prune + (tweet_id,)
for r in self.replies(reply, recursive, prune):
yield r
else:
yield reply
# if this tweet is itself a reply to another tweet get it and
# get other potential replies to it
reply_to_id = tweet.get('in_reply_to_status_id_str')
log.info("prune=%s", prune)
if recursive and reply_to_id and reply_to_id not in prune:
t = self.tweet(reply_to_id)
if t:
log.info("found reply-to: %s", t['id_str'])
prune = prune + (tweet['id_str'],)
for r in self.replies(t, recursive=True, prune=prune):
yield r
# if this tweet quotes another tweet, go get that too, along with
# whatever tweets it may be in reply to
quote_id = tweet.get('quoted_status_id_str')
if recursive and quote_id and quote_id not in prune:
t = self.tweet(quote_id)
if t:
log.info("found quote: %s", t['id_str'])
prune = prune + (tweet['id_str'],)
for r in self.replies(t, recursive=True, prune=prune):
yield r
def list_members(self, list_id=None, slug=None, owner_screen_name=None, owner_id=None):
"""
Returns the members of a list.
List id or (slug and (owner_screen_name or owner_id)) are required
"""
assert list_id or (slug and (owner_screen_name or owner_id))
url = 'https://api.twitter.com/1.1/lists/members.json'
params = {'cursor': -1}
if list_id:
params['list_id'] = list_id
else:
params['slug'] = slug
if owner_screen_name:
params['owner_screen_name'] = owner_screen_name
else:
params['owner_id'] = owner_id
while params['cursor'] != 0:
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.error("no matching list")
raise e
users = resp.json()
for user in users['users']:
yield user
params['cursor'] = users['next_cursor']
def oembed(self, tweet_url, **params):
"""
Returns the oEmbed JSON for a tweet. The JSON includes an html
key that contains the HTML for the embed. You can pass in
parameters that correspond to the parameters that Twitter's
statuses/oembed endpoint supports. For example:
o = client.oembed('https://twitter.com/biz/status/21', theme='dark')
"""
log.info("generating embedding for tweet %s", tweet_url)
url = "https://publish.twitter.com/oembed"
params['url'] = tweet_url
resp = self.get(url, params=params)
return resp.json()
@rate_limit
@catch_conn_reset
@catch_timeout
@catch_gzip_errors
def get(self, *args, **kwargs):
if not self.client:
self.connect()
# set default tweet_mode
if "params" not in kwargs:
kwargs["params"] = {"tweet_mode": self.tweet_mode}
else:
kwargs["params"]["tweet_mode"] = self.tweet_mode
# Pass allow 404 to not retry on 404
allow_404 = kwargs.pop('allow_404', False)
connection_error_count = kwargs.pop('connection_error_count', 0)
try:
log.info("getting %s %s", args, kwargs)
r = self.last_response = self.client.get(*args, timeout=(3.05, 31),
**kwargs)
# this has been noticed, believe it or not
# https://github.com/edsu/twarc/issues/75
if r.status_code == 404 and not allow_404:
log.warning("404 from Twitter API! trying again")
time.sleep(1)
r = self.get(*args, **kwargs)
return r
except (ssl.SSLError, ConnectionError, ProtocolError) as e:
connection_error_count += 1
log.error("caught connection error %s on %s try", e,
connection_error_count)
if (self.connection_errors and
connection_error_count == self.connection_errors):
log.error("received too many connection errors")
raise e
else:
self.connect()
kwargs['connection_error_count'] = connection_error_count
kwargs['allow_404'] = allow_404
return self.get(*args, **kwargs)
@rate_limit
@catch_conn_reset
@catch_timeout
@catch_gzip_errors
def post(self, *args, **kwargs):
if not self.client:
self.connect()
if "data" in kwargs:
kwargs["data"]["tweet_mode"] = self.tweet_mode
connection_error_count = kwargs.pop('connection_error_count', 0)
try:
log.info("posting %s %s", args, kwargs)
self.last_response = self.client.post(*args, timeout=(3.05, 31),
**kwargs)
return self.last_response
except (ssl.SSLError, ConnectionError, ProtocolError) as e:
connection_error_count += 1
log.error("caught connection error %s on %s try", e,
connection_error_count)
if (self.connection_errors and
connection_error_count == self.connection_errors):
log.error("received too many connection errors")
raise e
else:
self.connect()
kwargs['connection_error_count'] = connection_error_count
return self.post(*args, **kwargs)
@catch_timeout
def connect(self):
"""
Sets up the HTTP session to talk to Twitter. If one is active it is
closed and another one is opened.
"""
if not (self.consumer_key and self.consumer_secret and self.access_token
and self.access_token_secret):
raise RuntimeError("MissingKeys")
if self.client:
log.info("closing existing http session")
self.client.close()
if self.last_response:
log.info("closing last response")
self.last_response.close()
log.info("creating http session")
if not self.app_auth:
logging.info('creating OAuth1 user authentication')
self.client = OAuth1Session(
client_key=self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret
)
else:
logging.info('creating OAuth2 app authentication')
client = BackendApplicationClient(client_id=self.consumer_key)
oauth = OAuth2Session(client=client)
token = oauth.fetch_token(
token_url='https://api.twitter.com/oauth2/token',
client_id=self.consumer_key,
client_secret=self.consumer_secret
)
self.client = oauth
def get_keys(self):
"""
Get the Twitter API keys. Order of precedence is command line,
environment, config file. Return True if all the keys were found
and False if not.
"""
env = os.environ.get
if not self.consumer_key:
self.consumer_key = env('CONSUMER_KEY')
if not self.consumer_secret:
self.consumer_secret = env('CONSUMER_SECRET')
if not self.access_token:
self.access_token = env('ACCESS_TOKEN')
if not self.access_token_secret:
self.access_token_secret = env('ACCESS_TOKEN_SECRET')
if self.config and not (self.consumer_key and
self.consumer_secret and
self.access_token and
self.access_token_secret):
self.load_config()
def validate_keys(self):
"""
Validate the keys provided are authentic credentials.
"""
url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
keys_present = self.consumer_key and self.consumer_secret and \
self.access_token and self.access_token_secret
if self.app_auth:
# no need to validate keys when using OAuth2 App Auth.
return True
if keys_present:
try:
# Need to explicitly reconnect to confirm the current creds
# are used in the session object.
self.connect()
self.get(url)
return True
except requests.HTTPError as e:
if e.response.status_code == 401:
raise RuntimeError('Invalid credentials provided.')
else:
raise e
else:
print('Incomplete credentials provided.')
print('Please run the command "twarc configure" to get started.')
sys.exit()
def load_config(self):
path = self.config
profile = self.profile
log.info("loading %s profile from config %s", profile, path)
if not path or not os.path.isfile(path):
return {}
config = configparser.ConfigParser()
config.read(self.config)
if len(config.sections()) >= 1 and not profile:
profile = config.sections()[0]
data = {}
for key in ['access_token', 'access_token_secret',
'consumer_key', 'consumer_secret']:
try:
setattr(self, key, config.get(profile, key))
except configparser.NoSectionError:
sys.exit("no such profile %s in %s" % (profile, path))
except configparser.NoOptionError:
sys.exit("missing %s from profile %s in %s" % (
key, profile, path))
return data
def save_config(self, profile):
if not self.config:
return
config = configparser.ConfigParser()
config.read(self.config)
if config.has_section(profile):
config.remove_section(profile)
config.add_section(profile)
config.set(profile, 'consumer_key', self.consumer_key)
config.set(profile, 'consumer_secret', self.consumer_secret)
config.set(profile, 'access_token', self.access_token)
config.set(profile, 'access_token_secret',
self.access_token_secret)
with open(self.config, 'w') as config_file:
config.write(config_file)
return config
def configure(self):
print("\nTwarc needs to know a few things before it can talk to Twitter on your behalf.\n")
reuse = False
if self.consumer_key and self.consumer_secret:
print("You already have these application keys in your config %s\n" % self.config)
print("consumer key: %s" % self.consumer_key)
print("consumer secret: %s" % self.consumer_secret)
reuse = get_input("\nWould you like to use those for your new profile? [y/n] ")
reuse = reuse.lower() == 'y'
if not reuse:
print("\nPlease enter your Twitter application credentials from apps.twitter.com:\n")
self.consumer_key = get_input('consumer key: ')
self.consumer_secret = get_input('consumer secret: ')
answered = False
while not answered:
print("\nHow would you like twarc to obtain your user keys?\n\n1) generate access keys by visiting Twitter\n2) manually enter your access token and secret\n")
answer = get_input('Please enter your choice [1/2] ')
if answer == "1":
answered = True
generate = True
elif answer == "2":
answered = True
generate = False
if generate:
request_token_url = 'https://api.twitter.com/oauth/request_token'
oauth = OAuth1(self.consumer_key, client_secret=self.consumer_secret)
r = requests.post(url=request_token_url, auth=oauth)
credentials = parse_qs(r.text)
if not credentials:
print("\nError: invalid credentials.")
print("Please check that you are copying and pasting correctly and try again.\n")
return
resource_owner_key = credentials.get('oauth_token')[0]
resource_owner_secret = credentials.get('oauth_token_secret')[0]
base_authorization_url = 'https://api.twitter.com/oauth/authorize'
authorize_url = base_authorization_url + '?oauth_token=' + resource_owner_key
print('\nPlease log into Twitter and visit this URL in your browser:\n%s' % authorize_url)
verifier = get_input('\nAfter you have authorized the application please enter the displayed PIN: ')
access_token_url = 'https://api.twitter.com/oauth/access_token'
oauth = OAuth1(self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
r = requests.post(url=access_token_url, auth=oauth)
credentials = parse_qs(r.text)
if not credentials:
print('\nError: invalid PIN')
print('Please check that you entered the PIN correctly and try again.\n')
return
self.access_token = resource_owner_key = credentials.get('oauth_token')[0]
self.access_token_secret = credentials.get('oauth_token_secret')[0]
screen_name = credentials.get('screen_name')[0]
else:
self.access_token = get_input("Enter your Access Token: ")
self.access_token_secret = get_input("Enter your Access Token Secret: ")
screen_name = "default"
config = self.save_config(screen_name)
print('\nThe credentials for %s have been saved to your configuration file at %s' % (screen_name, self.config))
print('\n✨ ✨ ✨ Happy twarcing! ✨ ✨ ✨\n')
if len(config.sections()) > 1:
print('Note: you have multiple profiles in %s, so in order to use %s you will need to pass --profile\n' % (self.config, screen_name))
def default_config(self):
return os.path.join(os.path.expanduser("~"), ".twarc")
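# A minimal usage sketch (illustrative only; `client` stands for an instance of
# the surrounding class and is an assumption, not something defined here):
#
#     client.config = client.default_config()   # ~/.twarc
#     client.load_config()                       # read keys for the chosen profile
#     if not client.consumer_key:
#         client.configure()                     # interactive OAuth PIN flow shown above
#
# configure() walks the OAuth 1.0a out-of-band (PIN) flow and then calls
# save_config() with the authorized screen name as the profile name.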
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bin/make_manual.py
|
#!/usr/bin/env python3
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
import time
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"_index.md",
"install.md",
"docs.md",
"remote_setup.md",
"filtering.md",
"gui.md",
"rc.md",
"overview.md",
"flags.md",
"docker.md",
# Keep these alphabetical by full name
"fichier.md",
"alias.md",
"amazonclouddrive.md",
"s3.md",
"b2.md",
"box.md",
"cache.md",
"chunker.md",
"sharefile.md",
"crypt.md",
"compress.md",
"dropbox.md",
"filefabric.md",
"ftp.md",
"googlecloudstorage.md",
"drive.md",
"googlephotos.md",
"hasher.md",
"hdfs.md",
"http.md",
"hubic.md",
"jottacloud.md",
"koofr.md",
"mailru.md",
"mega.md",
"memory.md",
"azureblob.md",
"onedrive.md",
"opendrive.md",
"qingstor.md",
"sia.md",
"swift.md",
"pcloud.md",
"premiumizeme.md",
"putio.md",
"seafile.md",
"sftp.md",
"sugarsync.md",
"tardigrade.md",
"uptobox.md",
"union.md",
"webdav.md",
"yandex.md",
"zoho.md",
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",
]
# Order to put the commands in - any not on here will be in sorted order
commands_order = [
"rclone_config.md",
"rclone_copy.md",
"rclone_sync.md",
"rclone_move.md",
"rclone_delete.md",
"rclone_purge.md",
"rclone_mkdir.md",
"rclone_rmdir.md",
"rclone_check.md",
"rclone_ls.md",
"rclone_lsd.md",
"rclone_lsl.md",
"rclone_md5sum.md",
"rclone_sha1sum.md",
"rclone_size.md",
"rclone_version.md",
"rclone_cleanup.md",
"rclone_dedupe.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
"privacy.md",
"donate.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Interpret img shortcodes
# {{< img ... >}}
contents = re.sub(r'\{\{<\s*img\s+(.*?)>\}\}', r"<img \1>", contents)
# Make any img tags absolute
contents = re.sub(r'(<img.*?src=")/', r"\1https://rclone.org/", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\]\((\/.*?\/(#.*)?)\)', r"](https://rclone.org\1)", contents)
# Add additional links on the front page
contents = re.sub(r'\{\{< rem MAINPAGELINK >\}\}', "- [Donate.](https://rclone.org/donate/)", contents)
# Interpret provider shortcode
# {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
contents = re.sub(r'\{\{<\s*provider.*?name="(.*?)".*?>\}\}', r"- \1", contents)
# Remove remaining shortcodes
contents = re.sub(r'\{\{<.*?>\}\}', r"", contents)
contents = re.sub(r'\{\{%.*?%\}\}', r"", contents)
return contents
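# Worked example of the substitutions above (illustrative input, not taken from a real doc):
#   '{{< img src="/img/logo.png" >}}'        -> '<img src="/img/logo.png" >'
#   '<img src="/img/logo.png" >'             -> '<img src="https://rclone.org/img/logo.png" >'
#   '[docs](/docs/#filtering)'               -> '[docs](https://rclone.org/docs/#filtering)'
#   '{{< provider name="Amazon S3" config="/s3/" >}}' -> '- Amazon S3'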
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print("Files on disk but not in docs variable: %s" % ", ".join(files - docs_set))
print("Files in docs variable but not on disk: %s" % ", ".join(docs_set - files))
raise ValueError("Missing files")
def read_command(command):
doc = read_doc("commands/"+command)
doc = re.sub(r"### Options inherited from parent commands.*$", "", doc, 0, re.S)
doc = doc.strip()+"\n"
return doc
def read_commands(docpath):
"""Reads the commands an makes them into a single page"""
files = set(f for f in os.listdir(docpath + "/commands") if f.endswith(".md"))
docs = []
for command in commands_order:
docs.append(read_command(command))
files.remove(command)
for command in sorted(files):
if command != "rclone.md":
docs.append(read_command(command))
return "\n".join(docs)
def main():
check_docs(docpath)
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
build_date = datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % build_date.strftime("%b %d, %Y"))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md
if doc == "docs.md":
contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
out.write(contents)
print("Written '%s'" % outfile)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"SOURCE_DATE_EPOCH"
] |
[]
|
["SOURCE_DATE_EPOCH"]
|
python
| 1 | 0 | |
code/src/features.py
|
import pandas as pd
import numpy as np
import os, sys, gc, random
import datetime
import dateutil.relativedelta
# Machine learning
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
# Custom library
from utils import seed_everything, print_score
TOTAL_THRES = 300 # purchase amount threshold
SEED = 42 # random seed
seed_everything(SEED) # fix the seed
data_dir = '../input/train.csv' # os.environ['SM_CHANNEL_TRAIN']
model_dir = '../model' # os.environ['SM_MODEL_DIR']
'''
Generate a binary label per customer ID indicating whether the total purchase
amount in the given year_month exceeds the purchase amount threshold.
'''
def generate_label(df, year_month, total_thres=TOTAL_THRES, print_log=False):
df = df.copy()
# build the label data for the given year_month
df['year_month'] = df['order_date'].dt.strftime('%Y-%m')
df.reset_index(drop=True, inplace=True)
# extract the customer IDs seen before year_month
cust = df[df['year_month']<year_month]['customer_id'].unique()
# select the rows that belong to year_month
df = df[df['year_month']==year_month]
# create the label dataframe
label = pd.DataFrame({'customer_id':cust})
label['year_month'] = year_month
# sum the purchase amounts per customer ID for year_month
grped = df.groupby(['customer_id','year_month'], as_index=False)[['total']].sum()
# merge with the label dataframe and set the label by whether the threshold was exceeded
label = label.merge(grped, on=['customer_id','year_month'], how='left')
label['total'].fillna(0.0, inplace=True)
label['label'] = (label['total'] > total_thres).astype(int)
# sort by customer ID
label = label.sort_values('customer_id').reset_index(drop=True)
if print_log: print(f'{year_month} - final label shape: {label.shape}')
return label
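# A minimal usage sketch (illustrative; the column names come from the code above):
# given a dataframe with a datetime 'order_date' column plus 'customer_id' and 'total',
#
#     label = generate_label(df, '2011-11')
#
# returns one row per customer seen before 2011-11 with columns
# ['customer_id', 'year_month', 'total', 'label'], where label is 1 if that
# customer's 2011-11 purchases exceed TOTAL_THRES (300) and 0 otherwise.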
def feature_preprocessing(train, test, features, do_imputing=True):
x_tr = train.copy()
x_te = test.copy()
# variable that stores the categorical feature names
cate_cols = []
# label encoding
for f in features:
if x_tr[f].dtype.name == 'object': # label-encode columns whose dtype is object (str)
cate_cols.append(f)
le = LabelEncoder()
# fit the label encoder on the combined train + test values
le.fit(list(x_tr[f].values) + list(x_te[f].values))
# label-encode the train data
x_tr[f] = le.transform(list(x_tr[f].values))
# label-encode the test data
x_te[f] = le.transform(list(x_te[f].values))
print('categorical feature:', cate_cols)
if do_imputing:
# fill missing values with the median
imputer = SimpleImputer(strategy='median')
x_tr[features] = imputer.fit_transform(x_tr[features])
x_te[features] = imputer.transform(x_te[features])
return x_tr, x_te
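# A small illustration of the label-encoding step above (hypothetical values):
# if a categorical column 'country' contains ['KR', 'US'] in train and ['US', 'FR']
# in test, the encoder is fit on the union ['KR', 'US', 'US', 'FR'], so 'FR', which
# appears only in test, still gets a valid integer code instead of raising an error.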
def feature_engineering(df, year_month):
df = df.copy()
# compute the month before year_month
d = datetime.datetime.strptime(year_month, "%Y-%m")
prev_ym = d - dateutil.relativedelta.relativedelta(months=1)
prev_ym = prev_ym.strftime('%Y-%m')
# select the train and test data
train = df[df['order_date'] < prev_ym]
test = df[df['order_date'] < year_month]
# build the train and test label data
train_label = generate_label(df, prev_ym)[['customer_id','year_month','label']]
test_label = generate_label(df, year_month)[['customer_id','year_month','label']]
# declare the group-by aggregation functions
agg_func = ['mean','max','min','sum','count','std','skew']
all_train_data = pd.DataFrame()
for i, tr_ym in enumerate(train_label['year_month'].unique()):
# build train-data features with the group-by aggregation functions
train_agg = train.loc[train['order_date'] < tr_ym].groupby(['customer_id']).agg(agg_func)
# flatten the multi-level columns into single-level column names
new_cols = []
for col in train_agg.columns.levels[0]:
for stat in train_agg.columns.levels[1]:
new_cols.append(f'{col}-{stat}')
train_agg.columns = new_cols
train_agg.reset_index(inplace = True)
train_agg['year_month'] = tr_ym
all_train_data = all_train_data.append(train_agg)
all_train_data = train_label.merge(all_train_data, on=['customer_id', 'year_month'], how='left')
features = all_train_data.drop(columns=['customer_id', 'label', 'year_month']).columns
# build test-data features with the group-by aggregation functions
test_agg = test.groupby(['customer_id']).agg(agg_func)
test_agg.columns = new_cols
test_data = test_label.merge(test_agg, on=['customer_id'], how='left')
# preprocess the train and test data
x_tr, x_te = feature_preprocessing(all_train_data, test_data, features)
print('x_tr.shape', x_tr.shape, ', x_te.shape', x_te.shape)
return x_tr, x_te, all_train_data['label'], features
def feature_engineering1(df, year_month):
df = df.copy()
# cumulative sums of total, quantity and price grouped by customer_id
df['cumsum_total_by_cust_id'] = df.groupby(['customer_id'])['total'].cumsum()
df['cumsum_quantity_by_cust_id'] = df.groupby(['customer_id'])['quantity'].cumsum()
df['cumsum_price_by_cust_id'] = df.groupby(['customer_id'])['price'].cumsum()
# cumulative sums of total, quantity and price grouped by product_id
df['cumsum_total_by_prod_id'] = df.groupby(['product_id'])['total'].cumsum()
df['cumsum_quantity_by_prod_id'] = df.groupby(['product_id'])['quantity'].cumsum()
df['cumsum_price_by_prod_id'] = df.groupby(['product_id'])['price'].cumsum()
# cumulative sums of total, quantity and price grouped by order_id
df['cumsum_total_by_order_id'] = df.groupby(['order_id'])['total'].cumsum()
df['cumsum_quantity_by_order_id'] = df.groupby(['order_id'])['quantity'].cumsum()
df['cumsum_price_by_order_id'] = df.groupby(['order_id'])['price'].cumsum()
# compute the month before year_month
d = datetime.datetime.strptime(year_month, "%Y-%m")
prev_ym = d - dateutil.relativedelta.relativedelta(months=1)
prev_ym = prev_ym.strftime('%Y-%m')
# select the train and test data
train = df[df['order_date'] < prev_ym]
test = df[df['order_date'] < year_month]
# build the train and test label data
train_label = generate_label(df, prev_ym)[['customer_id', 'year_month', 'label']]
test_label = generate_label(df, year_month)[['customer_id', 'year_month', 'label']]
# declare the group-by aggregation functions
agg_func = ['mean', 'max', 'min', 'sum', 'count', 'std', 'skew']
agg_dict = {
'quantity': agg_func,
'price': agg_func,
'total': agg_func,
'cumsum_total_by_cust_id': agg_func,
'cumsum_quantity_by_cust_id': agg_func,
'cumsum_price_by_cust_id': agg_func,
'cumsum_total_by_prod_id': agg_func,
'cumsum_quantity_by_prod_id': agg_func,
'cumsum_price_by_prod_id': agg_func,
'cumsum_total_by_order_id': agg_func,
'cumsum_quantity_by_order_id': agg_func,
'cumsum_price_by_order_id': agg_func,
'order_id': ['nunique'],
'product_id': ['nunique'],
}
all_train_data = pd.DataFrame()
for i, tr_ym in enumerate(train_label['year_month'].unique()):
# build train-data features with the group-by aggregation functions
train_agg = train.loc[train['order_date'] < tr_ym].groupby(['customer_id']).agg(agg_dict)
new_cols = []
for col in agg_dict.keys():
for stat in agg_dict[col]:
if type(stat) is str:
new_cols.append(f'{col}-{stat}')
else:
new_cols.append(f'{col}-mode')
train_agg.columns = new_cols
train_agg.reset_index(inplace=True)
train_agg['year_month'] = tr_ym
all_train_data = all_train_data.append(train_agg)
all_train_data = train_label.merge(all_train_data, on=['customer_id', 'year_month'], how='left')
features = all_train_data.drop(columns=['customer_id', 'label', 'year_month']).columns
# build test-data features with the group-by aggregation functions
test_agg = test.groupby(['customer_id']).agg(agg_dict)
test_agg.columns = new_cols
test_data = test_label.merge(test_agg, on=['customer_id'], how='left')
# preprocess the train and test data
x_tr, x_te = feature_preprocessing(all_train_data, test_data, features)
print('x_tr.shape', x_tr.shape, ', x_te.shape', x_te.shape)
return x_tr, x_te, all_train_data['label'], features
# def get_year_month_list(df, year_month):
# df = df.copy()
#
# df['year_month-mode'] = df['order_date'].dt.strftime('%Y-%m')
# dd = df.groupby(['year_month-mode', 'customer_id'])['total'].sum()
# cust_ids = df['customer_id'].unique()
#
# # compute the month before year_month
# bef_12_d = datetime.datetime.strptime(year_month, "%Y-%m")
# bef_12_prev_ym = bef_12_d - dateutil.relativedelta.relativedelta(months=12)
# bef_12_prev_ym = bef_12_prev_ym.strftime('%Y-%m')
#
# # ddt = df[df['year_month-mode'] == bef_12_prev_ym]
#
# first_bef = []
# for id in cust_ids:
# dd[:, bef_12_prev_ym]
# # first_bef.append(dd.xs((id, bef_12_prev_ym)))
#
# # df['cycle_month'] = pd.Series(first_bef)
#
# print(df)
def make_time_series_data(df, Input, year_month, stand):
# set the grouping standard; here the standard is %Y-%m
standard = ['customer_id'] + [stand]
data = Input.copy()
df = df.copy()
data[stand] = pd.to_datetime(df['order_date']).dt.strftime(stand)
data.order_date = pd.to_datetime(data['order_date'])
# build a per-customer monthly frame, aggregate by the standard, then merge
times = pd.date_range('2009-12-01', periods=(data.order_date.max() - data.order_date.min()).days + 1, freq='1d')
customerid_frame = np.repeat(data.customer_id.unique(), len(times))
date_frame = np.tile(times, len(data.customer_id.unique()))
frame = pd.DataFrame({'customer_id': customerid_frame, 'order_date': date_frame})
frame[stand] = pd.to_datetime(frame.order_date).dt.strftime(stand)
# group by
data_group = data.groupby(standard).sum().reset_index()
frame_group = frame.groupby(standard).count().reset_index().drop(['order_date'], axis=1)
# merge
merge = pd.merge(frame_group, data_group, on=standard, how='left').fillna(0)
merge = merge.rename(columns={stand: 'standard'})
merge_test = merge[merge['standard'] == year_month].drop(columns=['standard', 'quantity', 'price']) #.drop(merge.columns.tolist() - ['customer_id', 'total'])
return merge_test
def add_trend(df, year_month):
df = df.copy()
df['year_month'] = df['order_date'].dt.strftime('%Y-%m')
# compute the month before year_month
d = datetime.datetime.strptime(year_month, "%Y-%m")
prev_ym = d - dateutil.relativedelta.relativedelta(months=1)
# build the train and test data
train = df[df['order_date'] < prev_ym] # rows from 2009-12 through 2011-10
test = df[df['order_date'] < year_month] # rows from 2009-12 through 2011-11
train_window_ym = []
test_window_ym = []
for month_back in [1, 2, 3, 5, 7, 12, 20, 23]: # year_month values 1, 2, ... 20, 23 months back
train_window_ym.append((prev_ym - dateutil.relativedelta.relativedelta(months=month_back)).strftime('%Y-%m'))
test_window_ym.append((d - dateutil.relativedelta.relativedelta(months=month_back)).strftime('%Y-%m'))
# declare the aggregation functions
agg_func = ['max', 'min', 'sum', 'mean', 'count', 'std', 'skew']
# group by aggregation with Dictionary
agg_dict = {
'quantity': agg_func,
'price': agg_func,
'total': agg_func,
}
# general statistics for train data with time series trend
for i, tr_ym in enumerate(train_window_ym):
# build train-data features with the group-by aggregation functions
train_agg = train.loc[train['year_month'] >= tr_ym].groupby(['customer_id']).agg(
agg_dict) # aggregate over all data from that year_month onward
# flatten the multi-level columns into single-level column names
new_cols = []
for level1, level2 in train_agg.columns:
new_cols.append(f'{level1}-{level2}-{i}')
train_agg.columns = new_cols
train_agg.reset_index(inplace=True)
if i == 0:
train_data = train_agg
else:
train_data = train_data.merge(train_agg, on=['customer_id'], how='right')
# general statistics for test data with time series trend
for i, tr_ym in enumerate(test_window_ym):
# build test-data features with the group-by aggregation functions
test_agg = test.loc[test['year_month'] >= tr_ym].groupby(['customer_id']).agg(agg_dict)
# flatten the multi-level columns into single-level column names
new_cols = []
for level1, level2 in test_agg.columns:
new_cols.append(f'{level1}-{level2}-{i}')
test_agg.columns = new_cols
test_agg.reset_index(inplace=True)
if i == 0:
test_data = test_agg
else:
test_data = test_data.merge(test_agg, on=['customer_id'], how='right')
return train_data, test_data
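# Example of the trend windows above (illustrative, for year_month='2011-12'):
# prev_ym is 2011-11, so train_window_ym becomes
#   ['2011-10', '2011-09', '2011-08', '2011-06', '2011-04', '2010-11', '2010-03', '2009-12']
# and test_window_ym becomes
#   ['2011-11', '2011-10', '2011-09', '2011-07', '2011-05', '2010-12', '2010-04', '2010-01']
# Each window aggregates all rows from that month onward, giving progressively longer histories.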
def add_seasonality(df, year_month):
df = df.copy()
df['year_month'] = df['order_date'].dt.strftime('%Y-%m')
# compute the month before year_month
d = datetime.datetime.strptime(year_month, "%Y-%m")
prev_ym = d - dateutil.relativedelta.relativedelta(months=1)
# build the train and test data
train = df[df['order_date'] < prev_ym] # rows from 2009-12 through 2011-10
test = df[df['order_date'] < year_month] # rows from 2009-12 through 2011-11
train_window_ym = []
test_window_ym = []
for month_back in [1, 6, 12, 18]: # build the windows whose seasonality we want to capture
train_window_ym.append(
(
(prev_ym - dateutil.relativedelta.relativedelta(months=month_back)).strftime('%Y-%m'),
(prev_ym - dateutil.relativedelta.relativedelta(months=month_back + 2)).strftime('%Y-%m')
# builds the 1~3, 6~8, 12~14, 18~20 month pairs
)
)
test_window_ym.append(
(
(d - dateutil.relativedelta.relativedelta(months=month_back)).strftime('%Y-%m'),
(d - dateutil.relativedelta.relativedelta(months=month_back + 2)).strftime('%Y-%m')
)
)
# declare the aggregation functions
agg_func = ['max', 'min', 'sum', 'mean', 'count', 'std', 'skew']
# group by aggregation with Dictionary
agg_dict = {
'quantity': agg_func,
'price': agg_func,
'total': agg_func,
}
# seasonality for train data with time series
for i, (tr_ym, tr_ym_3) in enumerate(train_window_ym):
# build train-data features with the group-by aggregation functions
# aggregate over the months that fall inside each window
train_agg = train.loc[(train['year_month'] >= tr_ym_3) & (train['year_month'] <= tr_ym)].groupby(
['customer_id']).agg(agg_dict)
# flatten the multi-level columns into single-level column names
new_cols = []
for level1, level2 in train_agg.columns:
new_cols.append(f'{level1}-{level2}-season{i}')
train_agg.columns = new_cols
train_agg.reset_index(inplace=True)
if i == 0:
train_data = train_agg
else:
train_data = train_data.merge(train_agg, on=['customer_id'], how='right')
# seasonality for test data with time series
for i, (tr_ym, tr_ym_3) in enumerate(test_window_ym):
# build test-data features with the group-by aggregation functions
test_agg = test.loc[(test['year_month'] >= tr_ym_3) & (test['year_month'] <= tr_ym)].groupby(
['customer_id']).agg(agg_dict)
# flatten the multi-level columns into single-level column names
new_cols = []
for level1, level2 in test_agg.columns:
new_cols.append(f'{level1}-{level2}-season{i}')
test_agg.columns = new_cols
test_agg.reset_index(inplace=True)
if i == 0:
test_data = test_agg
else:
test_data = test_data.merge(test_agg, on=['customer_id'], how='right')
return train_data, test_data
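# Example of the seasonality windows above (illustrative, for year_month='2011-12'):
# test_window_ym becomes the (start, start-2) month pairs
#   [('2011-11', '2011-09'), ('2011-06', '2011-04'), ('2010-12', '2010-10'), ('2010-06', '2010-04')]
# so each feature aggregates a three-month slice taken 1, 6, 12 and 18 months back,
# which is what captures the yearly and half-yearly seasonality.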
def feature_engineering2(df, year_month):
df = df.copy()
# cumulative sums of total, quantity and price grouped by customer_id
df['cumsum_total_by_cust_id'] = df.groupby(['customer_id'])['total'].cumsum()
df['cumsum_quantity_by_cust_id'] = df.groupby(['customer_id'])['quantity'].cumsum()
df['cumsum_price_by_cust_id'] = df.groupby(['customer_id'])['price'].cumsum()
# cumulative sums of total, quantity and price grouped by product_id
df['cumsum_total_by_prod_id'] = df.groupby(['product_id'])['total'].cumsum()
df['cumsum_quantity_by_prod_id'] = df.groupby(['product_id'])['quantity'].cumsum()
df['cumsum_price_by_prod_id'] = df.groupby(['product_id'])['price'].cumsum()
# cumulative sums of total, quantity and price grouped by order_id
df['cumsum_total_by_order_id'] = df.groupby(['order_id'])['total'].cumsum()
df['cumsum_quantity_by_order_id'] = df.groupby(['order_id'])['quantity'].cumsum()
df['cumsum_price_by_order_id'] = df.groupby(['order_id'])['price'].cumsum()
# order_ts
df['order_ts'] = df['order_date'].astype(np.int64)//1e9
df['order_ts_diff'] = df.groupby(['customer_id'])['order_ts'].diff()
df['quantity_diff'] = df.groupby(['customer_id'])['quantity'].diff()
df['price_diff'] = df.groupby(['customer_id'])['price'].diff()
df['total_diff'] = df.groupby(['customer_id'])['total'].diff()
# mode
df['month-mode'] = df['order_date'].dt.month
df['year_month-mode'] = df['order_date'].dt.strftime('%Y-%m')
# order_ts_plus ===
df['order_ts_plus'] = df[df['total'] > 0]['order_date'].astype(np.int64) // 1e9
df['order_ts_plus_diff'] = df[df['total'] > 0].groupby(['customer_id'])['order_ts'].diff()
df['order_ts_plus'] = df['order_ts_plus'].fillna(0)
df['order_ts_plus_diff'] = df['order_ts_plus_diff'].fillna(0)
# df[~(df.order_id.str.contains('C'))].groupby(['customer_id'])['order_date'].last().astype(np.int64) // 1e9
# ================================================================================================
# compute the month before year_month
d = datetime.datetime.strptime(year_month, "%Y-%m")
prev_ym = d - dateutil.relativedelta.relativedelta(months=1)
prev_ym = prev_ym.strftime('%Y-%m')
# select the train and test data
train = df[df['order_date'] < prev_ym]
test = df[df['order_date'] < year_month]
# build the train and test label data
train_label = generate_label(df, prev_ym)[['customer_id', 'year_month', 'label']]
test_label = generate_label(df, year_month)[['customer_id', 'year_month', 'label']]
# ================================================================================================
# build year-month features
target = datetime.datetime.strptime('2011-11', "%Y-%m") # target year-month
prev = target - dateutil.relativedelta.relativedelta(years=1) # same month one year earlier
prev = prev.strftime('%Y-%m') # convert to string
groupby = train.groupby(['customer_id', 'year_month-mode'])['total'].sum() # total per customer per month
groupby = groupby.unstack() # pivot the months into columns
prev_pprev_total = groupby.loc[:, [prev]] # keep only the previous-year column
prev_pprev_total = prev_pprev_total.fillna(0)
train_1224 = (prev_pprev_total['2010-11']) / 2
target = datetime.datetime.strptime('2011-12', "%Y-%m") # target year-month
prev = target - dateutil.relativedelta.relativedelta(years=1) # same month one year earlier
pprev = prev - dateutil.relativedelta.relativedelta(years=1) # same month two years earlier
prev, pprev = prev.strftime('%Y-%m'), pprev.strftime('%Y-%m') # convert to strings
groupby = test.groupby(['customer_id', 'year_month-mode'])['total'].sum() # total per customer per month
groupby = groupby.unstack() # pivot the months into columns
prev_pprev_total = groupby.loc[:, [prev, pprev]] # keep the previous-year and two-years-ago columns
prev_pprev_total = prev_pprev_total.fillna(0)
test_1224 = (prev_pprev_total['2010-12'] + prev_pprev_total['2009-12']) / 2
# ================================================================================================
# lambda expression that returns the mode
mode_f = lambda x: x.value_counts().index[0]
# declare the group-by aggregation functions
agg_func = ['mean', 'max', 'min', 'sum', 'count', 'std', 'skew']
# agg_func = ['mean', 'max'] # , 'min', 'sum', 'count', 'std', 'skew']
agg_dict = {
'order_ts': ['first', 'last'],
'order_ts_diff': agg_func,
'order_ts_plus': ['first', 'last'],
'order_ts_plus_diff': agg_func,
'quantity_diff': agg_func,
'price_diff': agg_func,
'total_diff': agg_func,
'quantity': agg_func,
'price': agg_func,
'total': agg_func,
'cumsum_total_by_cust_id': agg_func,
'cumsum_quantity_by_cust_id': agg_func,
'cumsum_price_by_cust_id': agg_func,
'cumsum_total_by_prod_id': agg_func,
'cumsum_quantity_by_prod_id': agg_func,
'cumsum_price_by_prod_id': agg_func,
'cumsum_total_by_order_id': agg_func,
'cumsum_quantity_by_order_id': agg_func,
'cumsum_price_by_order_id': agg_func,
'order_id': ['nunique'],
'product_id': ['nunique'],
'month-mode': [mode_f],
'year_month-mode': [mode_f],
}
all_train_data = pd.DataFrame()
for i, tr_ym in enumerate(train_label['year_month'].unique()):
# build train-data features with the group-by aggregation functions
train_agg = train.loc[train['order_date'] < tr_ym].groupby(['customer_id']).agg(agg_dict)
new_cols = []
for col in agg_dict.keys():
for stat in agg_dict[col]:
if type(stat) is str:
new_cols.append(f'{col}-{stat}')
else:
new_cols.append(f'{col}-mode')
train_agg.columns = new_cols
train_agg.reset_index(inplace=True)
train_agg['year_month'] = tr_ym
all_train_data = all_train_data.append(train_agg)
all_train_data = train_label.merge(all_train_data, on=['customer_id', 'year_month'], how='left')
all_train_data['cycle_1224'] = train_1224.to_numpy()
# ================================================================================================
data = pd.read_csv("/opt/ml/code/input/train.csv", parse_dates=["order_date"])
# # baseline feature engineering
# train, test, y, features = feature_engineering(data, '2011-12')
# trend
train_t, test_t = add_trend(data, year_month='2011-12')
# seasonality
train_s, test_s = add_seasonality(data, year_month='2011-12')
# merge into the train data
all_train_data = all_train_data.merge(train_t, on=['customer_id'], how='left')
all_train_data = all_train_data.merge(train_s, on=['customer_id'], how='left')
all_train_data = all_train_data.fillna(0)
# ================================================================================================
features = all_train_data.drop(columns=['customer_id', 'label', 'year_month']).columns
print(features.shape)
import csv
with open("../output/feature.csv", 'w', newline='') as f:
writer = csv.writer(f)
for items in features.tolist():
print(items)
writer.writerow([items])
test_agg = test.groupby(['customer_id']).agg(agg_dict)
test_agg.columns = new_cols
test_agg['cycle_1224'] = test_1224
test_data = test_label.merge(test_agg, on=['customer_id'], how='left')
# merge into the test data ==========================================================================
test_data = test_data.merge(test_t, on=['customer_id'], how='left')
test_data = test_data.merge(test_s, on=['customer_id'], how='left')
test_data = test_data.fillna(0)
# preprocess the train and test data
print(all_train_data.shape)
print(test_data.shape)
x_tr, x_te = feature_preprocessing(all_train_data, test_data, features)
print('x_tr.shape', x_tr.shape, ', x_te.shape', x_te.shape)
return x_tr, x_te, all_train_data['label'], features
def feature_engineering3(df, year_month):
my_pick = [
'order_ts-last',
'order_ts-first',
'price_diff-skew',
'price-skew',
'order_ts_diff-max',
'quantity_diff-skew',
'cumsum_total_by_prod_id-skew',
'cumsum_price_by_prod_id-skew',
'cumsum_total_by_cust_id-skew',
'cumsum_quantity_by_prod_id-sum',
'quantity-skew',
'cumsum_total_by_order_id-skew',
'cumsum_price_by_cust_id-skew',
'cumsum_price_by_order_id-skew',
'year_month-mode',
'total_diff-skew',
'price-mean',
'cumsum_quantity_by_order_id-skew',
'cumsum_quantity_by_prod_id-skew',
'price_diff-mean',
]
df = df.copy()
# cumulative sums of total, quantity and price grouped by customer_id
df['cumsum_total_by_cust_id'] = df.groupby(['customer_id'])['total'].cumsum()
df['cumsum_quantity_by_cust_id'] = df.groupby(['customer_id'])['quantity'].cumsum()
df['cumsum_price_by_cust_id'] = df.groupby(['customer_id'])['price'].cumsum()
# cumulative sums of total, quantity and price grouped by product_id
df['cumsum_total_by_prod_id'] = df.groupby(['product_id'])['total'].cumsum()
df['cumsum_quantity_by_prod_id'] = df.groupby(['product_id'])['quantity'].cumsum()
df['cumsum_price_by_prod_id'] = df.groupby(['product_id'])['price'].cumsum()
# cumulative sums of total, quantity and price grouped by order_id
df['cumsum_total_by_order_id'] = df.groupby(['order_id'])['total'].cumsum()
df['cumsum_quantity_by_order_id'] = df.groupby(['order_id'])['quantity'].cumsum()
df['cumsum_price_by_order_id'] = df.groupby(['order_id'])['price'].cumsum()
# order_ts
df['order_ts'] = df['order_date'].astype(np.int64)//1e9
df['order_ts_diff'] = df.groupby(['customer_id'])['order_ts'].diff()
df['quantity_diff'] = df.groupby(['customer_id'])['quantity'].diff()
df['price_diff'] = df.groupby(['customer_id'])['price'].diff()
df['total_diff'] = df.groupby(['customer_id'])['total'].diff()
# mode
df['month-mode'] = df['order_date'].dt.month
df['year_month-mode'] = df['order_date'].dt.strftime('%Y-%m')
# order_ts_plus ===
df['order_ts_plus'] = df[df['total'] > 0]['order_date'].astype(np.int64) // 1e9
df['order_ts_plus_diff'] = df[df['total'] > 0].groupby(['customer_id'])['order_ts'].diff()
df['order_ts_plus'] = df['order_ts_plus'].fillna(0)
df['order_ts_plus_diff'] = df['order_ts_plus_diff'].fillna(0)
# df[~(df.order_id.str.contains('C'))].groupby(['customer_id'])['order_date'].last().astype(np.int64) // 1e9
# ================================================================================================
# compute the month before year_month
d = datetime.datetime.strptime(year_month, "%Y-%m")
prev_ym = d - dateutil.relativedelta.relativedelta(months=1)
prev_ym = prev_ym.strftime('%Y-%m')
# select the train and test data
train = df[df['order_date'] < prev_ym]
test = df[df['order_date'] < year_month]
# build the train and test label data
train_label = generate_label(df, prev_ym)[['customer_id', 'year_month', 'label']]
test_label = generate_label(df, year_month)[['customer_id', 'year_month', 'label']]
####################################################################################
# compute earlier year_month values (12 and 24 months back)
bef_12_d1 = datetime.datetime.strptime(year_month, "%Y-%m")
bef_12_prev_ym1 = bef_12_d1 - dateutil.relativedelta.relativedelta(months=12)
bef_12_prev_ym1 = bef_12_prev_ym1.strftime('%Y-%m')
merge_df_12_train = make_time_series_data(train, train, bef_12_prev_ym1, "%Y-%m")
print(bef_12_prev_ym1)
bef_24_d1 = datetime.datetime.strptime(year_month, "%Y-%m")
bef_24_prev_ym1 = bef_24_d1 - dateutil.relativedelta.relativedelta(months=24)
bef_24_prev_ym1 = bef_24_prev_ym1.strftime('%Y-%m')
merge_df_24_train = make_time_series_data(train, train, bef_24_prev_ym1, "%Y-%m")
print(bef_24_prev_ym1)
merge_1224_train = merge_df_24_train.merge(merge_df_12_train, on=['customer_id'], how='left')
series_1224_train = (merge_1224_train['total_x'] + merge_1224_train['total_y']) / 2
####################################################################################
# compute earlier year_month values (12 and 24 months back)
bef_12_d2 = datetime.datetime.strptime(prev_ym, "%Y-%m")
bef_12_prev_ym2 = bef_12_d2 - dateutil.relativedelta.relativedelta(months=12)
bef_12_prev_ym2 = bef_12_prev_ym2.strftime('%Y-%m')
merge_df_12_test = make_time_series_data(test, test, bef_12_prev_ym2, "%Y-%m")
print(bef_12_prev_ym2)
bef_24_d2 = datetime.datetime.strptime(prev_ym, "%Y-%m")
bef_24_prev_ym2 = bef_24_d2 - dateutil.relativedelta.relativedelta(months=24)
bef_24_prev_ym2 = bef_24_prev_ym2.strftime('%Y-%m')
merge_df_24_test = make_time_series_data(test, test, bef_24_prev_ym2, "%Y-%m")
print(bef_24_prev_ym2)
merge_1224_test = merge_df_24_test.merge(merge_df_12_test, on=['customer_id'], how='left')
series_1224_test = (merge_1224_test['total_x'] + merge_1224_test['total_y']) / 2
####################################################################################
# lambda expression that returns the mode
mode_f = lambda x: x.value_counts().index[0]
# declare the group-by aggregation functions
# agg_func = ['mean', 'max', 'min', 'sum', 'count', 'std', 'skew']
agg_func = ['mean', 'max'] # , 'min', 'sum', 'count', 'std', 'skew']
agg_dict = {
'order_ts': ['first', 'last'],
'order_ts_diff': agg_func,
# 'order_ts_plus': ['first', 'last'],
# 'order_ts_plus_diff': agg_func,
# 'quantity_diff': agg_func,
# 'price_diff': agg_func,
# 'total_diff': agg_func,
# 'quantity': agg_func,
# 'price': agg_func,
# 'total': agg_func,
# 'cumsum_total_by_cust_id': agg_func,
# 'cumsum_quantity_by_cust_id': agg_func,
# 'cumsum_price_by_cust_id': agg_func,
# 'cumsum_total_by_prod_id': agg_func,
# 'cumsum_quantity_by_prod_id': agg_func,
# 'cumsum_price_by_prod_id': agg_func,
# 'cumsum_total_by_order_id': agg_func,
# 'cumsum_quantity_by_order_id': agg_func,
# 'cumsum_price_by_order_id': agg_func,
# 'order_id': ['nunique'],
# 'product_id': ['nunique'],
# 'month-mode': [mode_f],
# 'year_month-mode': [mode_f],
}
all_train_data = pd.DataFrame()
for i, tr_ym in enumerate(train_label['year_month'].unique()):
# build train-data features with the group-by aggregation functions
train_agg = train.loc[train['order_date'] < tr_ym].groupby(['customer_id']).agg(agg_dict)
new_cols = []
for col in agg_dict.keys():
for stat in agg_dict[col]:
if type(stat) is str:
new_cols.append(f'{col}-{stat}')
else:
new_cols.append(f'{col}-mode')
train_agg.columns = new_cols
train_agg.reset_index(inplace=True)
train_agg['year_month'] = tr_ym
all_train_data = all_train_data.append(train_agg)
all_train_data = train_label.merge(all_train_data, on=['customer_id', 'year_month'], how='left')
all_train_data['cycle_1224'] = series_1224_train
features = all_train_data.drop(columns=['customer_id', 'label', 'year_month']).columns
import csv
with open("../output/feature.csv", 'w', newline='') as f:
writer = csv.writer(f)
for items in features.tolist():
print(items)
writer.writerow([items])
test_agg = test.groupby(['customer_id']).agg(agg_dict)
test_agg.columns = new_cols
test_agg['cycle_1224'] = series_1224_test
test_data = test_label.merge(test_agg, on=['customer_id'], how='left')
# preprocess the train and test data
x_tr, x_te = feature_preprocessing(all_train_data, test_data, features)
# x_tr = x_tr[my_pick]
# x_te = x_te[my_pick]
# features = pd.Index(my_pick)
print('x_tr.shape', x_tr.shape, ', x_te.shape', x_te.shape)
return x_tr, x_te, all_train_data['label'], features
if __name__ == '__main__':
print('data_dir', data_dir)
|
[] |
[] |
[
"SM_CHANNEL_TRAIN",
"SM_MODEL_DIR"
] |
[]
|
["SM_CHANNEL_TRAIN", "SM_MODEL_DIR"]
|
python
| 2 | 0 | |
auth_test.go
|
package main
import "testing"
func TestGetAuthToken(t *testing.T) {
cred := new(credentials)
cred.baseurl = "http://192.168.99.100:8085"
cred.username = "admin"
cred.password = "password"
resp := getAuthToken(*cred)
if resp == "" {
t.Error("Error")
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 20 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 20 * 60
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
# FIXME: Reenable when it supports always-segwit.
#'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
# FIXME: Enable once we activate BIP9.
#'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
# FIXME: Debug and re-enable.
#'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_disableprivatekeys.py',
'wallet_disableprivatekeys.py --usecli',
'interface_http.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'rpc_createmultisig.py',
# FIXME: Reenable and possibly fix once the BIP9 mining is activated.
#'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_zmq.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
# auxpow tests
'auxpow_getwork.py',
'auxpow_getwork.py --segwit',
'auxpow_mining.py',
'auxpow_mining.py --segwit',
# name tests
'name_immature_inputs.py',
'name_ismine.py',
'name_list.py',
'name_listunspent.py',
'name_multisig.py',
'name_multisig.py --bip16-active',
'name_pending.py',
'name_rawtx.py',
'name_registration.py',
'name_reorg.py',
'name_scanning.py',
'name_sendcoins.py',
'name_wallet.py',
# Xaya-specific tests
'xaya_dualalgo.py',
'xaya_gameblocks.py',
'xaya_premine.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'p2p_feefilter.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Tests that are currently being skipped (e.g., because of BIP9).
SKIPPED = [
'feature_csv_activation.py',
'feature_versionbits_warning.py',
'p2p_compactblocks.py',
# Disabled, as they take too long with neoscrypt (they mine a lot of
# blocks). They are also not relevant, since all BIP34-activated forks
# are active from the start in Xaya.
'feature_dersig.py',
'feature_cltv.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
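# Example (illustrative): `test_runner.py --jobs=8 wallet_hd.py --loglevel=DEBUG`
# leaves unknown_args == ['wallet_hd.py', '--loglevel=DEBUG'], so
# tests == ['wallet_hd.py'] and passon_args == ['--loglevel=DEBUG'].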
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
)
def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "xayad"]) is not None:
print("%sWARNING!%s There is already a xayad process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|auxpow|name|xaya)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS + SKIPPED)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
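# Illustrative example of the set difference above: if rpc_interface.txt lists
# {'getblock', 'getblockhash', 'stop'} and the per-test coverage.* files only
# record {'getblock', 'stop'}, this method returns {'getblockhash'} as uncovered.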
if __name__ == '__main__':
main()
|
[] |
[] |
[
"TRAVIS"
] |
[]
|
["TRAVIS"]
|
python
| 1 | 0 | |
libcloud/test/test_connection.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import sys
import ssl
from mock import Mock, patch
import requests_mock
import libcloud.common.base
from libcloud.test import unittest
from libcloud.common.base import Connection, CertificateConnection
from libcloud.http import LibcloudBaseConnection
from libcloud.http import LibcloudConnection
from libcloud.http import SignedHTTPSAdapter
from libcloud.utils.misc import retry
from libcloud.utils.py3 import assertRaisesRegex
class BaseConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.orig_proxy = os.environ.pop('http_proxy', None)
def tearDown(self):
if self.orig_proxy:
os.environ['http_proxy'] = self.orig_proxy
elif 'http_proxy' in os.environ:
del os.environ['http_proxy']
libcloud.common.base.ALLOW_PATH_DOUBLE_SLASHES = False
@classmethod
def tearDownClass(cls):
if 'http_proxy' in os.environ:
del os.environ['http_proxy']
libcloud.common.base.ALLOW_PATH_DOUBLE_SLASHES = False
def test_parse_proxy_url(self):
conn = LibcloudBaseConnection()
proxy_url = 'http://127.0.0.1:3128'
result = conn._parse_proxy_url(proxy_url=proxy_url)
self.assertEqual(result[0], 'http')
self.assertEqual(result[1], '127.0.0.1')
self.assertEqual(result[2], 3128)
self.assertIsNone(result[3])
self.assertIsNone(result[4])
proxy_url = 'https://127.0.0.2:3129'
result = conn._parse_proxy_url(proxy_url=proxy_url)
self.assertEqual(result[0], 'https')
self.assertEqual(result[1], '127.0.0.2')
self.assertEqual(result[2], 3129)
self.assertIsNone(result[3])
self.assertIsNone(result[4])
proxy_url = 'http://user1:[email protected]:3128'
result = conn._parse_proxy_url(proxy_url=proxy_url)
self.assertEqual(result[0], 'http')
self.assertEqual(result[1], '127.0.0.1')
self.assertEqual(result[2], 3128)
self.assertEqual(result[3], 'user1')
self.assertEqual(result[4], 'pass1')
proxy_url = 'https://user1:[email protected]:3129'
result = conn._parse_proxy_url(proxy_url=proxy_url)
self.assertEqual(result[0], 'https')
self.assertEqual(result[1], '127.0.0.2')
self.assertEqual(result[2], 3129)
self.assertEqual(result[3], 'user1')
self.assertEqual(result[4], 'pass1')
proxy_url = 'http://127.0.0.1'
expected_msg = 'proxy_url must be in the following format'
assertRaisesRegex(self, ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
proxy_url = 'http://@127.0.0.1:3128'
expected_msg = 'URL is in an invalid format'
assertRaisesRegex(self, ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
proxy_url = 'http://[email protected]:3128'
expected_msg = 'URL is in an invalid format'
assertRaisesRegex(self, ValueError, expected_msg,
conn._parse_proxy_url,
proxy_url=proxy_url)
def test_constructor(self):
proxy_url = 'http://127.0.0.2:3128'
os.environ['http_proxy'] = proxy_url
conn = LibcloudConnection(host='localhost', port=80)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.2')
self.assertEqual(conn.proxy_port, 3128)
self.assertEqual(conn.session.proxies, {
'http': 'http://127.0.0.2:3128',
'https': 'http://127.0.0.2:3128',
})
_ = os.environ.pop('http_proxy', None)
conn = LibcloudConnection(host='localhost', port=80)
self.assertIsNone(conn.proxy_scheme)
self.assertIsNone(conn.proxy_host)
self.assertIsNone(conn.proxy_port)
proxy_url = 'http://127.0.0.3:3128'
conn.set_http_proxy(proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.3')
self.assertEqual(conn.proxy_port, 3128)
self.assertEqual(conn.session.proxies, {
'http': 'http://127.0.0.3:3128',
'https': 'http://127.0.0.3:3128',
})
proxy_url = 'http://127.0.0.4:3128'
conn = LibcloudConnection(host='localhost', port=80,
proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.4')
self.assertEqual(conn.proxy_port, 3128)
self.assertEqual(conn.session.proxies, {
'http': 'http://127.0.0.4:3128',
'https': 'http://127.0.0.4:3128',
})
os.environ['http_proxy'] = proxy_url
proxy_url = 'http://127.0.0.5:3128'
conn = LibcloudConnection(host='localhost', port=80,
proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'http')
self.assertEqual(conn.proxy_host, '127.0.0.5')
self.assertEqual(conn.proxy_port, 3128)
self.assertEqual(conn.session.proxies, {
'http': 'http://127.0.0.5:3128',
'https': 'http://127.0.0.5:3128',
})
os.environ['http_proxy'] = proxy_url
proxy_url = 'https://127.0.0.6:3129'
conn = LibcloudConnection(host='localhost', port=80,
proxy_url=proxy_url)
self.assertEqual(conn.proxy_scheme, 'https')
self.assertEqual(conn.proxy_host, '127.0.0.6')
self.assertEqual(conn.proxy_port, 3129)
self.assertEqual(conn.session.proxies, {
'http': 'https://127.0.0.6:3129',
'https': 'https://127.0.0.6:3129',
})
def test_connection_to_unusual_port(self):
conn = LibcloudConnection(host='localhost', port=8080)
self.assertIsNone(conn.proxy_scheme)
self.assertIsNone(conn.proxy_host)
self.assertIsNone(conn.proxy_port)
self.assertEqual(conn.host, 'http://localhost:8080')
conn = LibcloudConnection(host='localhost', port=80)
self.assertEqual(conn.host, 'http://localhost')
def test_connection_url_merging(self):
"""
Test that the connection class will parse URLs correctly
"""
conn = Connection(url='http://test.com/')
conn.connect()
self.assertEqual(conn.connection.host, 'http://test.com')
with requests_mock.mock() as m:
m.get('http://test.com/test', text='data')
response = conn.request('/test')
self.assertEqual(response.body, 'data')
def test_morph_action_hook(self):
conn = Connection(url="http://test.com")
conn.request_path = ''
self.assertEqual(conn.morph_action_hook('/test'), '/test')
self.assertEqual(conn.morph_action_hook('test'), '/test')
conn.request_path = '/v1'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = '/v1'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = 'v1'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = 'v1/'
self.assertEqual(conn.morph_action_hook('/test'), '/v1/test')
self.assertEqual(conn.morph_action_hook('test'), '/v1/test')
conn.request_path = '/a'
self.assertEqual(conn.morph_action_hook('//b/c.txt'), '/a/b/c.txt')
conn.request_path = '/b'
self.assertEqual(conn.morph_action_hook('/foo//'), '/b/foo/')
libcloud.common.base.ALLOW_PATH_DOUBLE_SLASHES = True
conn.request_path = '/'
self.assertEqual(conn.morph_action_hook('/'), '//')
conn.request_path = ''
self.assertEqual(conn.morph_action_hook('/'), '/')
conn.request_path = '/a'
self.assertEqual(conn.morph_action_hook('//b/c.txt'), '/a//b/c.txt')
conn.request_path = '/b'
self.assertEqual(conn.morph_action_hook('/foo//'), '/b/foo//')
def test_connect_with_prefix(self):
"""
Test that a connection with a base path (e.g. /v1/) will
add the base path to requests
"""
conn = Connection(url='http://test.com/')
conn.connect()
conn.request_path = '/v1'
self.assertEqual(conn.connection.host, 'http://test.com')
with requests_mock.mock() as m:
m.get('http://test.com/v1/test', text='data')
response = conn.request('/test')
self.assertEqual(response.body, 'data')
def test_secure_connection_unusual_port(self):
"""
Test that the connection class will default to secure (https) even
when the port is an unusual (non 443, 80) number
"""
conn = Connection(secure=True, host='localhost', port=8081)
conn.connect()
self.assertEqual(conn.connection.host, 'https://localhost:8081')
conn2 = Connection(url='https://localhost:8081')
conn2.connect()
self.assertEqual(conn2.connection.host, 'https://localhost:8081')
def test_secure_by_default(self):
"""
Test that the connection class will default to secure (https)
"""
conn = Connection(host='localhost', port=8081)
conn.connect()
self.assertEqual(conn.connection.host, 'https://localhost:8081')
def test_implicit_port(self):
"""
Test that the port is not included in the URL if the protocol implies
the port, e.g. http implies 80
"""
conn = Connection(secure=True, host='localhost', port=443)
conn.connect()
self.assertEqual(conn.connection.host, 'https://localhost')
conn2 = Connection(secure=False, host='localhost', port=80)
conn2.connect()
self.assertEqual(conn2.connection.host, 'http://localhost')
def test_insecure_connection_unusual_port(self):
"""
Test that the connection will allow unusual ports and insecure
schemes
"""
conn = Connection(secure=False, host='localhost', port=8081)
conn.connect()
self.assertEqual(conn.connection.host, 'http://localhost:8081')
conn2 = Connection(url='http://localhost:8081')
conn2.connect()
self.assertEqual(conn2.connection.host, 'http://localhost:8081')
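# The tests below exercise request-level behaviour of Connection with the
# connect/response machinery mocked out: rejecting insecure connections,
# cache-busting parameters, context reset after each request, and the retry helper.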
class ConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.originalConnect = Connection.connect
self.originalResponseCls = Connection.responseCls
Connection.connect = Mock()
Connection.responseCls = Mock()
Connection.allow_insecure = True
def tearDown(self):
Connection.connect = self.originalConnect
        Connection.responseCls = self.originalResponseCls
Connection.allow_insecure = True
def test_dont_allow_insecure(self):
Connection.allow_insecure = True
Connection(secure=False)
Connection.allow_insecure = False
expected_msg = (r'Non https connections are not allowed \(use '
r'secure=True\)')
assertRaisesRegex(self, ValueError, expected_msg, Connection,
secure=False)
def test_cache_busting(self):
params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]
con = Connection()
con.connection = Mock()
con.pre_connect_hook = Mock()
con.pre_connect_hook.return_value = {}, {}
con.cache_busting = False
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params1)
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params2)
con.cache_busting = True
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0])
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0][len(params2)])
def test_context_is_reset_after_request_has_finished(self):
context = {'foo': 'bar'}
def responseCls(connection, response):
connection.called = True
self.assertEqual(connection.context, context)
con = Connection()
con.called = False
con.connection = Mock()
con.responseCls = responseCls
con.set_context(context)
self.assertEqual(con.context, context)
con.request('/')
# Context should have been reset
self.assertTrue(con.called)
self.assertEqual(con.context, {})
# Context should also be reset if a method inside request throws
con = Connection(timeout=1, retry_delay=0.1)
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.connection.request = Mock(side_effect=ssl.SSLError())
try:
con.request('/')
except ssl.SSLError:
pass
self.assertEqual(con.context, {})
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.responseCls = Mock(side_effect=ValueError())
try:
con.request('/')
except ValueError:
pass
self.assertEqual(con.context, {})
def _raise_socket_error(self):
raise socket.gaierror('')
def test_retry_with_sleep(self):
con = Connection()
con.connection = Mock()
connect_method = 'libcloud.common.base.Connection.request'
with patch(connect_method) as mock_connect:
mock_connect.__name__ = 'mock_connect'
with self.assertRaises(socket.gaierror):
mock_connect.side_effect = socket.gaierror('')
retry_request = retry(timeout=0.2, retry_delay=0.1,
backoff=1)
retry_request(con.request)(action='/')
self.assertGreater(mock_connect.call_count, 1,
'Retry logic failed')
def test_retry_with_timeout(self):
con = Connection()
con.connection = Mock()
connect_method = 'libcloud.common.base.Connection.request'
with patch(connect_method) as mock_connect:
mock_connect.__name__ = 'mock_connect'
with self.assertRaises(socket.gaierror):
mock_connect.side_effect = socket.gaierror('')
retry_request = retry(timeout=0.2, retry_delay=0.1,
backoff=1)
retry_request(con.request)(action='/')
self.assertGreater(mock_connect.call_count, 1,
'Retry logic failed')
def test_retry_with_backoff(self):
con = Connection()
con.connection = Mock()
connect_method = 'libcloud.common.base.Connection.request'
with patch(connect_method) as mock_connect:
mock_connect.__name__ = 'mock_connect'
with self.assertRaises(socket.gaierror):
mock_connect.side_effect = socket.gaierror('')
retry_request = retry(timeout=0.2, retry_delay=0.1,
backoff=1)
retry_request(con.request)(action='/')
self.assertGreater(mock_connect.call_count, 1,
'Retry logic failed')
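# CertificateConnection should install a SignedHTTPSAdapter configured with the
# supplied client certificate on the underlying requests session.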
class CertificateConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.connection = CertificateConnection(cert_file='test.pem',
url='https://test.com/test')
self.connection.connect()
def test_adapter_internals(self):
adapter = self.connection.connection.session.adapters['https://']
self.assertTrue(isinstance(adapter, SignedHTTPSAdapter))
self.assertEqual(adapter.cert_file, 'test.pem')
if __name__ == '__main__':
sys.exit(unittest.main())
|
[] |
[] |
[
"http_proxy"
] |
[]
|
["http_proxy"]
|
python
| 1 | 0 | |
mqfts.go
|
package main
/*
************************************************************************
* This file contains the source code for IBM MQ Managed File Transfer
* Log Capture parse utility
*
************************************************************************
* © Copyright IBM Corporation 2021, 2021
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
************************************************************************
*/
import (
"bufio"
"bytes"
"fmt"
"github.com/antchfx/xmlquery"
"github.com/icza/backscanner"
flag "github.com/spf13/pflag"
"io"
"log"
"os"
"strings"
"time"
)
// Map to cache transfer ids already processed
var transferIdMap map[string]string
var displayCount int
var displayTransferType int
var counter int
const transferSUCCESSFUL = 1
const transferPARTIALSUCCESS = 2
const transferFAILED = 3
const transferSTARTED = 4
const transferINPROGRESS = 5
// Main entry point
func main() {
var failedTransfers int
var successTransfers int
var partSuccessTransfers int
var startedTransfers int
var inProgressTransfers int
var logFilePath string
var transferId string
fmt.Printf("IBM MQ Managed File Transfer Status Utility\n")
flag.StringVar(&logFilePath, "lf", "", "Capture log file path")
flag.Lookup("lf").NoOptDefVal = ""
flag.StringVar(&transferId, "id", "", "Transfer ID")
flag.Lookup("id").NoOptDefVal = ""
flag.IntVar(&successTransfers, "sf", -1, "Display successful transfers")
flag.Lookup("sf").NoOptDefVal = "-1"
flag.IntVar(&partSuccessTransfers, "ps", -1, "Display partially successful transfers")
flag.Lookup("ps").NoOptDefVal = "-1"
flag.IntVar(&failedTransfers, "fl", -1, "Display failed transfers")
flag.Lookup("fl").NoOptDefVal = "-1"
flag.IntVar(&startedTransfers, "st", -1, "Display 'started' transfers")
flag.Lookup("st").NoOptDefVal = "-1"
flag.IntVar(&inProgressTransfers, "ip", -1, "Display 'In Progress' transfers")
flag.Lookup("ip").NoOptDefVal = "-1"
flag.Usage = func() {
fmt.Printf("Usage:\n")
fmt.Println(" mqfts Display status of transfers present in log file")
fmt.Println(" mqfts <--lf>=<capture log file>")
fmt.Println(" mqfts <--id>=<Transfer ID> Display details of single transfer. Specify * for all transfers")
fmt.Println(" mqfts <--fl>=<n> Display recent <n> failed transfers")
fmt.Println(" mqfts <--sf>=<n> Display recent <n> successful transfers")
fmt.Println(" mqfts <--ps>=<n> Display recent <n> partially successful transfers")
fmt.Println(" mqfts <--st>=<n> Display recent <n> transfers in 'started' state")
fmt.Println(" mqfts <--ip>=<n> Display recent <n> 'In Progress' transfers")
return
}
// Parse the provided command line
flag.Parse()
// Display usage if we have some unknown parameters
if len(flag.Args()) > 0 {
flag.Usage()
return
}
// Get capture log file path
var outputLogFilePath = getLogPath(logFilePath)
fmt.Printf("\nDisplaying transfer details from %s\n\n", outputLogFilePath)
if isFlagPassed("sf") {
displayCount = successTransfers
displayTransferType = transferSUCCESSFUL
}
if isFlagPassed("ps") {
displayCount = partSuccessTransfers
displayTransferType = transferPARTIALSUCCESS
}
if isFlagPassed("fl") {
displayCount = failedTransfers
displayTransferType = transferFAILED
}
if isFlagPassed("st") {
displayCount = startedTransfers
displayTransferType = transferSTARTED
}
if isFlagPassed("ip") {
displayCount = inProgressTransfers
displayTransferType = transferINPROGRESS
}
// Initialize map for storing transfer ids that have been processed.
transferIdMap = make(map[string]string, 1)
if transferId != "" {
		parseAndDisplayTransfer(outputLogFilePath, transferId)
} else {
// Display transfer status
displayTransferStatus(outputLogFilePath)
}
}
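// isFlagPassed reports whether the named flag was explicitly set on the command line.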
func isFlagPassed(name string) bool {
found := false
flag.Visit(func(f *flag.Flag) {
if f.Name == name {
found = true
}
})
return found
}
// Get absolute captureX log file path
func getLogPath(logFilePath string) string {
var outputLogFilePath string
if isFlagPassed ("lf") {
outputLogFilePath = logFilePath
} else {
fmt.Println("Environment variables:")
var bfgDataPath string
agentNameEnv := os.Getenv("MFT_AGENT_NAME")
		if strings.TrimSpace(agentNameEnv) == "" {
			fmt.Println("Failed to determine agent name from environment. Ensure the 'MFT_AGENT_NAME' environment variable is set to the agent name")
os.Exit(1)
} else {
fmt.Printf("\tMFT_AGENT_NAME=%s\n", agentNameEnv)
}
var coordinationQMgr string
coordinationQMgr = os.Getenv("MFT_COORDINATION_QM")
		if strings.TrimSpace(coordinationQMgr) == "" {
			fmt.Println("Failed to determine coordination queue manager name. Set the 'MFT_COORDINATION_QM' environment variable to the coordination queue manager name")
os.Exit(1)
} else {
fmt.Printf("\tMFT_COORDINATION_QM=%s\n", coordinationQMgr)
}
// Get path from environment variable
bfgConfigMountPath := os.Getenv("BFG_DATA")
		if strings.TrimSpace(bfgConfigMountPath) == "" {
			fmt.Println("Failed to determine agent configuration directory name from environment. Ensure the BFG_DATA environment variable is set to the IBM MQ Managed File Transfer data directory")
os.Exit(1)
} else {
fmt.Printf("\tBFG_DATA=%s\n", bfgConfigMountPath)
}
if len(bfgConfigMountPath) > 0 {
bfgDataPath = bfgConfigMountPath
}
// Read the agentPid file from the agent logs directory
outputLogFilePath = bfgDataPath + "/mqft/logs/" + coordinationQMgr + "/agents/" + agentNameEnv + "/logs/capture0.log"
}
return outputLogFilePath
}
/**
 * Parse the capture log and display the status of transfers found in it
 * @param logFile - Capture log filename
*/
func displayTransferStatus(logFile string){
_, err := os.Stat(logFile)
if err != nil {
if strings.Contains(err.Error(), "no such file or directory") {
fmt.Println("No transfer logs available")
} else {
fmt.Println(err)
}
return
}
// Read the capture0.log file from the agent logs directory
outputLogFile, err := os.Open(logFile)
if err != nil {
fmt.Print(err)
return
}
// defer the closing of our jsonFile so that we can parse it later on
defer func(outputLogFile *os.File) {
err := outputLogFile.Close()
if err != nil {
fmt.Printf("An error occurred while closing file %s. The error is: %v\n",outputLogFile.Name(), err)
}
}(outputLogFile)
fi, err := outputLogFile.Stat()
if err != nil {
fmt.Printf("Error when finding capture0.log file. %v\n", err)
return
}
scanner := backscanner.New(outputLogFile, int(fi.Size()))
	topicSystemFTELog := []byte("SYSTEM.FTE/Log/")
// Variable to print header only once
firstTime := true
for {
line, _, err := scanner.LineBytes()
if err != nil {
if err == io.EOF {
fmt.Printf("\nEnd of file reached.\n")
} else {
fmt.Println("Error:", err)
}
break
}
// Consider only those lines in the capture0.log file that contain SYSTEM.FTE/Log/ string for parsing
if bytes.Contains(line, topicSystemFTELog) {
// Split the line on '!' and then parse to get the latest status
tokens := strings.Split(string(line), "!")
if len(tokens) > 1 {
if firstTime {
fmt.Println(" Transfer ID \tStatus")
fmt.Println("-------------------------------------------------\t------------------")
firstTime = false
}
parseAndDisplay(tokens[len(tokens) - 1], displayTransferType)
if displayCount > 0 {
if counter == displayCount {
// Displayed required number of records. Exit
break
}
}
} // Number of tokens more
} // If line contains SYSTEM.FTE/Log
} // For loop
}
// Retrieves the transfer id from XML
func getTransferId(xmlMessage string) string {
var transferId string
	// Create a parsed XML document
doc, err := xmlquery.Parse(strings.NewReader(xmlMessage))
if err != nil {
panic(err)
}
// Get required elements
transaction := xmlquery.FindOne(doc, "//transaction")
if transaction != nil {
transferId = transaction.SelectAttr("ID")
}
return transferId
}
/**
* Parse the transfer XML and display details of the given transfer
* @param captureLogFileName - Capture log filename
* @param transferId - ID of the transfer whose details to be displayed
*/
func parseAndDisplayTransfer(captureLogFileName string, transferId string) {
file, err := os.Open(captureLogFileName)
if err != nil {
fmt.Println(err)
return
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
fmt.Println(err)
return
}
}(file)
topicSystemFTELog := "SYSTEM.FTE/Log/"
scanner := bufio.NewScanner(file)
	// optionally, resize the scanner's capacity for lines over 64K
for scanner.Scan() {
// Consider only those lines in the capture0.log file that contain SYSTEM.FTE/Log/ string for parsing
if strings.Contains(scanner.Text(), topicSystemFTELog) {
// Split the line on '!' and then parse to get the latest status
tokens := strings.Split(scanner.Text(), "!")
if len(tokens) > 1 {
transferIdXml := getTransferId(tokens[len(tokens)-1])
if strings.EqualFold(transferId, "*") {
// Display details of all transfers
displayTransferDetails(tokens[len(tokens)-1])
} else {
if transferId != "" {
if strings.EqualFold(transferIdXml, transferId) {
// Display details of specific transfer
displayTransferDetails(tokens[len(tokens)-1])
}
}
}
}
}
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
}
/**
* Parse the transfer XML and display details of the given transfer
* @param xmlMessage - transfer xml
*/
func displayTransferDetails(xmlMessage string){
// Replace all " with single quote
	xmlMessage = strings.ReplaceAll(xmlMessage, "&quot;", "'")
	// Create a parsed XML document
doc, err := xmlquery.Parse(strings.NewReader(xmlMessage))
if err != nil {
panic(err)
}
// Get required 'transaction' element from Xml message
transaction := xmlquery.FindOne(doc, "//transaction")
if transaction != nil {
transferId := transaction.SelectAttr("ID")
if action := transaction.SelectElement("action"); action != nil {
if strings.EqualFold(action.InnerText(),"completed") {
// Process transfer complete Xml message
var supplementMsg string
status := transaction.SelectElement("status")
if status != nil {
supplementMsg = status.SelectElement("supplement").InnerText()
fmt.Printf("\n[%s] TransferID: %s Status: %s\n \tSupplement: %s\n",
action.SelectAttr("time"),
strings.ToUpper(transferId),
action.InnerText(),
supplementMsg)
}
destAgent := transaction.SelectElement("destinationAgent")
statistics := transaction.SelectElement("statistics")
// Retrieve statistics
var actualStartTimeText = ""
var retryCount string
var numFileFailures string
var numFileWarnings string
if statistics != nil {
actualStartTime := statistics.SelectElement("actualStartTime")
if actualStartTime != nil {
actualStartTimeText = actualStartTime.InnerText()
}
if statistics.SelectElement("retryCount") != nil {
retryCount = statistics.SelectElement("retryCount").InnerText()
}
if statistics.SelectElement("numFileFailures") != nil {
numFileFailures = statistics.SelectElement("numFileFailures").InnerText()
}
if statistics.SelectElement("numFileWarnings") != nil {
numFileWarnings = statistics.SelectElement("numFileWarnings").InnerText()
}
}
var elapsedTime time.Duration
if actualStartTimeText != "" {
startTime := getFormattedTime(actualStartTimeText)
					completePublishTime := getFormattedTime(action.SelectAttr("time"))
					elapsedTime = completePublishTime.Sub(startTime)
}
fmt.Printf("\tDestination Agent: %s\n\tStart time: %s\n\tCompletion Time: %s\n\tElapsed time: %s\n\tRetry Count: %s\n\tFailures:%s\n\tWarnings:%s\n\n",
destAgent.SelectAttr("agent"),
actualStartTimeText,
action.SelectAttr("time"),
elapsedTime,
retryCount,
numFileFailures,
numFileWarnings)
} else if strings.EqualFold(action.InnerText(),"progress") {
// Process transfer progress Xml message
destAgent := transaction.SelectElement("destinationAgent")
progressPublishTimeText := action.SelectAttr("time")
fmt.Printf("\n[%s] %s Status: %s Destination: %s \n", progressPublishTimeText,
strings.ToUpper(transferId),
action.InnerText(),
destAgent.SelectAttr("agent"))
transferSet := transaction.SelectElement("transferSet")
startTimeText := transferSet.SelectAttr("startTime")
//startTime := getFormattedTime(startTimeText)
//progressPublishTime := getFormattedTime(progressPublishTimeText)
//elapsedTime := progressPublishTime.Sub(startTime)
fmt.Printf("\tStart time: %s\n\tTotal items in transfer request: %s\n\tBytes sent: %s\n",
startTimeText,
transferSet.SelectAttr("total"),
transferSet.SelectAttr("bytesSent"))
// Loop through all items in the progress message and display details.
items := transferSet.SelectElements("item")
for i := 0 ; i < len(items); i++ {
status := items[i].SelectElement("status")
resultCode := status.SelectAttr("resultCode")
var sourceName string
var sourceSize = "-1"
queueSource := items[i].SelectElement("source/queue")
if queueSource != nil {
sourceName = queueSource.InnerText()
} else {
fileName := items[i].SelectElement("source/file")
if fileName != nil {
sourceName = fileName.InnerText()
sourceSize = fileName.SelectAttr("size")
}
}
var destinationName string
queueDest := items[i].SelectElement("destination/queue")
var destinationSize = "-1"
if queueDest != nil {
destinationName = queueDest.InnerText()
} else {
fileName := items[i].SelectElement("destination/file")
if fileName != nil {
destinationName = fileName.InnerText()
destinationSize = fileName.SelectAttr("size")
}
}
// Display details of each item
fmt.Printf("\tItem # %d\n\t\tSource: %s\tSize: %s bytes\n\t\tDestination: %s\tSize: %s bytes\n",
i+1,
sourceName, sourceSize,
destinationName, destinationSize)
// Process result code and append any supplement
if resultCode != "0" {
supplement := status.SelectElement("supplement")
if supplement != nil {
fmt.Printf("\t\tResult code %s Supplement %s\n", resultCode, supplement.InnerText())
} else {
fmt.Printf("\t\tResult code %s\n", resultCode)
}
} else {
fmt.Printf("\t\tResult code %s\n", resultCode)
}
}
} else if strings.EqualFold(action.InnerText(),"started") {
// Process transfer started Xml message
destAgent := transaction.SelectElement("destinationAgent")
destinationAgentName := destAgent.SelectAttr("agent")
transferSet := transaction.SelectElement("transferSet")
startTime := ""
if transferSet != nil {
startTime = transferSet.SelectAttr("startTime")
} else {
startTime = action.SelectAttr("time")
}
fmt.Printf("[%s] TransferID: %s Status: %s Destination: %s\n",
startTime,
strings.ToUpper(transferId),
action.InnerText(),
destinationAgentName)
}
}
}
}
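// getFormattedTime parses an RFC3339 timestamp; if parsing fails the error is
// printed and the zero time.Time value is returned.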
func getFormattedTime(timeValue string) time.Time {
t, err := time.Parse(time.RFC3339, timeValue)
if err != nil {
fmt.Printf("%s\n", err)
}
return t
}
/*
* Parse the Transfer XML and display status of transfer in a list format.
* @param xmlMessage - Transfer XML message.
 * @param displayTransferType - Type of transfers to display, e.g. failed only or partially successful
*/
func parseAndDisplay(xmlMessage string, displayTransferType int){
	// Create a parsed XML document
doc, err := xmlquery.Parse(strings.NewReader(xmlMessage))
if err != nil {
panic(err)
}
// Get required elements
transaction := xmlquery.FindOne(doc, "//transaction")
if transaction != nil {
transferId := transaction.SelectAttr("ID")
if !strings.EqualFold(transferId, "") {
// Check if we have seen this transfer id already. If yes, then don't do anything
_, exists := transferIdMap[transferId]
// Does not exist in HashMap
if !exists {
if action := transaction.SelectElement("action"); action != nil {
if strings.EqualFold(action.InnerText(),"completed") {
status := transaction.SelectElement("status")
if status != nil {
supplement := status.SelectElement("supplement").InnerText()
if strings.Contains(supplement, "BFGRP0032I") {
if displayTransferType == transferSUCCESSFUL || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), "Successful" )
counter++
}
} else if strings.Contains(supplement, "BFGRP0034I"){
if displayTransferType == transferFAILED || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), "Failed" )
counter++
}
} else if strings.Contains(supplement, "BFGRP0033I") {
if displayTransferType == transferPARTIALSUCCESS || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), "Partially successful" )
counter++
}
} else if strings.Contains(supplement, "BFGRP0036I") {
if displayTransferType == transferFAILED || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), "Completed but no files transferred" )
counter++
}
} else if strings.Contains(supplement, "BFGRP0037I") {
if displayTransferType == transferFAILED || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), "Failed" )
counter++
}
}
}
} else if strings.EqualFold(action.InnerText(),"progress") {
if displayTransferType == transferINPROGRESS || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), "In progress")
counter++
}
} else if strings.EqualFold(action.InnerText(),"started") {
if displayTransferType == transferSTARTED || displayTransferType == 0 {
fmt.Printf("%s\t%s\n", transaction.SelectAttr("ID"), action.InnerText())
counter++
}
}
// Add the transfer id to the map
transferIdMap[transaction.SelectAttr("ID")] = action.InnerText()
}
}
}
}
}
|
[
"\"MFT_AGENT_NAME\"",
"\"MFT_COORDINATION_QM\"",
"\"BFG_DATA\""
] |
[] |
[
"MFT_COORDINATION_QM",
"BFG_DATA",
"MFT_AGENT_NAME"
] |
[]
|
["MFT_COORDINATION_QM", "BFG_DATA", "MFT_AGENT_NAME"]
|
go
| 3 | 0 | |
internal/micro_deposits.go
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package internal
import (
"crypto/rand"
"database/sql"
"encoding/json"
"errors"
"fmt"
"io"
"math/big"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
accounts "github.com/moov-io/accounts/client"
"github.com/moov-io/ach"
"github.com/moov-io/base"
moovhttp "github.com/moov-io/base/http"
"github.com/moov-io/paygate/internal/route"
"github.com/moov-io/paygate/internal/secrets"
"github.com/moov-io/paygate/internal/util"
"github.com/moov-io/paygate/pkg/id"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
)
var (
microDepositsInitiated = prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Name: "micro_deposits_initiated",
Help: "Counter of micro-deposits initiated against depositories",
}, []string{"destination"})
microDepositsConfirmed = prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Name: "micro_deposits_confirmed",
Help: "Counter of micro-deposits confirmed for a depository",
}, []string{"destination"})
)
// ODFIAccount represents the depository account micro-deposits are debited from
type ODFIAccount struct {
accountNumber string
routingNumber string
accountType AccountType
client AccountsClient
keeper *secrets.StringKeeper
mu sync.Mutex
accountID string
}
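// NewODFIAccount returns an ODFIAccount wrapping the account details and the
// AccountsClient used when posting micro-deposit transactions.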
func NewODFIAccount(accountsClient AccountsClient, accountNumber string, routingNumber string, accountType AccountType, keeper *secrets.StringKeeper) *ODFIAccount {
return &ODFIAccount{
client: accountsClient,
accountNumber: accountNumber,
routingNumber: routingNumber,
accountType: accountType,
keeper: keeper,
}
}
func (a *ODFIAccount) getID(requestID string, userID id.User) (string, error) {
a.mu.Lock()
defer a.mu.Unlock()
// Note: In environments where the ODFI accountID changes paygate won't notice the change
// and so all micro-deposit calls will fail (or post to the wrong account).
if a.accountID != "" {
return a.accountID, nil
}
if a.client == nil {
return "", errors.New("ODFIAccount: nil AccountsClient")
}
// Otherwise, make our Accounts HTTP call and grab the ID
dep := &Depository{
RoutingNumber: a.routingNumber,
Type: a.accountType,
}
dep.keeper = a.keeper
dep.ReplaceAccountNumber(a.accountNumber)
acct, err := a.client.SearchAccounts(requestID, userID, dep)
if err != nil || (acct == nil || acct.ID == "") {
return "", fmt.Errorf("ODFIAccount: problem getting accountID: %v", err)
}
a.accountID = acct.ID // record account ID for calls later on
return a.accountID, nil
}
func (a *ODFIAccount) metadata() (*Originator, *Depository) {
orig := &Originator{
		ID:                "odfi", // TODO(adam): make this NOT queryable via db.
DefaultDepository: id.Depository("odfi"),
Identification: util.Or(os.Getenv("ODFI_IDENTIFICATION"), "001"),
Metadata: "Moov - paygate micro-deposits",
}
num, err := a.keeper.EncryptString(a.accountNumber)
if err != nil {
return nil, nil
}
dep := &Depository{
ID: id.Depository("odfi"),
BankName: util.Or(os.Getenv("ODFI_BANK_NAME"), "Moov, Inc"),
Holder: util.Or(os.Getenv("ODFI_HOLDER"), "Moov, Inc"),
HolderType: Individual,
Type: a.accountType,
RoutingNumber: a.routingNumber,
EncryptedAccountNumber: num,
Status: DepositoryVerified,
keeper: a.keeper,
}
return orig, dep
}
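// MicroDeposit holds a single micro-deposit amount along with the ACH file and
// Accounts transaction it was submitted under.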
type MicroDeposit struct {
Amount Amount
FileID string
TransactionID string
}
func (m MicroDeposit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Amount Amount `json:"amount"`
}{
m.Amount,
})
}
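// microDepositAmounts generates the two small random USD amounts that are
// credited to a depository during verification.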
func microDepositAmounts() []Amount {
rand := func() int {
		n, _ := rand.Int(rand.Reader, big.NewInt(49)) // rand.Int returns [0, N); adding 1 below gives a range of $0.01 to $0.49
return int(n.Int64()) + 1
}
// generate two amounts and a third that's the sum
n1, n2 := rand(), rand()
a1, _ := NewAmount("USD", fmt.Sprintf("0.%02d", n1)) // pad 1 to '01'
a2, _ := NewAmount("USD", fmt.Sprintf("0.%02d", n2))
return []Amount{*a1, *a2}
}
// initiateMicroDeposits will write micro deposits into the underlying database and kick off the ACH transfer(s).
//
func (r *DepositoryRouter) initiateMicroDeposits() http.HandlerFunc {
return func(w http.ResponseWriter, httpReq *http.Request) {
responder := route.NewResponder(r.logger, w, httpReq)
if responder == nil {
return
}
depID := GetDepositoryID(httpReq)
if depID == "" {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// 404 - A depository with the specified ID was not found.
w.WriteHeader(http.StatusNotFound)
w.Write([]byte(`{"error": ""}`))
return
}
// Check the depository status and confirm it belongs to the user
dep, err := r.depositoryRepo.GetUserDepository(depID, responder.XUserID)
if err != nil {
responder.Log("microDeposits", err)
moovhttp.Problem(w, err)
return
}
if dep == nil {
w.WriteHeader(http.StatusNotFound)
return
}
dep.keeper = r.keeper
if dep.Status != DepositoryUnverified {
err = fmt.Errorf("depository %s in bogus status %s", dep.ID, dep.Status)
responder.Log("microDeposits", err)
moovhttp.Problem(w, err)
return
}
if r.microDepositAttemper != nil {
if !r.microDepositAttemper.Available(dep.ID) {
moovhttp.Problem(w, errors.New("no micro-deposit attempts available"))
return
}
}
// Our Depository needs to be Verified so let's submit some micro deposits to it.
amounts := microDepositAmounts()
microDeposits, err := r.submitMicroDeposits(responder.XUserID, responder.XRequestID, amounts, dep)
if err != nil {
err = fmt.Errorf("problem submitting micro-deposits: %v", err)
responder.Log("microDeposits", err)
moovhttp.Problem(w, err)
return
}
responder.Log("microDeposits", fmt.Sprintf("submitted %d micro-deposits for depository=%s", len(microDeposits), dep.ID))
// Write micro deposits into our db
if err := r.depositoryRepo.InitiateMicroDeposits(depID, responder.XUserID, microDeposits); err != nil {
responder.Log("microDeposits", err)
moovhttp.Problem(w, err)
return
}
responder.Log("microDeposits", fmt.Sprintf("stored micro-deposits for depository=%s", dep.ID))
microDepositsInitiated.With("destination", dep.RoutingNumber).Add(1)
w.WriteHeader(http.StatusCreated) // 201 - Micro deposits initiated
w.Write([]byte("{}"))
}
}
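// postMicroDepositTransaction posts the given transaction lines against the
// Accounts service, retrying the call up to three times before returning an error.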
func postMicroDepositTransaction(logger log.Logger, client AccountsClient, accountID string, userID id.User, lines []transactionLine, requestID string) (*accounts.Transaction, error) {
if client == nil {
return nil, errors.New("nil Accounts client")
}
var transaction *accounts.Transaction
var err error
for i := 0; i < 3; i++ {
transaction, err = client.PostTransaction(requestID, userID, lines)
if err == nil {
break // quit after successful call
}
}
if err != nil {
return nil, fmt.Errorf("error creating transaction for transfer user=%s: %v", userID, err)
}
logger.Log("transfers", fmt.Sprintf("created transaction=%s for user=%s", transaction.ID, userID), "requestID", requestID)
return transaction, nil
}
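// updateMicroDepositsWithTransactionIDs posts each micro-deposit (and a final
// debit of their sum) against the Accounts service and records the resulting
// transaction IDs on the MicroDeposit objects.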
func updateMicroDepositsWithTransactionIDs(logger log.Logger, ODFIAccount *ODFIAccount, client AccountsClient, userID id.User, dep *Depository, microDeposits []*MicroDeposit, sum int, requestID string) ([]*accounts.Transaction, error) {
if client == nil {
return nil, errors.New("nil Accounts client")
}
if len(microDeposits) != 2 {
return nil, fmt.Errorf("updateMicroDepositsWithTransactionIDs: got %d micro-deposits", len(microDeposits))
}
acct, err := client.SearchAccounts(requestID, userID, dep)
if err != nil || acct == nil {
return nil, fmt.Errorf("error reading account user=%s depository=%s: %v", userID, dep.ID, err)
}
ODFIAccountID, err := ODFIAccount.getID(requestID, userID)
if err != nil {
return nil, fmt.Errorf("posting micro-deposits: %v", err)
}
// Submit all micro-deposits
var transactions []*accounts.Transaction
for i := range microDeposits {
lines := []transactionLine{
{AccountID: acct.ID, Purpose: "ACHCredit", Amount: int32(microDeposits[i].Amount.Int())},
{AccountID: ODFIAccountID, Purpose: "ACHDebit", Amount: int32(microDeposits[i].Amount.Int())},
}
tx, err := postMicroDepositTransaction(logger, client, acct.ID, userID, lines, requestID)
if err != nil {
return nil, err // we retried and failed, so just exit early
}
microDeposits[i].TransactionID = tx.ID
transactions = append(transactions, tx)
}
// submit the reversal of our micro-deposits
lines := []transactionLine{
{AccountID: acct.ID, Purpose: "ACHDebit", Amount: int32(sum)},
{AccountID: ODFIAccountID, Purpose: "ACHCredit", Amount: int32(sum)},
}
tx, err := postMicroDepositTransaction(logger, client, acct.ID, userID, lines, requestID)
if err != nil {
return nil, fmt.Errorf("postMicroDepositTransaction: on sum transaction post: %v", err)
}
transactions = append(transactions, tx)
return transactions, nil
}
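// stringifyAmounts renders the amounts as a comma-separated string, used when
// recording a micro-deposit attempt.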
func stringifyAmounts(amounts []Amount) string {
buf := ""
for i := range amounts {
buf += fmt.Sprintf("%s,", amounts[i].String())
}
return strings.TrimSuffix(buf, ",")
}
// submitMicroDeposits will create ACH files to process multiple micro-deposit transfers to validate a Depository.
// The Originator used belongs to the ODFI (or Moov in tests).
//
// The steps needed are:
// - Grab related transfer objects for the user
// - Create several Transfers and create their ACH files (then validate)
// - Write micro-deposits to SQL table (used in /confirm endpoint)
//
// submitMicroDeposits assumes there are 2 amounts to credit and a third to debit.
func (r *DepositoryRouter) submitMicroDeposits(userID id.User, requestID string, amounts []Amount, dep *Depository) ([]*MicroDeposit, error) {
odfiOriginator, odfiDepository := r.odfiAccount.metadata()
if odfiOriginator == nil || odfiDepository == nil {
return nil, errors.New("unable to find ODFI originator or depository")
}
if r.microDepositAttemper != nil {
if !r.microDepositAttemper.Available(dep.ID) {
return nil, errors.New("no micro-deposit attempts available")
}
if err := r.microDepositAttemper.Record(dep.ID, stringifyAmounts(amounts)); err != nil {
return nil, errors.New("unable to record micro-deposits")
}
}
var microDeposits []*MicroDeposit
withdrawAmount, err := NewAmount("USD", "0.00") // TODO(adam): we need to add a test for the higher level endpoint (or see why no test currently fails)
if err != nil {
return nil, fmt.Errorf("error with withdrawAmount: %v", err)
}
idempotencyKey := base.ID()
rec := &Receiver{
ID: ReceiverID(fmt.Sprintf("%s-micro-deposit-verify", base.ID())),
Status: ReceiverVerified, // Something to pass constructACHFile validation logic
Metadata: dep.Holder, // Depository holder is getting the micro deposit
}
var file *ach.File
for i := range amounts {
req := &transferRequest{
Amount: amounts[i],
Originator: odfiOriginator.ID, // e.g. Moov, Inc
OriginatorDepository: odfiDepository.ID,
Description: fmt.Sprintf("%s micro-deposit verification", odfiDepository.BankName),
StandardEntryClassCode: ach.PPD,
}
// micro-deposits must balance, the 3rd amount is the other two's sum
if i == 0 || i == 1 {
req.Type = PushTransfer
}
req.Receiver, req.ReceiverDepository = rec.ID, dep.ID
if file == nil {
xfer := req.asTransfer(string(rec.ID))
f, err := constructACHFile(string(rec.ID), idempotencyKey, userID, xfer, rec, dep, odfiOriginator, odfiDepository)
if err != nil {
err = fmt.Errorf("problem constructing ACH file for userID=%s: %v", userID, err)
r.logger.Log("microDeposits", err, "requestID", requestID, "userID", userID)
return nil, err
}
file = f
} else {
if err := addMicroDeposit(file, amounts[i]); err != nil {
return nil, err
}
}
// We need to withdraw the micro-deposit from the remote account. To do this simply debit that account by adding another EntryDetail
if w, err := withdrawAmount.Plus(amounts[i]); err != nil {
return nil, fmt.Errorf("error adding %v to withdraw amount: %v", amounts[i].String(), err)
} else {
withdrawAmount = &w // Plus returns a new instance, so accumulate it
}
// If we're on the last micro-deposit then append our withdraw transaction
if i == len(amounts)-1 {
req.Type = PullTransfer // pull: withdraw funds
// Append our withdraw to a file so it's uploaded to the ODFI
if err := addMicroDepositWithdraw(file, withdrawAmount); err != nil {
return nil, fmt.Errorf("problem adding withdraw amount: %v", err)
}
}
microDeposits = append(microDeposits, &MicroDeposit{Amount: amounts[i]})
// Store the Transfer creation as an event
if err := writeTransferEvent(userID, req, r.eventRepo); err != nil {
return nil, fmt.Errorf("userID=%s problem writing micro-deposit transfer event: %v", userID, err)
}
}
// Submit the ACH file against moov's ACH service after adding every micro-deposit
fileID, err := r.achClient.CreateFile(idempotencyKey, file)
if err != nil {
err = fmt.Errorf("problem creating ACH file for userID=%s: %v", userID, err)
r.logger.Log("microDeposits", err, "requestID", requestID, "userID", userID)
return nil, err
}
if err := checkACHFile(r.logger, r.achClient, fileID, userID); err != nil {
return nil, err
}
r.logger.Log("microDeposits", fmt.Sprintf("created ACH file=%s for depository=%s", fileID, dep.ID), "requestID", requestID, "userID", userID)
for i := range microDeposits {
microDeposits[i].FileID = fileID
}
// Post the transaction against Accounts only if it's enabled (flagged via nil AccountsClient)
if r.accountsClient != nil {
transactions, err := updateMicroDepositsWithTransactionIDs(r.logger, r.odfiAccount, r.accountsClient, userID, dep, microDeposits, withdrawAmount.Int(), requestID)
if err != nil {
return microDeposits, fmt.Errorf("submitMicroDeposits: error posting to Accounts: %v", err)
}
r.logger.Log("microDeposits", fmt.Sprintf("created %d transactions for user=%s micro-deposits", len(transactions), userID), "requestID", requestID)
}
return microDeposits, nil
}
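// addMicroDeposit appends another credit EntryDetail to the micro-deposit ACH
// file by copying the existing entry, incrementing its trace number, and
// setting the new amount.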
func addMicroDeposit(file *ach.File, amt Amount) error {
if file == nil || len(file.Batches) != 1 || len(file.Batches[0].GetEntries()) != 1 {
return errors.New("invalid micro-deposit ACH file for deposits")
}
// Copy the EntryDetail and replace TransactionCode
ed := *file.Batches[0].GetEntries()[0] // copy previous EntryDetail
ed.ID = base.ID()[:8]
// increment trace number
if n, _ := strconv.Atoi(ed.TraceNumber); n > 0 {
ed.TraceNumber = strconv.Itoa(n + 1)
}
// use our calculated amount to withdraw all micro-deposits
ed.Amount = amt.Int()
// append our new EntryDetail
file.Batches[0].AddEntry(&ed)
return nil
}
func addMicroDepositWithdraw(file *ach.File, withdrawAmount *Amount) error {
// we expect two EntryDetail records (one for each micro-deposit)
if file == nil || len(file.Batches) != 1 || len(file.Batches[0].GetEntries()) < 1 {
return errors.New("invalid micro-deposit ACH file for withdraw")
}
// We need to adjust ServiceClassCode as this batch has a debit and credit now
bh := file.Batches[0].GetHeader()
bh.ServiceClassCode = ach.MixedDebitsAndCredits
file.Batches[0].SetHeader(bh)
// Copy the EntryDetail and replace TransactionCode
entries := file.Batches[0].GetEntries()
ed := *entries[len(entries)-1] // take last entry detail
ed.ID = base.ID()[:8]
// TransactionCodes seem to follow a simple pattern:
// 37 SavingsDebit -> 32 SavingsCredit
// 27 CheckingDebit -> 22 CheckingCredit
ed.TransactionCode -= 5
// increment trace number
if n, _ := strconv.Atoi(ed.TraceNumber); n > 0 {
ed.TraceNumber = strconv.Itoa(n + 1)
}
// use our calculated amount to withdraw all micro-deposits
ed.Amount = withdrawAmount.Int()
// append our new EntryDetail
file.Batches[0].AddEntry(&ed)
return nil
}
type confirmDepositoryRequest struct {
Amounts []string `json:"amounts"`
}
// confirmMicroDeposits checks our database for a depository's micro deposits (used to validate the user owns the Depository)
// and if successful changes the Depository status to DepositoryVerified.
//
// TODO(adam): Should we allow a Depository to be confirmed before the micro-deposit ACH file is
// uploaded? Technically there's really no way for an end-user to see them before posting, however
// our demo and tests can look them up in Accounts right away and quickly verify the Depository.
func (r *DepositoryRouter) confirmMicroDeposits() http.HandlerFunc {
return func(w http.ResponseWriter, httpReq *http.Request) {
responder := route.NewResponder(r.logger, w, httpReq)
if responder == nil {
return
}
depID := GetDepositoryID(httpReq)
if depID == "" {
// 404 - A depository with the specified ID was not found.
w.WriteHeader(http.StatusNotFound)
w.Write([]byte(`{"error": "depository not found"}`))
return
}
// Check the depository status and confirm it belongs to the user
dep, err := r.depositoryRepo.GetUserDepository(depID, responder.XUserID)
if err != nil {
responder.Log("confirmMicroDeposits", err)
responder.Problem(err)
return
}
if dep.Status != DepositoryUnverified {
err = fmt.Errorf("depository %s in bogus status %s", dep.ID, dep.Status)
responder.Log("confirmMicroDeposits", err)
responder.Problem(err)
return
}
if r.microDepositAttemper != nil {
if !r.microDepositAttemper.Available(dep.ID) {
responder.Problem(errors.New("no micro-deposit attempts available"))
return
}
}
// Read amounts from request JSON
var req confirmDepositoryRequest
rr := io.LimitReader(httpReq.Body, maxReadBytes)
if err := json.NewDecoder(rr).Decode(&req); err != nil {
responder.Log("confirmDepositoryRequest", fmt.Sprintf("problem reading request: %v", err))
responder.Problem(err)
return
}
var amounts []Amount
for i := range req.Amounts {
amt := &Amount{}
if err := amt.FromString(req.Amounts[i]); err != nil {
continue
}
amounts = append(amounts, *amt)
}
if len(amounts) == 0 {
responder.Log("confirmMicroDeposits", "no micro-deposit amounts found")
// 400 - Invalid Amounts
responder.Problem(errors.New("invalid amounts, found none"))
return
}
if err := r.depositoryRepo.confirmMicroDeposits(depID, responder.XUserID, amounts); err != nil {
responder.Log("confirmMicroDeposits", fmt.Sprintf("problem confirming micro-deposits: %v", err))
responder.Problem(err)
return
}
// Update Depository status
if err := markDepositoryVerified(r.depositoryRepo, depID, responder.XUserID); err != nil {
responder.Log("confirmMicroDeposits", fmt.Sprintf("problem marking depository as Verified: %v", err))
return
}
microDepositsConfirmed.With("destination", dep.RoutingNumber).Add(1)
// 200 - Micro deposits verified
w.WriteHeader(http.StatusOK)
w.Write([]byte("{}"))
}
}
// GetMicroDeposits will retrieve the micro deposits for a given depository. This endpoint is designed for paygate's admin endpoints.
// If an amount does not parse it will be discarded silently.
func (r *SQLDepositoryRepo) GetMicroDeposits(id id.Depository) ([]*MicroDeposit, error) {
query := `select amount, file_id, transaction_id from micro_deposits where depository_id = ?`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query(id)
if err != nil {
return nil, err
}
defer rows.Close()
return accumulateMicroDeposits(rows)
}
// getMicroDepositsForUser will retrieve the micro deposits for a given depository. If an amount does not parse it will be discarded silently.
func (r *SQLDepositoryRepo) getMicroDepositsForUser(id id.Depository, userID id.User) ([]*MicroDeposit, error) {
query := `select amount, file_id, transaction_id from micro_deposits where user_id = ? and depository_id = ? and deleted_at is null`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query(userID, id)
if err != nil {
return nil, err
}
defer rows.Close()
return accumulateMicroDeposits(rows)
}
func accumulateMicroDeposits(rows *sql.Rows) ([]*MicroDeposit, error) {
var microDeposits []*MicroDeposit
for rows.Next() {
fileID, transactionID := "", ""
var value string
if err := rows.Scan(&value, &fileID, &transactionID); err != nil {
continue
}
amt := &Amount{}
if err := amt.FromString(value); err != nil {
continue
}
microDeposits = append(microDeposits, &MicroDeposit{
Amount: *amt,
FileID: fileID,
TransactionID: transactionID,
})
}
return microDeposits, rows.Err()
}
// InitiateMicroDeposits will save the provided []Amount into our database. If amounts have already been saved then
// no new amounts will be added.
func (r *SQLDepositoryRepo) InitiateMicroDeposits(id id.Depository, userID id.User, microDeposits []*MicroDeposit) error {
existing, err := r.getMicroDepositsForUser(id, userID)
if err != nil || len(existing) > 0 {
return fmt.Errorf("not initializing more micro deposits, already have %d or got error=%v", len(existing), err)
}
// write amounts
tx, err := r.db.Begin()
if err != nil {
return err
}
now, query := time.Now(), `insert into micro_deposits (depository_id, user_id, amount, file_id, transaction_id, created_at) values (?, ?, ?, ?, ?, ?)`
stmt, err := tx.Prepare(query)
if err != nil {
return fmt.Errorf("InitiateMicroDeposits: prepare error=%v rollback=%v", err, tx.Rollback())
}
defer stmt.Close()
for i := range microDeposits {
_, err = stmt.Exec(id, userID, microDeposits[i].Amount.String(), microDeposits[i].FileID, microDeposits[i].TransactionID, now)
if err != nil {
return fmt.Errorf("InitiateMicroDeposits: scan error=%v rollback=%v", err, tx.Rollback())
}
}
return tx.Commit()
}
// confirmMicroDeposits will compare the provided guessAmounts against what's been persisted for a user. If the amounts do not match
// or the number of guesses differs, the call will return a non-nil error.
func (r *SQLDepositoryRepo) confirmMicroDeposits(id id.Depository, userID id.User, guessAmounts []Amount) error {
microDeposits, err := r.getMicroDepositsForUser(id, userID)
if err != nil {
return fmt.Errorf("unable to confirm micro deposits, got error=%v", err)
}
if len(microDeposits) == 0 {
return errors.New("unable to confirm micro deposits, got 0 micro deposits")
}
// Check amounts, all must match
if len(guessAmounts) != len(microDeposits) || len(guessAmounts) == 0 {
return fmt.Errorf("incorrect amount of guesses, got %d", len(guessAmounts)) // don't share len(microDeposits), that's an info leak
}
found := 0
for i := range microDeposits {
for k := range guessAmounts {
if microDeposits[i].Amount.Equal(guessAmounts[k]) {
found += 1
break
}
}
}
if found != len(microDeposits) {
return errors.New("incorrect micro deposit guesses")
}
return nil
}
// GetMicroDepositCursor returns a microDepositCursor for iterating through micro-deposits in ascending order (by CreatedAt)
// beginning at the start of the current day.
func (r *SQLDepositoryRepo) GetMicroDepositCursor(batchSize int) *MicroDepositCursor {
now := time.Now()
return &MicroDepositCursor{
BatchSize: batchSize,
DepRepo: r,
newerThan: time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC),
}
}
// MicroDepositCursor allows for iterating through micro-deposits in ascending order (by CreatedAt)
// to merge into files uploaded to an ODFI.
type MicroDepositCursor struct {
BatchSize int
DepRepo *SQLDepositoryRepo
// newerThan represents the minimum (oldest) created_at value to return in the batch.
// The value starts at today's first instant and progresses towards time.Now() with each
// batch by being set to the batch's newest time.
newerThan time.Time
}
type UploadableMicroDeposit struct {
DepositoryID string
UserID string
Amount *Amount
FileID string
CreatedAt time.Time
}
// Next returns a slice of micro-deposit objects from the current day. Next should be called to process
// all objects for a given day in batches.
func (cur *MicroDepositCursor) Next() ([]UploadableMicroDeposit, error) {
query := `select depository_id, user_id, amount, file_id, created_at from micro_deposits where deleted_at is null and merged_filename is null and created_at > ? order by created_at asc limit ?`
stmt, err := cur.DepRepo.db.Prepare(query)
if err != nil {
return nil, fmt.Errorf("microDepositCursor.Next: prepare: %v", err)
}
defer stmt.Close()
rows, err := stmt.Query(cur.newerThan, cur.BatchSize)
if err != nil {
return nil, fmt.Errorf("microDepositCursor.Next: query: %v", err)
}
defer rows.Close()
max := cur.newerThan
var microDeposits []UploadableMicroDeposit
for rows.Next() {
var m UploadableMicroDeposit
var amt string
if err := rows.Scan(&m.DepositoryID, &m.UserID, &amt, &m.FileID, &m.CreatedAt); err != nil {
return nil, fmt.Errorf("transferCursor.Next: scan: %v", err)
}
var amount Amount
if err := amount.FromString(amt); err != nil {
return nil, fmt.Errorf("transferCursor.Next: %s Amount from string: %v", amt, err)
}
m.Amount = &amount
if m.CreatedAt.After(max) {
max = m.CreatedAt // advance to latest timestamp
}
microDeposits = append(microDeposits, m)
}
cur.newerThan = max
return microDeposits, rows.Err()
}
// MarkMicroDepositAsMerged will set the merged_filename on micro-deposits so they aren't merged into multiple files
// and the file uploaded to the Federal Reserve can be tracked.
func (r *SQLDepositoryRepo) MarkMicroDepositAsMerged(filename string, mc UploadableMicroDeposit) error {
query := `update micro_deposits set merged_filename = ?
where depository_id = ? and file_id = ? and amount = ? and (merged_filename is null or merged_filename = '') and deleted_at is null`
stmt, err := r.db.Prepare(query)
if err != nil {
return fmt.Errorf("MarkMicroDepositAsMerged: filename=%s: %v", filename, err)
}
defer stmt.Close()
_, err = stmt.Exec(filename, mc.DepositoryID, mc.FileID, mc.Amount.String())
return err
}
func (r *SQLDepositoryRepo) LookupMicroDepositFromReturn(id id.Depository, amount *Amount) (*MicroDeposit, error) {
query := `select file_id from micro_deposits where depository_id = ? and amount = ? and deleted_at is null order by created_at desc limit 1;`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, fmt.Errorf("LookupMicroDepositFromReturn prepare: %v", err)
}
defer stmt.Close()
var fileID string
if err := stmt.QueryRow(id, amount.String()).Scan(&fileID); err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, fmt.Errorf("LookupMicroDepositFromReturn scan: %v", err)
}
if string(fileID) != "" {
return &MicroDeposit{Amount: *amount, FileID: fileID}, nil
}
return nil, nil
}
// SetReturnCode will write the given returnCode (e.g. "R14") onto the row for one of a Depository's micro-deposit
func (r *SQLDepositoryRepo) SetReturnCode(id id.Depository, amount Amount, returnCode string) error {
query := `update micro_deposits set return_code = ? where depository_id = ? and amount = ? and return_code = '' and deleted_at is null;`
stmt, err := r.db.Prepare(query)
if err != nil {
return err
}
defer stmt.Close()
_, err = stmt.Exec(returnCode, id, amount.String())
return err
}
func (r *SQLDepositoryRepo) getMicroDepositReturnCodes(id id.Depository) []*ach.ReturnCode {
query := `select distinct md.return_code from micro_deposits as md
inner join depositories as deps on md.depository_id = deps.depository_id
where md.depository_id = ? and deps.status = ? and md.return_code <> '' and md.deleted_at is null and deps.deleted_at is null`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil
}
defer stmt.Close()
rows, err := stmt.Query(id, DepositoryRejected)
if err != nil {
return nil
}
defer rows.Close()
returnCodes := make(map[string]*ach.ReturnCode)
for rows.Next() {
var code string
if err := rows.Scan(&code); err != nil {
return nil
}
if _, exists := returnCodes[code]; !exists {
returnCodes[code] = ach.LookupReturnCode(code)
}
}
var codes []*ach.ReturnCode
for k := range returnCodes {
codes = append(codes, returnCodes[k])
}
return codes
}
func ReadMergedFilename(repo *SQLDepositoryRepo, amount *Amount, id id.Depository) (string, error) {
query := `select merged_filename from micro_deposits where amount = ? and depository_id = ? limit 1;`
stmt, err := repo.db.Prepare(query)
if err != nil {
return "", err
}
defer stmt.Close()
var mergedFilename string
if err := stmt.QueryRow(amount.String(), id).Scan(&mergedFilename); err != nil {
return "", err
}
return mergedFilename, nil
}
|
[
"\"ODFI_IDENTIFICATION\"",
"\"ODFI_BANK_NAME\"",
"\"ODFI_HOLDER\""
] |
[] |
[
"ODFI_HOLDER",
"ODFI_BANK_NAME",
"ODFI_IDENTIFICATION"
] |
[]
|
["ODFI_HOLDER", "ODFI_BANK_NAME", "ODFI_IDENTIFICATION"]
|
go
| 3 | 0 | |
sprint1/sprint-challenge/acme_test.py
|
"""
Part 5 of Unit 3 sprint challenge.
Unit testing our code.
"""
#!/usr/bin/env python
import unittest
from acme import Product
from acme_report import generate_products, generate_report, ADJECTIVES, NOUNS
class AcmeProdTest(unittest.TestCase):
"""
Making sure Acme products are the tops!
"""
def test_default_price_self(self):
"""
Test default price == 10
"""
prod = Product('Test Product 1')
self.assertEqual(prod.price, 10)
def test_default_weight_self(self):
"""
Test default weight == 20
"""
prod = Product('Test Product 2')
self.assertEqual(prod.weight, 20)
def test_explode_method(self):
"""
Ensure explode works
"""
prod = Product('Test Product Bomb')
self.assertIsNotNone(prod.explode)
def test_steal_method(self):
"""
Ensure stealability working
"""
prod = Product('Test Product Stolen')
self.assertIsNotNone(prod.stealability)
class AcmeReportTest(unittest.TestCase):
"""
Verify reporter working as intended
"""
def test_default_num_products(self):
"""
Assert number of generated products!
"""
products = generate_products()
self.assertEqual(len(products), 30)
def test_legal_names(self):
"""
Confirm names are all properly assembled.
"""
products = generate_products()
for x in products:
self.assertIn(x.name.split()[0], ADJECTIVES)
self.assertIn(x.name.split()[1], NOUNS)
def test_report_names(self):
"""
Confirm names are randomly generated.
"""
report = generate_report(generate_products())
self.assertGreaterEqual(len(set(report[0])), 2)
def test_report_price_range(self):
"""
Confirm price range acceptable limits.
"""
report = generate_report(generate_products())
self.assertGreaterEqual(min(report[1]), 5.00)
self.assertLessEqual(max(report[1]), 1938.67)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
reversedict/elastic.py
|
import elasticsearch
import elasticsearch.helpers as helpers
import elasticsearch.exceptions as exceptions
import os
HOST = os.environ.get('ELASTICSEARCH')
SEARCH_INDEX = 'reverse_dict'
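# LazyClient defers creating the Elasticsearch connection until the first
# attribute access, using the ELASTICSEARCH host when it is set and falling
# back to localhost:9200 otherwise.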
class LazyClient(object):
def __init__(self):
self._client = False
def __getattr__(self,name):
if name == '_client':
return self._client
if not self._client:
print('connecting to', HOST)
if HOST:
self._client = elasticsearch.Elasticsearch([HOST], connection_class=elasticsearch.RequestsHttpConnection)
else:
self._client = elasticsearch.Elasticsearch(['localhost:9200'])
return getattr(self._client, name)
client = LazyClient()
def delete_index(index=None):
return client.indices.delete(index=index or SEARCH_INDEX, ignore=404)
def create_index(index=None):
return client.indices.create(index=index or SEARCH_INDEX, ignore=400)
def refresh_index(index=None):
return client.indices.refresh(index=index or SEARCH_INDEX)
|
[] |
[] |
[
"ELASTICSEARCH"
] |
[]
|
["ELASTICSEARCH"]
|
python
| 1 | 0 | |
naming/grpc/resolver.go
|
package resolver
import (
"fmt"
"net/url"
"os"
"strconv"
"strings"
"github.com/bilibili/discovery/naming"
log "github.com/bilibili/kratos/pkg/log"
"google.golang.org/grpc/resolver"
)
const (
// Scheme is the scheme of discovery address
Scheme = "grpc"
)
var (
_ resolver.Resolver = &Resolver{}
_ resolver.Builder = &Builder{}
)
// MD is context metadata for balancer and resolver.
type MD struct {
Weight int64
Color string
}
// Register registers a resolver builder backed by the given naming.Builder.
func Register(b naming.Builder) {
resolver.Register(&Builder{b})
}
// Builder is also a resolver builder.
// Its Build() function constructs a Resolver that watches the discovery backend.
type Builder struct {
naming.Builder
}
// Build parses the target, constructs a Resolver for it and starts its watcher.
func (b *Builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
// discovery://default/service.name?zone=sh001&cluster=c1&cluster=c2&cluster=c3
dsn := strings.SplitN(target.Endpoint, "?", 2)
if len(dsn) == 0 {
return nil, fmt.Errorf("grpc resolver: parse target.Endpoint(%s) failed! the endpoint is empty", target.Endpoint)
}
// parse params info
zone := os.Getenv("ZONE")
clusters := map[string]struct{}{}
if len(dsn) == 2 {
if u, err := url.ParseQuery(dsn[1]); err == nil {
if zones := u[naming.MetaZone]; len(zones) > 0 {
zone = zones[0]
}
for _, c := range u[naming.MetaCluster] {
clusters[c] = struct{}{}
}
}
}
r := &Resolver{
cc: cc,
nr: b.Builder.Build(dsn[0]),
quit: make(chan struct{}, 1),
zone: zone,
clusters: clusters,
}
go r.watcher()
return r, nil
}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver struct {
nr naming.Resolver
cc resolver.ClientConn
quit chan struct{}
zone string
clusters map[string]struct{}
}
// Close signals the watcher to stop and closes the underlying naming resolver.
func (r *Resolver) Close() {
select {
case r.quit <- struct{}{}:
r.nr.Close()
default:
}
}
// ResolveNow is a noop for Resolver.
func (r *Resolver) ResolveNow(o resolver.ResolveNowOption) {
}
func (r *Resolver) watcher() {
event := r.nr.Watch()
for {
select {
case <-r.quit:
return
case _, ok := <-event:
if !ok {
return
}
}
ins, ok := r.nr.Fetch()
if ok {
instances, ok := ins.Instances[r.zone]
if !ok {
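// no instances registered in the local zone: fall back to the
// instances from every zone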
for _, value := range ins.Instances {
instances = append(instances, value...)
}
}
if len(instances) > 0 {
r.newAddress(instances)
}
}
}
}
func (r *Resolver) newAddress(instances []*naming.Instance) {
var (
totalWeight int64
addrs = make([]resolver.Address, 0, len(instances))
)
for n, ins := range instances {
if len(r.clusters) > 0 {
if _, ok := r.clusters[ins.Metadata[naming.MetaCluster]]; !ok {
continue
}
}
rpcAddr, color, weight := extractAddrs(ins)
if rpcAddr == "" {
log.Warn("grpc resolver: invalid rpc address(%s,%s,%v) found!", ins.AppID, ins.Hostname, ins.Addrs)
continue
}
if weight <= 0 {
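// missing or non-positive weight: default the first address to 10,
// otherwise derive it from the average of the weights seen so far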
if totalWeight == 0 {
weight = 10
} else {
weight = totalWeight / int64(n)
}
}
totalWeight += weight
addr := resolver.Address{
Addr: rpcAddr,
Type: resolver.Backend,
ServerName: ins.AppID,
Metadata: &MD{Weight: weight, Color: color},
}
addrs = append(addrs, addr)
}
r.cc.NewAddress(addrs)
}
func extractAddrs(ins *naming.Instance) (addr, color string, weight int64) {
color = ins.Metadata[naming.MetaColor]
weight, _ = strconv.ParseInt(ins.Metadata[naming.MetaWeight], 10, 64)
for _, a := range ins.Addrs {
u, err := url.Parse(a)
if err == nil && u.Scheme == Scheme {
addr = u.Host
}
}
return
}
|
[
"\"ZONE\""
] |
[] |
[
"ZONE"
] |
[]
|
["ZONE"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"log"
"os"
"pomodoro_mod/commands_controller"
"pomodoro_mod/commands_service"
"github.com/joho/godotenv"
)
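// loadDotenv reads a local .env file (via godotenv) into the process
// environment so that values such as BOT_TOKEN are available below.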
func loadDotenv() {
err := godotenv.Load()
if err != nil {
log.Fatal("Cannot load .env file")
}
}
func main() {
loadDotenv()
BotToken := os.Getenv("BOT_TOKEN")
//
commandsChan := make(chan commands_controller.Command)
controller := commands_controller.NewCommandsComnroller(commandsChan)
commandsService := commands_service.NewTelegramCommandsService(BotToken, commandsChan)
// start listening for commands from the user (incoming from commandsService)
controller.Listen()
// start listening for messages from Telegram
commandsService.Listen()
}
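// A .env file for local runs might look like this (the value is a placeholder):
//
//	BOT_TOKEN=123456:replace-with-your-telegram-bot-token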
|
[
"\"BOT_TOKEN\""
] |
[] |
[
"BOT_TOKEN"
] |
[]
|
["BOT_TOKEN"]
|
go
| 1 | 0 | |
extractor_demo.py
|
#!/usr/bin/env python
import argparse
import os
import sys
import torch
import torch.nn as nn
import datasets
import models.resnet as ResNet
import models.senet as SENet
from trainer import Trainer, Validator
from extractor import Extractor
import utils
configurations = {
1: dict(
max_iteration=1000000,
lr=1.0e-1,
momentum=0.9,
weight_decay=0.0,
gamma=0.1, # "lr_policy: step"
step_size=1000000, # "lr_policy: step"
interval_validate=1000,
),
}
def get_parameters(model, bias=False):
for k, m in model._modules.items():
if k == "fc" and isinstance(m, nn.Linear):
if bias:
yield m.bias
else:
yield m.weight
N_IDENTITY = 8631 # the number of identities in VGGFace2 for which ResNet and SENet are trained
def main():
parser = argparse.ArgumentParser("PyTorch Face Recognizer")
parser.add_argument('cmd', type=str, choices=['train', 'test', 'extract'], help='train, test or extract')
parser.add_argument('--arch_type', type=str, default='resnet50_ft', help='model type',
choices=['resnet50_ft', 'senet50_ft', 'resnet50_scratch', 'senet50_scratch'])
parser.add_argument('--dataset_dir', type=str, default='/path/to/dataset_directory', help='dataset directory')
parser.add_argument('--log_file', type=str, default='/path/to/log_file', help='log file')
# parser.add_argument('--train_img_list_file', type=str, default='/path/to/train_image_list.txt',
# help='text file containing image files used for training')
parser.add_argument('--test_img_list_file', type=str, default='/path/to/test_image_list.txt',
help='text file containing image files used for validation, test or feature extraction')
parser.add_argument('--meta_file', type=str, default='/path/to/identity_meta.csv', help='meta file')
parser.add_argument('--checkpoint_dir', type=str, default='/path/to/checkpoint_directory',
help='checkpoints directory')
parser.add_argument('--feature_dir', type=str, default='/path/to/feature_directory',
help='directory where extracted features are saved')
parser.add_argument('-c', '--config', type=int, default=1, choices=configurations.keys(),
help='the number of settings and hyperparameters used in training')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
# parser.add_argument('--resume', type=str, default='', help='checkpoint file')
parser.add_argument('--weight_file', type=str, default='/path/to/weight_file.pkl', help='weight file')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--horizontal_flip', action='store_true',
help='horizontally flip images specified in test_img_list_file')
args = parser.parse_args()
print(args)
if args.cmd == "extract":
utils.create_dir(args.feature_dir)
if args.cmd == 'train':
utils.create_dir(args.checkpoint_dir)
cfg = configurations[args.config]
log_file = args.log_file
# resume = args.resume
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
cuda = torch.cuda.is_available()
if cuda:
print("torch.backends.cudnn.version: {}".format(torch.backends.cudnn.version()))
torch.manual_seed(1337)
if cuda:
torch.cuda.manual_seed(1337)
# 0. id label map
meta_file = args.meta_file
id_label_dict = utils.get_id_label_map(meta_file)
# 1. data loader
root = args.dataset_dir
# train_img_list_file = args.train_img_list_file
test_img_list_file = args.test_img_list_file
kwargs = {'num_workers': args.workers, 'pin_memory': True} if cuda else {}
# if args.cmd == 'train':
# dt = datasets.VGG_Faces2(root, train_img_list_file, id_label_dict, split='train')
# train_loader = torch.utils.data.DataLoader(dt, batch_size=args.batch_size, shuffle=True, **kwargs)
dv = datasets.VGG_Faces2(root, test_img_list_file, id_label_dict, split='valid',
horizontal_flip=args.horizontal_flip)
val_loader = torch.utils.data.DataLoader(dv, batch_size=args.batch_size, shuffle=False, **kwargs)
# 2. model
include_top = True if args.cmd != 'extract' else False
if 'resnet' in args.arch_type:
model = ResNet.resnet50(num_classes=N_IDENTITY, include_top=include_top)
else:
model = SENet.senet50(num_classes=N_IDENTITY, include_top=include_top)
# print(model)
start_epoch = 0
start_iteration = 0
utils.load_state_dict(model, args.weight_file)
# if resume:
# checkpoint = torch.load(resume)
# model.load_state_dict(checkpoint['model_state_dict'])
# start_epoch = checkpoint['epoch']
# start_iteration = checkpoint['iteration']
# assert checkpoint['arch'] == args.arch_type
# print("Resume from epoch: {}, iteration: {}".format(start_epoch, start_iteration))
# else:
# utils.load_state_dict(model, args.weight_file)
# if args.cmd == 'train':
# model.fc.reset_parameters()
if cuda:
model = model.cuda()
criterion = nn.CrossEntropyLoss()
if cuda:
criterion = criterion.cuda()
extractor = Extractor(
cuda=cuda,
model=model,
val_loader=val_loader,
log_file=log_file,
feature_dir=args.feature_dir,
flatten_feature=True,
print_freq=1,
)
extractor.extract()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
scripts/jupyter_notebook_config.py
|
# source: https://stackoverflow.com/questions/47092878/auto-configure-jupyter-password-from-command-line
import os
#from IPython.lib import passwd
from notebook.auth import passwd
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the opportunity
# to the user to enter a new password at the same time that will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
c.NotebookApp.allow_password_change = True
# Set options for certfile, ip, password, and browser auto-opening
#c.NotebookApp.certfile = u'/absolute/path/to/your/certificate/mycert.pem'
#c.NotebookApp.keyfile = u'/absolute/path/to/your/certificate/mykey.key'
# Set ip to '*' to bind on all interfaces (ips) for the public server
c.NotebookApp.ip = '*'
#c.NotebookApp.ip = '0.0.0.0'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = True
c.MultiKernelManager.default_kernel_name = 'python3'
# sets a password if PASSWORD is set in the environment
#if 'PASSWORD' in os.environ:
# c.NotebookApp.password = passwd(os.environ['PASSWORD'])
# del os.environ['PASSWORD']
# sets a password if PASSWORD is set in the environment
if 'PASSWORD' not in os.environ or os.environ['PASSWORD'] is None:
os.environ['PASSWORD']="ChangeMe!"
if 'PASSWORD' in os.environ:
print("===>> Passowrd=" + os.environ['PASSWORD'])
c.NotebookApp.password = passwd(os.environ['PASSWORD'])
print("Password file at " + os.environ['JUPYTER_CONF_DIR'] + "/jupyter_password.txt")
fp = open(os.environ['JUPYTER_CONF_DIR']+"/jupyter_password.txt", "w")
fp.write(os.environ['PASSWORD'])
fp.close()
#del os.environ['PASSWORD']
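# An illustrative launch with this configuration (values are placeholders):
#   PASSWORD=change-me PORT=8080 jupyter notebook --config=jupyter_notebook_config.py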
|
[] |
[] |
[
"PORT",
"JUPYTER_CONF_DIR",
"PASSWORD"
] |
[]
|
["PORT", "JUPYTER_CONF_DIR", "PASSWORD"]
|
python
| 3 | 0 | |
doc/sphinx/extensions/builders/snippet_source_linter.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 AVSystem <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import re
from builders.dummy import DummyBuilder
from file_dirtiness_checker import FileDirtinessChecker
from snippet_source import SnippetSourceNode
class CodeChunk:
"""
A chunk of code that should be found in source file (+/- indents).
"""
def __init__(self,
doc_source_path,
doc_source_start_lineno,
code_source_path,
chunk_code):
"""
@param doc_source_path: str
- path to a documentation source @p chunk_code was
extracted from.
@param doc_source_start_lineno: int
- line number at which @p chunk_code can be found in
@p doc_source_path, used to provide extra debug info.
@param code_source_path: str
- path to the source code file the chunk should be in.
@param chunk_code: str
- code snippet, as found in docs.
"""
self.doc_source_path = doc_source_path
self.doc_source_start_lineno = doc_source_start_lineno
self.code_source_path = code_source_path
self.code = chunk_code
def to_regex(self):
"""
@returns: str
- regex matching the chunk represented by SELF +/- indents.
"""
return r'\s+'.join(re.escape(line.strip())
for line in self.code.split('\n') if line.strip())
def __str__(self):
return ('----- BEGIN CHUNK -----\n'
'doc: %s line %d\n'
'code: %s\n'
'----- CODE -----\n'
'%s\n'
'----- END CHUNK -----'
% (self.doc_source_path, self.doc_source_start_lineno,
self.code_source_path, self.code))
class CodeSnippet:
def __init__(self,
doc_source_path,
doc_source_start_lineno,
code_source_path,
code):
"""
Code snippet may consist of multiple chunks separated by
SNIPPET_SEPARATOR (i.e. "// ... something here" comment). In an actual
source file there may be some code in place of the separator - that
should not trigger a mismatch error.
@param doc_source_path: str
- path to a documentation source @p code_lines were
extracted from.
@param doc_source_start_lineno: int
- line number of the first line in @p code_lines in
@p doc_source_path, used to provide extra debug info.
@param code_source_path: str
- path to a source file @p code_lines associated with
@p code_lines (through ".. snippet-source:" comment).
Relative to PROJECT_SOURCE_ROOT.
@param code: str
- code snippet extracted from @p doc_source_path file.
"""
SNIPPET_SEPARATOR = r'^\s*//\s*\.\.\..*$'
self.code_source_path = code_source_path
self.chunks = []
chunk_lines = []
start_idx = 0
for idx, line in enumerate(code.split('\n')):
if re.match(SNIPPET_SEPARATOR, line):
self.chunks.append(CodeChunk(doc_source_path,
doc_source_start_lineno + start_idx,
code_source_path,
'\n'.join(chunk_lines)))
chunk_lines = []
start_idx = idx + 1
else:
chunk_lines.append(line)
self.chunks.append(CodeChunk(doc_source_path,
doc_source_start_lineno + start_idx,
code_source_path,
'\n'.join(chunk_lines)))
def get_invalid_chunks(self):
"""
@returns: List[CodeChunk]
- a list of chunks that were not found in associated source
code files.
"""
with open(os.path.join(os.environ['CMAKE_SOURCE_DIR'], self.code_source_path)) as f:
source = f.read()
invalid_chunks = []
for chunk in self.chunks:
if not re.search(chunk.to_regex(), source):
invalid_chunks.append(chunk)
return invalid_chunks
DocSourceErrors = collections.namedtuple('DocSourceErrors',
['invalid_chunks', # List[CodeChunk]
'dirty_referenced_paths', # Set[source_path: str]
'missing_referenced_paths']) # Dict[source_path: str, List[line: int]]
class SnippetSourceLintBuilder(DummyBuilder):
name = 'snippet_source_lint'
def __init__(self, *args, **kwargs):
super(SnippetSourceLintBuilder, self).__init__(*args, **kwargs)
# doc_filename: str -> DocSourceErrors
self.possibly_invalid_docs = dict()
def write_doc(self, docname, doctree):
dirtiness_checker = FileDirtinessChecker(os.environ['SNIPPET_SOURCE_MD5FILE'])
invalid_chunks = []
dirty_referenced_paths = set()
missing_referenced_paths = collections.defaultdict(lambda: [])
for node in doctree.traverse(SnippetSourceNode):
try:
snip = CodeSnippet(docname,
node.line,
node.source_filepath,
node.astext())
invalid_chunks += snip.get_invalid_chunks()
if dirtiness_checker.is_file_dirty(node.source_filepath):
dirty_referenced_paths.add(node.source_filepath)
except (OSError, IOError):
missing_referenced_paths[node.source_filepath].append(node.line)
if invalid_chunks or dirty_referenced_paths or missing_referenced_paths:
self.possibly_invalid_docs[docname] = DocSourceErrors(invalid_chunks, dirty_referenced_paths,
missing_referenced_paths)
def finish(self):
if self.possibly_invalid_docs:
print('')
print('Some potential errors detected in documentation:')
print('')
for doc_filepath, errors in sorted(self.possibly_invalid_docs.items()):
print('- %s:' % (doc_filepath,))
if errors.missing_referenced_paths:
print(' - %s references to missing files:' % (len(errors.missing_referenced_paths),))
for path, lines in sorted(errors.missing_referenced_paths.items()):
print(' - %s at line%s %s' % (
path, 's' if len(lines) > 1 else '', ', '.join(str(l) for l in lines)))
if errors.dirty_referenced_paths:
print(' - %d references to recently modified files:' % (len(errors.dirty_referenced_paths),))
for path in sorted(errors.dirty_referenced_paths):
print(' - %s' % (path,))
if errors.invalid_chunks:
print(' - %s chunks missing from sources' % (len(errors.invalid_chunks),))
print('')
for chunk in errors.invalid_chunks:
print(chunk)
print('')
if self.possibly_invalid_docs:
print('Resolve errors above, then use following command to update md5 hash cache:')
print('')
print(' cd "%s"; sphinx-build -Q -b snippet_source_list_references -c "%s" "%s" /tmp | sort | xargs md5sum > "%s"' % (
os.environ['CMAKE_SOURCE_DIR'], os.environ['ANJAY_SPHINX_DOC_CONF_DIR'],
os.environ['ANJAY_SPHINX_DOC_ROOT_DIR'], os.environ['SNIPPET_SOURCE_MD5FILE']))
print('')
raise Exception('Lint errors occurred')
else:
print('snippet-source lint OK')
def setup(app):
app.add_builder(SnippetSourceLintBuilder)
|
[] |
[] |
[
"ANJAY_SPHINX_DOC_CONF_DIR",
"ANJAY_SPHINX_DOC_ROOT_DIR",
"CMAKE_SOURCE_DIR",
"SNIPPET_SOURCE_MD5FILE"
] |
[]
|
["ANJAY_SPHINX_DOC_CONF_DIR", "ANJAY_SPHINX_DOC_ROOT_DIR", "CMAKE_SOURCE_DIR", "SNIPPET_SOURCE_MD5FILE"]
|
python
| 4 | 0 | |
cmd/query_test.go
|
package cmd
import (
"bytes"
"context"
"os"
"path/filepath"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/johnmanjiro13/lgotm/cmd/mock_cmd"
)
func TestQueryCommand_LGTM(t *testing.T) {
tests := map[string]struct {
query string
width uint
height uint
expectedFileName string
}{
"width: 400, height: 0": {
query: "query",
width: 400,
height: 0,
expectedFileName: "lgtm400x0.png",
},
"width: 0, height: 400": {
query: "query",
width: 0,
height: 400,
expectedFileName: "lgtm0x400.png",
},
"width: 300, height: 400": {
query: "query",
width: 300,
height: 400,
expectedFileName: "lgtm300x400.png",
},
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockCustomSearchRepo := mock_cmd.NewMockCustomSearchRepository(ctrl)
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
src, err := os.Open("testdata/image.jpg")
assert.NoError(t, err)
defer src.Close()
mockCustomSearchRepo.EXPECT().FindImage(gomock.Any(), tt.query).Return(src, nil)
c := &queryCommand{search: mockCustomSearchRepo}
res, err := c.lgtm(context.Background(), tt.query, tt.width, tt.height)
assert.NoError(t, err)
actual := new(bytes.Buffer)
_, err = actual.ReadFrom(res)
assert.NoError(t, err)
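// With IS_CREATE_DST_FILE=true the golden file is rewritten from the
// current output (via the createDstFile helper) before the comparison below.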
if os.Getenv("IS_CREATE_DST_FILE") == "true" {
createDstFile(t, actual.Bytes(), tt.expectedFileName)
}
expectedFile, err := os.Open(filepath.Join("testdata", tt.expectedFileName))
assert.NoError(t, err)
defer expectedFile.Close()
expected := new(bytes.Buffer)
_, err = expected.ReadFrom(expectedFile)
assert.NoError(t, err)
assert.Equal(t, expected.Bytes(), actual.Bytes())
})
}
}
func TestInitConfig(t *testing.T) {
tests := map[string]struct {
cfgFile string
apiKey string
engineID string
isEnv bool
}{
"with cfgFile": {
cfgFile: "testdata/config",
isEnv: false,
apiKey: "api_key",
engineID: "engine_id",
},
"with environment variables": {
cfgFile: "",
isEnv: true,
apiKey: "api_key_env",
engineID: "engine_id_env",
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
cfg := &QueryConfig{}
if tt.isEnv {
os.Setenv("API_KEY", tt.apiKey)
os.Setenv("ENGINE_ID", tt.engineID)
defer func() {
os.Unsetenv("API_KEY")
os.Unsetenv("ENGINE_ID")
}()
}
assert.NoError(t, initConfig(tt.cfgFile, cfg))
expected := &QueryConfig{
APIKey: tt.apiKey,
EngineID: tt.engineID,
}
assert.Equal(t, expected, cfg)
})
}
}
|
[
"\"IS_CREATE_DST_FILE\""
] |
[] |
[
"IS_CREATE_DST_FILE"
] |
[]
|
["IS_CREATE_DST_FILE"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
"github.com/google/go-github/github"
"github.com/nicolai86/github-rebase-bot/processors"
"github.com/nicolai86/github-rebase-bot/repo"
"golang.org/x/oauth2"
)
var (
token string
repos repositories
mergeLabel string
)
type repositories []repository
func (rs repositories) Find(owner, name string) *repository {
for i := range rs {
if rs[i].Owner == owner && rs[i].Name == name {
return &rs[i]
}
}
return nil
}
func (hps *repositories) String() string {
return fmt.Sprint(*hps)
}
func (hps *repositories) Set(str string) error {
for _, hp := range strings.Split(str, ",") {
var h repository
if err := h.Set(hp); err != nil {
return err
}
*hps = append(*hps, h)
}
return nil
}
type repository struct {
processors.Repository
hook *github.Hook
}
func (h *repository) String() string {
return fmt.Sprintf("%s/%s#%s", h.Owner, h.Name, h.Mainline)
}
func (h *repository) Set(str string) error {
var parts = strings.Split(str, "/")
if len(parts) != 2 {
return fmt.Errorf("Invalid repository %q. Must be owner/name", str)
}
h.Owner = parts[0]
parts = strings.Split(parts[1], "#")
h.Name = parts[0]
if len(parts) == 2 {
h.Mainline = parts[1]
}
if h.Mainline == "" {
h.Mainline = "master"
}
return nil
}
func main() {
var publicDNS string
flag.StringVar(&token, "github-token", "", "auth token for GH")
if token == "" {
token = os.Getenv("GITHUB_TOKEN")
}
var addr string
flag.Var(&repos, "repos", "github repos (owner/repo separated by commas)")
flag.StringVar(&publicDNS, "public-dns", "", "publicly accessible dns endpoint for webhook push")
flag.StringVar(&mergeLabel, "merge-label", "", "which label is checked to kick off the merge process")
flag.StringVar(&addr, "addr", "", "address to listen on")
flag.Parse()
if token == "" {
log.Fatal("Missing github token.")
}
if len(repos) == 0 {
log.Fatal("Missing repositories.")
}
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
tc := oauth2.NewClient(oauth2.NoContext, ts)
client := github.NewClient(tc)
user, _, err := client.Users.Get(context.Background(), "")
if err != nil {
log.Fatalf("resolving github user failed: %v", err)
}
username := *user.Login
log.Printf("Bot started for user %s.\n", username)
log.Printf("Using %q as merge-label.\n", mergeLabel)
if err := exec.Command("git", "config", "--global", "user.name", "rebase bot").Run(); err != nil {
log.Fatal("git config --global user.name failed: %q", err)
}
if err := exec.Command("git", "config", "--global", "user.email", "[email protected]").Run(); err != nil {
log.Fatal("git config --global user.email failed: %q", err)
}
for i, r := range repos {
url := fmt.Sprintf("https://%[email protected]/%s/%s.git", token, r.Owner, r.Name)
c, err := repo.Prepare(url, r.Mainline)
if err != nil {
log.Fatalf("prepare failed: %v", err)
}
repos[i].Cache = c
}
// On ^C, or SIGTERM handle exit.
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
signal.Notify(c, syscall.SIGTERM)
mux := http.NewServeMux()
for _, repo := range repos {
mux.HandleFunc(fmt.Sprintf("/events/%s/%s", repo.Owner, repo.Name), prHandler(repo, client))
}
srv := &http.Server{
Addr: addr,
Handler: mux,
}
log.Printf("Listening on %q\n", addr)
go func() {
srv.ListenAndServe()
}()
var h *github.Hook
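// When a public DNS endpoint is given, register (or reuse) a webhook per
// repository pointing back at this bot's /events endpoint.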
if publicDNS != "" {
for i, repo := range repos {
h, err = registerHook(client, publicDNS, repo.Owner, repo.Name)
if err != nil {
log.Fatal(err)
}
repos[i].hook = h
}
}
sig := <-c
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
srv.Shutdown(ctx)
cancel()
log.Printf("Received %s, exiting.", sig.String())
if h != nil {
for _, repo := range repos {
client.Repositories.DeleteHook(context.Background(), repo.Owner, repo.Name, *repo.hook.ID)
}
}
}
func createHook(client *github.Client, publicDNS, owner, repo, hookTarget string) (*github.Hook, error) {
hook, _, err := client.Repositories.CreateHook(context.Background(), owner, repo, &github.Hook{
Name: github.String("web"),
Active: github.Bool(true),
Config: map[string]interface{}{
"url": hookTarget,
"content_type": "json",
},
Events: []string{"*"},
})
return hook, err
}
func lookupHook(client *github.Client, owner, repo, hookTarget string) (*github.Hook, error) {
hooks, _, err := client.Repositories.ListHooks(context.Background(), owner, repo, &github.ListOptions{})
if err != nil {
return nil, err
}
var h *github.Hook
for _, hook := range hooks {
if url, ok := hook.Config["url"].(string); ok {
if strings.Contains(url, hookTarget) {
h = hook
break
}
}
}
return h, nil
}
func registerHook(client *github.Client, publicDNS, owner, repo string) (*github.Hook, error) {
hookTarget := fmt.Sprintf("%s/events/%s/%s", publicDNS, owner, repo)
hook, err := lookupHook(client, owner, repo, hookTarget)
if err != nil {
return nil, err
}
if hook == nil {
hook, err = createHook(client, publicDNS, owner, repo, hookTarget)
if err != nil {
return nil, err
}
}
return hook, nil
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
pkg/filesystem/local_directory_test.go
|
package filesystem_test
import (
"os"
"path/filepath"
"syscall"
"testing"
"github.com/buildbarn/bb-storage/pkg/filesystem"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
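// openTmpDir creates a per-test directory under TEST_TMPDIR (typically
// provided by the test runner, e.g. Bazel) and opens it as a Directory.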
func openTmpDir(t *testing.T) filesystem.Directory {
p := filepath.Join(os.Getenv("TEST_TMPDIR"), t.Name())
require.NoError(t, os.Mkdir(p, 0777))
d, err := filesystem.NewLocalDirectory(p)
require.NoError(t, err)
return d
}
func TestLocalDirectoryCreationFailure(t *testing.T) {
_, err := filesystem.NewLocalDirectory("/nonexistent")
require.True(t, os.IsNotExist(err))
}
func TestLocalDirectoryCreationSuccess(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Close())
}
func TestLocalDirectoryEnterBadName(t *testing.T) {
d := openTmpDir(t)
// Empty filename.
_, err := d.Enter("")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), err)
// Attempt to bypass directory hierarchy.
_, err = d.Enter(".")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), err)
_, err = d.Enter("..")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), err)
// Skipping of intermediate directory levels.
_, err = d.Enter("foo/bar")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryEnterNonExistent(t *testing.T) {
d := openTmpDir(t)
_, err := d.Enter("nonexistent")
require.True(t, os.IsNotExist(err))
require.NoError(t, d.Close())
}
func TestLocalDirectoryEnterFile(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
_, err = d.Enter("file")
require.Equal(t, syscall.ENOTDIR, err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryEnterSymlink(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/", "symlink"))
_, err := d.Enter("symlink")
require.Equal(t, syscall.ENOTDIR, err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryEnterSuccess(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("subdir", 0777))
sub, err := d.Enter("subdir")
require.NoError(t, err)
require.NoError(t, sub.Close())
require.NoError(t, d.Close())
}
func TestLocalDirectoryLinkBadName(t *testing.T) {
d := openTmpDir(t)
// Invalid source name.
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), d.Link("", d, "file"))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), d.Link(".", d, "file"))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), d.Link("..", d, "file"))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), d.Link("foo/bar", d, "file"))
// Invalid target name.
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), d.Link("file", d, ""))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), d.Link("file", d, "."))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), d.Link("file", d, ".."))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), d.Link("file", d, "foo/bar"))
require.NoError(t, d.Close())
}
func TestLocalDirectoryLinkNotFound(t *testing.T) {
d := openTmpDir(t)
require.Equal(t, syscall.ENOENT, d.Link("source", d, "target"))
require.NoError(t, d.Close())
}
func TestLocalDirectoryLinkDirectory(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("source", 0777))
require.True(t, os.IsPermission(d.Link("source", d, "target")))
require.NoError(t, d.Close())
}
func TestLocalDirectoryLinkTargetExists(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("source", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
f, err = d.OpenFile("target", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
require.True(t, os.IsExist(d.Link("source", d, "target")))
require.NoError(t, d.Close())
}
func TestLocalDirectoryLinkSuccess(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("source", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, d.Link("source", d, "target"))
require.NoError(t, d.Close())
}
func TestLocalDirectoryLstatBadName(t *testing.T) {
d := openTmpDir(t)
_, err := d.Lstat("")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), err)
_, err = d.Lstat(".")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), err)
_, err = d.Lstat("..")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), err)
_, err = d.Lstat("foo/bar")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryLstatNonExistent(t *testing.T) {
d := openTmpDir(t)
_, err := d.Lstat("hello")
require.True(t, os.IsNotExist(err))
require.NoError(t, d.Close())
}
func TestLocalDirectoryLstatFile(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0644)
require.NoError(t, err)
require.NoError(t, f.Close())
fi, err := d.Lstat("file")
require.NoError(t, err)
require.Equal(t, "file", fi.Name())
require.Equal(t, filesystem.FileTypeRegularFile, fi.Type())
require.NoError(t, d.Close())
}
func TestLocalDirectoryLstatSymlink(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/", "symlink"))
fi, err := d.Lstat("symlink")
require.NoError(t, err)
require.Equal(t, "symlink", fi.Name())
require.Equal(t, filesystem.FileTypeSymlink, fi.Type())
require.NoError(t, d.Close())
}
func TestLocalDirectoryLstatDirectory(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("directory", 0700))
fi, err := d.Lstat("directory")
require.NoError(t, err)
require.Equal(t, "directory", fi.Name())
require.Equal(t, filesystem.FileTypeDirectory, fi.Type())
require.NoError(t, d.Close())
}
func TestLocalDirectoryMkdirBadName(t *testing.T) {
d := openTmpDir(t)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), d.Mkdir("", 0777))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), d.Mkdir(".", 0777))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), d.Mkdir("..", 0777))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), d.Mkdir("foo/bar", 0777))
require.NoError(t, d.Close())
}
func TestLocalDirectoryMkdirExisting(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/", "symlink"))
require.True(t, os.IsExist(d.Mkdir("symlink", 0777)))
require.NoError(t, d.Close())
}
func TestLocalDirectoryMkdirSuccess(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("directory", 0777))
require.NoError(t, d.Close())
}
func TestLocalDirectoryOpenFileBadName(t *testing.T) {
d := openTmpDir(t)
_, err := d.OpenFile("", os.O_CREATE|os.O_WRONLY, 0666)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), err)
_, err = d.OpenFile(".", os.O_CREATE|os.O_WRONLY, 0666)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), err)
_, err = d.OpenFile("..", os.O_CREATE|os.O_WRONLY, 0666)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), err)
_, err = d.OpenFile("foo/bar", os.O_CREATE|os.O_WRONLY, 0666)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryOpenFileExistent(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
_, err = d.OpenFile("file", os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
require.True(t, os.IsExist(err))
require.NoError(t, d.Close())
}
func TestLocalDirectoryOpenFileNonExistent(t *testing.T) {
d := openTmpDir(t)
_, err := d.OpenFile("file", os.O_RDONLY, 0666)
require.True(t, os.IsNotExist(err))
require.NoError(t, d.Close())
}
func TestLocalDirectoryOpenFileSymlink(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/etc/passwd", "symlink"))
_, err := d.OpenFile("symlink", os.O_RDONLY, 0)
require.Equal(t, syscall.ELOOP, err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryOpenFileSuccess(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, d.Close())
}
func TestLocalDirectoryReadDir(t *testing.T) {
d := openTmpDir(t)
// Prepare file system.
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, d.Mkdir("directory", 0777))
require.NoError(t, d.Symlink("/", "symlink"))
// Validate directory listing.
files, err := d.ReadDir()
require.NoError(t, err)
require.Equal(t, 3, len(files))
require.Equal(t, "directory", files[0].Name())
require.Equal(t, filesystem.FileTypeDirectory, files[0].Type())
require.Equal(t, "file", files[1].Name())
require.Equal(t, filesystem.FileTypeRegularFile, files[1].Type())
require.Equal(t, "symlink", files[2].Name())
require.Equal(t, filesystem.FileTypeSymlink, files[2].Type())
require.NoError(t, d.Close())
}
func TestLocalDirectoryReadlinkBadName(t *testing.T) {
d := openTmpDir(t)
_, err := d.Readlink("")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), err)
_, err = d.Readlink(".")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), err)
_, err = d.Readlink("..")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), err)
_, err = d.Readlink("foo/bar")
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryReadlinkNonExistent(t *testing.T) {
d := openTmpDir(t)
_, err := d.Readlink("nonexistent")
require.True(t, os.IsNotExist(err))
require.NoError(t, d.Close())
}
func TestLocalDirectoryReadlinkDirectory(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("directory", 0777))
_, err := d.Readlink("directory")
require.Equal(t, syscall.EINVAL, err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryReadlinkFile(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
_, err = d.Readlink("file")
require.Equal(t, syscall.EINVAL, err)
require.NoError(t, d.Close())
}
func TestLocalDirectoryReadlinkSuccess(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/foo/bar/baz", "symlink"))
target, err := d.Readlink("symlink")
require.NoError(t, err)
require.Equal(t, "/foo/bar/baz", target)
require.NoError(t, d.Close())
}
func TestLocalDirectoryRemoveBadName(t *testing.T) {
d := openTmpDir(t)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), d.Remove(""))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), d.Remove("."))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), d.Remove(".."))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), d.Remove("foo/bar"))
require.NoError(t, d.Close())
}
func TestLocalDirectoryRemoveNonExistent(t *testing.T) {
d := openTmpDir(t)
require.True(t, os.IsNotExist(d.Remove("nonexistent")))
require.NoError(t, d.Close())
}
func TestLocalDirectoryRemoveDirectory(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("directory", 0777))
require.NoError(t, d.Remove("directory"))
require.NoError(t, d.Close())
}
func TestLocalDirectoryRemoveFile(t *testing.T) {
d := openTmpDir(t)
f, err := d.OpenFile("file", os.O_CREATE|os.O_WRONLY, 0666)
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, d.Remove("file"))
require.NoError(t, d.Close())
}
func TestLocalDirectoryRemoveSymlink(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/", "symlink"))
require.NoError(t, d.Remove("symlink"))
require.NoError(t, d.Close())
}
func TestLocalDirectorySymlinkBadName(t *testing.T) {
d := openTmpDir(t)
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"\""), d.Symlink("/whatever", ""))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \".\""), d.Symlink("/whatever", "."))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"..\""), d.Symlink("/whatever", ".."))
require.Equal(t, status.Error(codes.InvalidArgument, "Invalid filename: \"foo/bar\""), d.Symlink("/whatever", "foo/bar"))
require.NoError(t, d.Close())
}
func TestLocalDirectorySymlinkExistent(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Mkdir("directory", 0777))
require.True(t, os.IsExist(d.Symlink("/", "directory")))
require.NoError(t, d.Close())
}
func TestLocalDirectorySymlinkSuccess(t *testing.T) {
d := openTmpDir(t)
require.NoError(t, d.Symlink("/", "symlink"))
require.NoError(t, d.Close())
}
// TODO(edsch): Add testing coverage for RemoveAll().
|
[
"\"TEST_TMPDIR\""
] |
[] |
[
"TEST_TMPDIR"
] |
[]
|
["TEST_TMPDIR"]
|
go
| 1 | 0 | |
mdetsims/end2end_sim_utils.py
|
import logging
import os
import functools
import numpy as np
import ngmix
import galsim
import fitsio
from .ps_psf import PowerSpectrumPSF
from .masking import generate_bad_columns, generate_cosmic_rays
from .interp import interpolate_image_and_noise
from .cs_interp import interpolate_image_and_noise_cs
from .symmetrize import symmetrize_bad_mask
from .coadd import coadd_image_noise_interpfrac, coadd_psfs
from .wcs_gen import gen_affine_wcs
from .defaults import WLDEBLEND_DES_FACTOR, WLDEBLEND_LSST_FACTOR
LOGGER = logging.getLogger(__name__)
@functools.lru_cache(maxsize=8)
def _cached_catalog_read(fname):
return fitsio.read(fname)
class End2EndSim(object):
"""An end-to-end simulation for metadetect testing.
Parameters
----------
rng : np.random.RandomState
An RNG to use for drawing the objects.
gal_type : str
The kind of galaxy to simulate.
psf_type : str
The kind of PSF to simulate.
scale : float
The pixel scale of the image.
position_angle_range : tuple of floats
The range of position angles to select from for rotating the image
WCS coordinates.
scale_frac_std : float
The fractional variance in the image pixel scale.
wcs_shear_std : float
The standard deviation of the Gaussian shear put into the SE WCS
solutions.
wcs_dither_range : tuple of floats
The lowest and highest dither in coadd pixels of the center of each
SE image.
shear_scene : bool, optional
Whether or not to shear the full scene.
n_coadd : int, optional
The number of single epoch images in a coadd per band.
g1 : float, optional
The simulated shear for the 1-axis.
g2 : float, optional
The simulated shear for the 2-axis.
dim : int, optional
The total dimension of the coadd image.
buff : int, optional
The width of the buffer region in the coadd image.
noise : float or list of floats, optional
The noise for a single epoch image. Can be different per band.
ngal : float, optional
The number of objects to simulate per arcminute.
n_bands : int, optional
The number of bands to simulate.
gal_grid : int or None
If not `None`, galaxies are laid out on a grid of `gal_grid` x
`gal_grid` dimensions in the central part of the image.
psf_kws : dict or None, optional
Extra keyword arguments to pass to the constructors for PSF objects.
See the doc strings of the PSF object `PowerSpectrumPSF`. You can also
supply the keywords `fwhm`, `fwhm_frac_std` and `shear_std` to set
the overall size of the PSF, the fractional variance in the size, and
the variance in the PSF shape for each epoch.
gal_kws : dict or None, optional
Extra keyword arguments to use when building galaxy objects.
For gal_type == 'wldeblend', these keywords can be
'survey_name' : str
The name of survey in all caps, e.g. 'DES', 'LSST'.
'catalog' : str
A path to the catalog to draw from. If this keyword is not
given, you need to have the one square degree catsim catalog
in the current working directory or in the directory given by
the environment variable 'CATSIM_DIR'.
'bands' : list of str
A list of strings with the desired bands.
mask_and_interp : bool, optional
Apply fake pixel masking for bad columns and cosmic rays and then
interpolate them.
add_bad_columns : bool, optional
If False, do not add bad columns. Otherwise they will be added when
`mask_and_interp` is True.
add_cosmic_rays : bool, optional
If False, do not add cosmic rays. Otherwise they will be added when
`mask_and_interp` is True.
bad_columns_kws : dict, optional
A set of keyword arguments to pass to the bad column generator.
symmetrize_masking : bool, optional
Symmetrize the masked regions with a 90 degree rotation.
interpolation_type : str, optional
One of
'cubic' : a 2d cubic interpolation
'cs-fourier' : a Fourier basis compressed sensing interpolant
The default is 'cubic'.
ngal_factor : float, optional
A factor to change the number density in the sims. It is set to 0.6
automatically when using the wldeblend galaxy type for DES and 0.45
when using this type for LSST.
Methods
-------
get_mbobs()
Make a simulated MultiBandObsList for metadetect.
Attributes
----------
area_sqr_arcmin : float
The effective area simulated in square arcmin assuming the pixel
scale is in arcsec.
Notes
-----
The valid kinds of galaxies are
'exp' : Sersic objects at very high s/n with n = 1
'wldeblend' : a sample drawn from the WeakLensingDeblending package
The valid kinds of PSFs are
'gauss' : a FWHM 0.9 arcsecond Gaussian
'ps' : a PSF from power spectrum model for shape variation and
cubic model for size variation
"""
def __init__(
self, *,
rng, gal_type, psf_type,
scale,
position_angle_range=(0, 0),
scale_frac_std=0,
wcs_shear_std=0,
wcs_dither_range=(0, 0),
shear_scene=True,
n_coadd=1,
g1=0.02, g2=0.0,
dim=225, buff=25,
noise=180,
ngal=45.0,
n_bands=1,
gal_grid=None,
psf_kws=None,
gal_kws=None,
mask_and_interp=False,
add_bad_columns=True,
add_cosmic_rays=True,
bad_columns_kws=None,
symmetrize_masking=True,
interpolation_type='cubic',
ngal_factor=None):
self.rng = rng
self.noise_rng = np.random.RandomState(seed=rng.randint(1, 2**32-1))
self.gal_type = gal_type
self.psf_type = psf_type
self.n_coadd = n_coadd
self.g1 = g1
self.g2 = g2
self.shear_scene = shear_scene
self.dim = dim
self.buff = buff
self.ngal = ngal
self.gal_grid = gal_grid
self.im_cen = (dim - 1) / 2
self.psf_kws = psf_kws
self.gal_kws = gal_kws
self.mask_and_interp = mask_and_interp
self.add_bad_columns = add_bad_columns
self.add_cosmic_rays = add_cosmic_rays
self.bad_columns_kws = bad_columns_kws or {}
self.interpolation_type = interpolation_type
self.symmetrize_masking = symmetrize_masking
self.noise = np.array(noise) * np.ones(n_bands)
self.ngal_factor = ngal_factor
self.area_sqr_arcmin = ((self.dim - 2*self.buff) * scale / 60)**2
# the SE image could be rotated, so we make it big enough to cover the
# whole coadd region
dfac = np.sqrt(2)
self.se_dim = int(np.ceil(self.dim * dfac)) + 10
if self.se_dim % 2 == 0:
self.se_dim = self.se_dim + 1
self._galsim_rng = galsim.BaseDeviate(
seed=self.rng.randint(low=1, high=2**32-1))
# wcs info
self.scale = scale
self.coadd_wcs = galsim.PixelScale(self.scale)
self.position_angle_range = position_angle_range
self.scale_frac_std = scale_frac_std
self.wcs_shear_std = wcs_shear_std
self.wcs_dither_range = wcs_dither_range
# frac of a single dimension that is used for drawing objects
frac = 1.0 - self.buff * 2 / self.dim
# half of the width of center of the patch that has objects
self.pos_width = self.dim * frac * 0.5 * self.scale
# for wldeblend galaxies, we have to adjust some of the input
# parameters since they are computed self-consistently from the
# input catalog and/or package defaults
if self.gal_type == 'wldeblend':
self._extra_init_for_wldeblend()
# given the input number of objects to simulate per square arcminute,
# compute the number we actually need
if self.ngal_factor is None:
self.ngal_factor = 1
LOGGER.info('ngal adjustment factor: %f', self.ngal_factor)
self.nobj = int(
self.ngal * self.ngal_factor *
(self.dim * self.scale / 60 * frac)**2)
self.shear_mat = galsim.Shear(g1=self.g1, g2=self.g2).getMatrix()
# reset nobj to the number in a grid if we are using one
if self.gal_grid is not None:
self.nobj = self.gal_grid * self.gal_grid
self.n_bands = len(self.noise)
# because of the caching of psfs and the wcs below, we only
# allow the sim class to be used once
# this attribute gets set to True after it is used
self.called = False
LOGGER.info('simulating %d bands', self.n_bands)
def _extra_init_for_wldeblend(self):
# guard the import here
import descwl
# make sure to find the proper catalog
gal_kws = self.gal_kws or {}
if 'catalog' not in gal_kws:
fname = os.path.join(
os.environ.get('CATSIM_DIR', '.'),
'OneDegSq.fits')
else:
fname = gal_kws['catalog']
self._wldeblend_cat = _cached_catalog_read(fname)
self._wldeblend_cat['pa_disk'] = self.rng.uniform(
low=0.0, high=360.0, size=self._wldeblend_cat.size)
self._wldeblend_cat['pa_bulge'] = self._wldeblend_cat['pa_disk']
# set the survey name and exposure times
if 'survey_name' not in gal_kws:
survey_name = 'DES'
else:
survey_name = gal_kws['survey_name']
if survey_name == 'DES':
exptime = 90
if self.n_coadd != 10:
LOGGER.warning(
'simulating DES with descwl - '
'input n_coadd != 10!')
elif survey_name == 'LSST':
exptime = 15
if self.n_coadd != 360:
LOGGER.warning(
'simulating LSST with descwl - '
'input n_coadd != 360!')
else:
raise ValueError("Survey '%s' is not valid!" % survey_name)
bands = gal_kws.get('bands', ['r', 'i', 'z'])
LOGGER.debug('simulating bands: %s', bands)
self._surveys = []
self._builders = []
noises = []
for iband, band in enumerate(bands):
# make the survey and code to build galaxies from it
pars = descwl.survey.Survey.get_defaults(
survey_name=survey_name,
filter_band=band)
pars['survey_name'] = survey_name
pars['filter_band'] = band
pars['pixel_scale'] = self.scale
# note in the way we call the descwl package, the image width
# and height are not actually used
pars['image_width'] = self.dim
pars['image_height'] = self.dim
# reset the exposure times as needed
if survey_name == 'DES':
pars['exposure_time'] = exptime
elif survey_name == 'LSST':
pars['exposure_time'] = pars['exposure_time'] / self.n_coadd
# some versions take in the PSF and will complain if it is not
# given
try:
_svy = descwl.survey.Survey(**pars)
except Exception:
pars['psf_model'] = None
_svy = descwl.survey.Survey(**pars)
self._surveys.append(_svy)
self._builders.append(descwl.model.GalaxyBuilder(
survey=self._surveys[iband],
no_disk=False,
no_bulge=False,
no_agn=False,
verbose_model=False))
noises.append(np.sqrt(self._surveys[iband].mean_sky_level))
self.noise = noises
# when we sample from the catalog, we need to pull the right number
# of objects. Since the default catalog is one square degree
# and we fill a fraction of the image, we need to set the
# base source density `ngal`. This is in units of number per
# square arcminute.
self.ngal = self._wldeblend_cat.size / (60 * 60)
# we use a factor of 0.6 to make sure the depth matches that in
# the real data
if self.ngal_factor is None:
if survey_name == 'DES':
self.ngal_factor = WLDEBLEND_DES_FACTOR
elif survey_name == 'LSST':
self.ngal_factor = WLDEBLEND_LSST_FACTOR
else:
raise ValueError("Survey '%s' is not valid!" % survey_name)
LOGGER.info('catalog density: %f per sqr arcmin', self.ngal)
def get_mbobs(self, return_band_images=False):
"""Make a simulated MultiBandObsList for metadetect.
The underlying simulation is done per epoch and then coadded.
Parameters
----------
return_band_images : bool
If True, return a list of list of numpy arrays holding the
SE images in each band.
Returns
-------
mbobs : MultiBandObsList
"""
assert not self.called, "you can only call a sim object once!"
self.called = True
all_band_obj, uv_positions = self._get_band_objects()
method = 'auto'
LOGGER.debug("using draw method '%s'", method)
mbobs = ngmix.MultiBandObsList()
if return_band_images:
band_images = []
for band in range(self.n_bands):
# generate the data I need
band_objects = [o[band] for o in all_band_obj]
wcs_objs = self._get_all_epoch_wcs_objs(band)
# draw the images
se_images = []
for epoch, wcs in enumerate(wcs_objs):
se_im = self._build_se_image(
band=band, epoch=epoch, wcs=wcs,
band_objects=band_objects,
uv_positions=uv_positions, method=method)
se_images.append(se_im.array)
if return_band_images:
band_images.append(se_images)
# add noise, maybe mask them, get noise/wt images and coadd
(coadd_im, coadd_noise, coadd_intp,
coadd_bmask, coadd_wgts) = self._add_noise_and_coadd(
band=band, wcs_objs=wcs_objs, se_images=se_images)
# coadd the PSFs
coadd_psf = self._coadd_psfs(
band=band, wcs_objs=wcs_objs,
coadd_wgts=coadd_wgts, method=method)
# make the final obs
obs_jac = ngmix.jacobian.Jacobian(
row=self.im_cen,
col=self.im_cen,
wcs=self.coadd_wcs.jacobian())
psf_jac = ngmix.jacobian.Jacobian(
row=self._psf_cen,
col=self._psf_cen,
wcs=self.coadd_wcs.jacobian())
psf_obs = ngmix.Observation(
coadd_psf,
weight=0.0 * coadd_psf + 1.0 / self.noise[band]**2,
jacobian=psf_jac)
obs = ngmix.Observation(
coadd_im,
weight=0.0 * coadd_im + 1.0 / np.var(coadd_noise),
bmask=coadd_bmask,
ormask=coadd_bmask.copy(),
jacobian=obs_jac,
psf=psf_obs,
noise=coadd_noise)
obs.meta['fmask'] = coadd_intp
obslist = ngmix.ObsList()
obslist.append(obs)
mbobs.append(obslist)
if return_band_images:
return mbobs, band_images
else:
return mbobs
def _build_se_image(self, *, band, epoch, wcs,
band_objects, uv_positions, method):
se_im = galsim.ImageD(
nrow=self.se_dim, ncol=self.se_dim, xmin=0, ymin=0)
for obj, uv_pos in zip(band_objects, uv_positions):
# deal with WCS stuff
pos = wcs.toImage(uv_pos)
local_wcs = wcs.local(world_pos=uv_pos)
# get the psf
psf = self._get_psf_model(
band=band, epoch=epoch, x=pos.x, y=pos.y)
# draw with setup_only to get the image size
_im = galsim.Convolve(obj, psf).drawImage(
wcs=local_wcs,
method=method,
setup_only=True).array
assert _im.shape[0] == _im.shape[1]
# now get location of the stamp
x_ll = int(pos.x - (_im.shape[1] - 1)/2)
y_ll = int(pos.y - (_im.shape[0] - 1)/2)
# get the offset of the center
dx = pos.x - (x_ll + (_im.shape[1] - 1)/2)
dy = pos.y - (y_ll + (_im.shape[0] - 1)/2)
# draw and set the proper origin
stamp = galsim.Convolve(obj, psf).drawImage(
nx=_im.shape[1],
ny=_im.shape[0],
wcs=local_wcs,
offset=galsim.PositionD(x=dx, y=dy),
method=method)
stamp.setOrigin(x_ll, y_ll)
# intersect and add to total image
overlap = stamp.bounds & se_im.bounds
se_im[overlap] += stamp[overlap]
return se_im
def _add_noise_and_coadd(self, *, band, wcs_objs, se_images):
se_noises = []
se_interp_fracs = []
coadd_wgts = []
final_se_images = []
for se_im in se_images:
se_im += self.noise_rng.normal(
scale=self.noise[band], size=se_im.shape)
se_nse = self.noise_rng.normal(size=se_im.shape) * self.noise[band]
if self.mask_and_interp:
final_se_im, se_nse, bad_msk = self._mask_and_interp(
se_im, se_nse)
se_interp_frac = bad_msk.astype(np.float32)
else:
final_se_im = se_im
se_interp_frac = np.zeros_like(se_im)
final_se_images.append(final_se_im)
se_noises.append(se_nse)
se_interp_fracs.append(se_interp_frac)
coadd_wgts.append(1.0 / self.noise[band]**2)
coadd_wgts = np.array(coadd_wgts)
# coadd them
coadd_im, coadd_noise, coadd_intp = coadd_image_noise_interpfrac(
final_se_images, se_noises, se_interp_fracs, wcs_objs,
coadd_wgts, self.scale, self.dim)
coadd_bmask = np.zeros_like(coadd_im, dtype=np.int32)
coadd_bmask[coadd_intp > 0] = 1
return coadd_im, coadd_noise, coadd_intp, coadd_bmask, coadd_wgts
def _mask_and_interp(self, image, noise):
LOGGER.debug('applying masking and interpolation')
# here we make the mask
        bad_mask = np.zeros(image.shape, dtype=bool)
if self.add_bad_columns:
bad_mask |= generate_bad_columns(
image.shape, rng=self.noise_rng,
mean_bad_cols=1,
**self.bad_columns_kws)
if self.add_cosmic_rays:
bad_mask |= generate_cosmic_rays(
image.shape, rng=self.noise_rng,
mean_cosmic_rays=1)
# applies a 90 degree rotation to make it symmetric
if self.symmetrize_masking:
symmetrize_bad_mask(bad_mask)
# muck the image
image[bad_mask] = 1e12
        # now we interpolate the pixels in the noise and image fields
        # that are masked
if self.interpolation_type == 'cs-fourier':
_im, _nse = interpolate_image_and_noise_cs(
image=image,
noise=noise,
bad_mask=bad_mask,
rng=self.noise_rng,
c=1000,
sampling_rate=1)
elif self.interpolation_type == 'cubic':
_im, _nse = interpolate_image_and_noise(
image=image,
noise=noise,
bad_mask=bad_mask,
rng=self.noise_rng)
else:
raise ValueError(
'interpolation "%s" is not defined' % self.interpolation_type)
return _im, _nse, bad_mask.astype(np.int32)
def _coadd_psfs(self, *, band, wcs_objs, coadd_wgts, method):
psf_dim = 53
self._psf_cen = (psf_dim - 1)/2
coadd_offset = int(self.im_cen - (psf_dim - 1) / 2)
se_psf_dim = int(np.ceil(psf_dim * np.sqrt(2)))
if se_psf_dim % 2 == 0:
se_psf_dim += 1
se_psf_half = int((se_psf_dim - 1) / 2)
uv_pos = galsim.PositionD(
x=self.im_cen * self.scale, y=self.im_cen * self.scale)
se_offsets = []
se_psfs = []
for epoch, wcs in enumerate(wcs_objs):
pos = wcs.toImage(uv_pos)
local_wcs = wcs.local(world_pos=uv_pos)
# this is the sub pixel offset
dx = pos.x - int(pos.x+0.5)
dy = pos.y - int(pos.y+0.5)
# these are the starting location of the PSF image
se_offsets.append((
int(pos.x+0.5) - se_psf_half,
int(pos.y+0.5) - se_psf_half
))
psf = self._get_psf_model(
band=band, epoch=epoch, x=pos.x, y=pos.y)
se_psfs.append(psf.drawImage(
nx=se_psf_dim,
ny=se_psf_dim,
wcs=local_wcs,
method=method,
offset=galsim.PositionD(x=dx, y=dy)).array.copy())
coadd_psf = coadd_psfs(
se_psfs, wcs_objs, coadd_wgts,
self.scale, psf_dim, coadd_offset, se_offsets)
return coadd_psf / np.sum(coadd_psf)
def _get_all_epoch_wcs_objs(self, band):
if not hasattr(self, '_band_wcs_objs'):
se_cen = (self.se_dim - 1) / 2
world_origin = galsim.PositionD(
x=self.im_cen * self.scale,
y=self.im_cen * self.scale)
origin = galsim.PositionD(x=se_cen, y=se_cen)
self._band_wcs_objs = []
for _ in range(self.n_bands):
wcs_objs = []
for _ in range(self.n_coadd):
wcs_objs.append(gen_affine_wcs(
rng=self.rng,
position_angle_range=self.position_angle_range,
dither_range=self.wcs_dither_range,
scale=self.scale,
scale_frac_std=self.scale_frac_std,
shear_std=self.wcs_shear_std,
world_origin=world_origin,
origin=origin # this gets a dither
))
self._band_wcs_objs.append(wcs_objs)
return self._band_wcs_objs[band]
def _get_dudv(self):
if self.gal_grid is not None:
yind, xind = np.unravel_index(
self._gal_grid_ind, (self.gal_grid, self.gal_grid))
dg = self.pos_width * 2 / self.gal_grid
self._gal_grid_ind += 1
return (
yind * dg + dg/2 - self.pos_width,
xind * dg + dg/2 - self.pos_width)
else:
return self.rng.uniform(
low=-self.pos_width,
high=self.pos_width,
size=2)
def _get_nobj(self):
if self.gal_grid is not None:
return self.nobj
else:
return self.rng.poisson(self.nobj)
def _get_gal_exp(self):
flux = 10**(0.4 * (30 - 18))
half_light_radius = 0.5
_gal = []
for _ in range(self.n_bands):
obj = galsim.Sersic(
half_light_radius=half_light_radius,
n=1,
).withFlux(flux)
_gal.append(obj)
return _gal
def _get_gal_wldeblend(self):
rind = self.rng.choice(self._wldeblend_cat.size)
angle = self.rng.uniform() * 360
gals = [
self._builders[band].from_catalog(
self._wldeblend_cat[rind], 0, 0,
self._surveys[band].filter_band).model.rotate(
angle * galsim.degrees)
for band in range(len(self._builders))]
return gals
def _get_band_objects(self):
"""Get a list of effective PSF-convolved galsim images w/ their
offsets in the image.
Returns
-------
all_band_objs : list of lists
A list of lists of objects in each band.
uv_positions : list of galsim.PositionD
A list of galsim positions for each object.
"""
all_band_obj = []
uv_positions = []
nobj = self._get_nobj()
if self.gal_grid is not None:
self._gal_grid_ind = 0
for i in range(nobj):
# unsheared offset from center of uv image
du, dv = self._get_dudv()
# get the galaxy
if self.gal_type == 'exp':
gals = self._get_gal_exp()
elif self.gal_type == 'wldeblend':
gals = self._get_gal_wldeblend()
else:
raise ValueError('gal_type "%s" not valid!' % self.gal_type)
# compute the final image position
if self.shear_scene:
sdu, sdv = np.dot(self.shear_mat, np.array([du, dv]))
else:
sdu = du
sdv = dv
pos = galsim.PositionD(
x=sdu + self.im_cen * self.scale,
y=sdv + self.im_cen * self.scale)
# shear the galaxy
_obj = []
for gal in gals:
_obj.append(gal.shear(g1=self.g1, g2=self.g2))
all_band_obj.append(_obj)
uv_positions.append(pos)
return all_band_obj, uv_positions
def _make_ps_psfs(self):
kwargs = self.psf_kws or {}
self._ps_psfs = []
for band in range(self.n_bands):
band_psfs = []
for epoch in range(self.n_coadd):
band_psfs.append(
PowerSpectrumPSF(
rng=self.rng,
im_width=self.dim,
buff=self.dim/2,
scale=self.scale,
**kwargs)
)
self._ps_psfs.append(band_psfs)
def _get_psf_model(self, *, band, epoch, x, y):
if not hasattr(self, '_psf_fwhms'):
kws = self.psf_kws or {}
fwhm = kws.get('fwhm', 0.9)
fwhm_std = kws.get('fwhm_frac_std', 0.0)
shear_std = kws.get('shear_std', 0.0)
self._psf_fwhm = fwhm
self._psf_fwhms = []
self._psf_shears = []
for _ in range(self.n_bands):
self._psf_fwhms.append(
(1.0 + self.rng.normal(size=self.n_coadd) * fwhm_std) *
fwhm)
g1s = self.rng.normal(size=self.n_coadd) * shear_std
g2s = self.rng.normal(size=self.n_coadd) * shear_std
self._psf_shears.append([
galsim.Shear(g1=g1, g2=g2) for g1, g2 in zip(g1s, g2s)])
if self.psf_type == 'gauss':
psf = galsim.Gaussian(
fwhm=self._psf_fwhms[band][epoch]
).shear(
self._psf_shears[band][epoch])
return psf
elif self.psf_type == 'wldeblend':
return self._surveys[band].psf_model.dilate(
self._psf_fwhms[band][epoch] / self._psf_fwhm
).shear(
self._psf_shears[band][epoch])
elif self.psf_type == 'ps':
if not hasattr(self, '_ps_psfs'):
self._make_ps_psfs()
return self._ps_psfs[band][epoch].getPSF(
galsim.PositionD(x=x, y=y))
else:
raise ValueError('psf_type "%s" not valid!' % self.psf_type)
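# Illustrative sketch (not part of the original simulation class): assuming galsim
# and numpy (np) are imported at module top as in the surrounding file, this mirrors
# the 'gauss' branch of _get_psf_model, jittering the FWHM by a fractional std and
# applying a small random shear before drawing one epoch's PSF image.
def _example_gauss_psf(rng, *, fwhm=0.9, fwhm_frac_std=0.02, shear_std=0.01, scale=0.2):
    fwhm_epoch = fwhm * (1.0 + rng.normal() * fwhm_frac_std)
    g1, g2 = rng.normal(size=2) * shear_std
    psf = galsim.Gaussian(fwhm=fwhm_epoch).shear(galsim.Shear(g1=g1, g2=g2))
    # draw on a small postage stamp at the given pixel scale
    return psf.drawImage(nx=33, ny=33, scale=scale).array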
|
[] |
[] |
[
"CATSIM_DIR"
] |
[]
|
["CATSIM_DIR"]
|
python
| 1 | 0 | |
Algorithms/Implementation/Between_Two_Sets.java
|
import java.io.*;
import java.math.*;
import java.text.*;
import java.util.*;
import java.util.regex.*;
public class Solution {
    // Brute force: take the LCM of a and the GCD of b, then count multiples of the LCM that evenly divide the GCD.
static int getTotalX(int[] a, int[] b) {
int lcm = lcm(a);
int gcd = gcd(b);
int resultCount = 0;
for (int i = lcm; i <= gcd; i += lcm) {
if (gcd % i == 0) {
resultCount++;
}
}
return resultCount;
}
private static int gcd(int[] arr) {
int result = arr[0];
for (int i = 1; i < arr.length; i++) {
result = gcd(result, arr[i]);
}
return result;
}
private static int gcd(int a, int b) {
int min = a < b ? a : b;
int gcd = 1;
for (int i = 1; i <= min; i++) {
if (a % i == 0 && b % i == 0) {
gcd = i;
}
}
return gcd;
}
private static int lcm(int[] arr) {
int result = arr[0];
for (int i = 1; i < arr.length; i++) {
result = lcm(result, arr[i]);
}
return result;
}
private static int lcm(int a, int b) {
return (a * b) / gcd(a, b);
}
private static final Scanner scan = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bw = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nm = scan.nextLine().split(" ");
int n = Integer.parseInt(nm[0].trim());
int m = Integer.parseInt(nm[1].trim());
int[] a = new int[n];
String[] aItems = scan.nextLine().split(" ");
for (int aItr = 0; aItr < n; aItr++) {
int aItem = Integer.parseInt(aItems[aItr].trim());
a[aItr] = aItem;
}
int[] b = new int[m];
String[] bItems = scan.nextLine().split(" ");
for (int bItr = 0; bItr < m; bItr++) {
int bItem = Integer.parseInt(bItems[bItr].trim());
b[bItr] = bItem;
}
int total = getTotalX(a, b);
bw.write(String.valueOf(total));
bw.newLine();
bw.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
commands/v2/org_users_command.go
|
package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/commands"
"code.cloudfoundry.org/cli/commands/flags"
)
type OrgUsersCommand struct {
RequiredArgs flags.Organization `positional-args:"yes"`
AllUsers bool `short:"a" description:"List all users in the org"`
usage interface{} `usage:"CF_NAME org-users ORG"`
relatedCommands interface{} `related_commands:"orgs"`
}
func (_ OrgUsersCommand) Setup(config commands.Config, ui commands.UI) error {
return nil
}
func (_ OrgUsersCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
|
[
"\"CF_TRACE\""
] |
[] |
[
"CF_TRACE"
] |
[]
|
["CF_TRACE"]
|
go
| 1 | 0 | |
main_test.go
|
// +build !integration integration
package main_test
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
const failure = `Environment variables from GitLab detected in tests,
these should be cleared: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27965`
func TestEnvVariablesCleaned(t *testing.T) {
assert.Empty(t, os.Getenv("CI_GITLAB"), failure)
assert.Empty(t, os.Getenv("CI_API_V4_URL"), failure)
assert.NotEmpty(t, os.Getenv("CI"), "If running locally, use `export CI=0` explicitly.")
}
|
[
"\"CI_GITLAB\"",
"\"CI_API_V4_URL\"",
"\"CI\""
] |
[] |
[
"CI_GITLAB",
"CI_API_V4_URL",
"CI"
] |
[]
|
["CI_GITLAB", "CI_API_V4_URL", "CI"]
|
go
| 3 | 0 | |
profiles_api/wsgi.py
|
"""
WSGI config for profiles_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'profiles_api.settings')
application = get_wsgi_application()
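# Usage note (a sketch, assuming a WSGI server such as gunicorn is installed):
# the `application` callable above is what the server imports, e.g.
#   gunicorn profiles_api.wsgi:application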
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
codec/abi_decoder_test.go
|
package codec
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"os"
"regexp"
"strconv"
"strings"
"testing"
"time"
pbcodec "github.com/dfuse-io/dfuse-eosio/pb/dfuse/eosio/codec/v1"
"github.com/streamingfast/jsonpb"
"github.com/eoscanada/eos-go"
"github.com/eoscanada/eos-go/system"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestABIDecoder(t *testing.T) {
type expectation struct {
path string
		// If value is a hex string, it expects `rawData` to match it; otherwise it expects `jsonData` to match it
value string
}
type testData struct {
name string
abiDumps map[string]*eos.ABI
blocks []*pbcodec.Block
expectations []expectation
}
in := func(blocks ...*pbcodec.Block) []*pbcodec.Block {
return blocks
}
tokenABI1 := readABI(t, "token.1.abi.json")
tokenABI2 := readABI(t, "token.2.abi.json")
testABI1 := readABI(t, "test.1.abi.json")
testABI2 := readABI(t, "test.2.abi.json")
testABI3 := readABI(t, "test.3.abi.json")
systemABI := readABI(t, "system.abi.json")
softFailStatus := pbcodec.TransactionStatus_TRANSACTIONSTATUS_SOFTFAIL
hardFailStatus := pbcodec.TransactionStatus_TRANSACTIONSTATUS_HARDFAIL
tests := []testData{
{
name: "setabi and usage, same trace",
blocks: in(testBlock(t, "00000002aa", "00000001aa",
trxTrace(t,
actionTraceSetABI(t, "test", 0, 1, testABI1),
actionTrace(t, "test:test:act1", 1, 2, testABI1, `{"from":"test1"}`),
),
)),
expectations: []expectation{
{"block 0/trace 0/action 1", `{"from":"test1"}`},
},
},
{
name: "setabi and usage, same block, two traces",
blocks: in(testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "test", 0, 1, testABI1)),
trxTrace(t, actionTrace(t, "test:test:act1", 0, 2, testABI1, `{"from":"test1"}`)),
)),
expectations: []expectation{
{"block 0/trace 1/action 0", `{"from":"test1"}`},
},
},
{
name: "setabi and usage, two different blocks",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "test", 0, 1, testABI1)),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act1", 0, 2, testABI1, `{"from":"test1"}`)),
),
),
expectations: []expectation{
{"block 1/trace 0/action 0", `{"from":"test1"}`},
},
},
{
name: "set multiple times, within same transaction, two different blocks",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t,
actionTraceSetABI(t, "test", 0, 1, testABI1),
actionTrace(t, "test:test:act1", 1, 2, testABI1, `{"from":"test1"}`),
actionTraceSetABI(t, "test", 2, 3, testABI2),
actionTrace(t, "test:test:act2", 2, 4, testABI2, `{"to":20}`),
actionTraceSetABI(t, "test", 4, 5, testABI3),
),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act3", 0, 6, testABI3, `{"quantity":"1.0 EOS"}`)),
),
),
expectations: []expectation{
{"block 0/trace 0/action 1", `{"from":"test1"}`},
{"block 0/trace 0/action 3", `{"to":20}`},
{"block 1/trace 0/action 0", `{"quantity":"1.0 EOS"}`},
},
},
{
name: "set multiple times, across transactions, two different blocks",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "test", 0, 1, testABI1)),
trxTrace(t, actionTrace(t, "test:test:act1", 0, 2, testABI1, `{"from":"test1"}`)),
trxTrace(t, actionTraceSetABI(t, "test", 0, 3, testABI2)),
trxTrace(t, actionTrace(t, "test:test:act2", 0, 4, testABI2, `{"to":20}`)),
trxTrace(t, actionTraceSetABI(t, "test", 0, 5, testABI3)),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act3", 0, 6, testABI3, `{"quantity":"1.0 EOS"}`)),
),
),
expectations: []expectation{
{"block 0/trace 1/action 0", `{"from":"test1"}`},
{"block 0/trace 3/action 0", `{"to":20}`},
{"block 1/trace 0/action 0", `{"quantity":"1.0 EOS"}`},
},
},
{
name: "fork multiple block",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "test", 0, 1, testABI1)),
trxTrace(t, actionTraceSetABI(t, "token", 0, 2, tokenABI1)),
),
testBlock(t, "00000002bb", "00000001aa",
trxTrace(t, actionTrace(t, "test:test:act1", 0, 3, testABI1, `{"from":"test1"}`)),
trxTrace(t, actionTraceSetABI(t, "test", 0, 4, testABI2)),
trxTrace(t, actionTrace(t, "test:test:act2", 0, 5, testABI2, `{"to":20}`)),
),
testBlock(t, "00000003bb", "00000002bb",
trxTrace(t, actionTrace(t, "test:test:act2", 0, 6, testABI2, `{"to":20}`)),
trxTrace(t, actionTraceSetABI(t, "token", 0, 7, tokenABI2)),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act1", 0, 3, testABI1, `{"from":"test1"}`)),
trxTrace(t, actionTrace(t, "token:token:transfer", 0, 4, tokenABI1, `{"to":"transfer3"}`)),
),
),
expectations: []expectation{
{"block 1/trace 0/action 0", `{"from":"test1"}`},
{"block 1/trace 2/action 0", `{"to":20}`},
{"block 2/trace 0/action 0", `{"to":20}`},
{"block 3/trace 0/action 0", `{"from":"test1"}`},
{"block 3/trace 1/action 0", `{"to":"transfer3"}`},
},
},
{
name: "fail transaction, does not save ABI",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, hardFailStatus, actionTraceSetABI(t, "test", 0, 1, testABI1)),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act1", 0, 2, testABI1, `{"from":"test1"}`)),
),
),
expectations: []expectation{
{"block 1/trace 0/action 0", `000000008090b1ca`},
},
},
{
name: "fail transaction, still works from failed transaction but does not record ABI",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "test", 0, 1, testABI1)),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, hardFailStatus,
actionTrace(t, "test:test:act1", 0, 2, testABI1, `{"from":"test1"}`),
actionTraceSetABI(t, "test", 1, 3, testABI2),
actionTrace(t, "test:test:act2", 2, 4, testABI2, `{"to":1}`),
actionTrace(t, "test:test:act2", 3, 5, testABI2, `{"to":2}`),
actionTraceSetABI(t, "test", 4, 6, testABI3),
actionTraceFail(t, "test:test:act3", 5, testABI3, `{"quantity":"1.0000 EOS"}`),
),
),
testBlock(t, "00000004aa", "00000003aa",
trxTrace(t,
actionTrace(t, "test:test:act1", 0, 2, testABI1, `{"from":"test3"}`),
						// Let's assume there are a bunch of transactions in-between, so we test that no recording actually occurred!
actionTrace(t, "test:test:act1", 1, 7, testABI1, `{"from":"test4"}`),
),
),
),
expectations: []expectation{
{"block 1/trace 0/action 0", `{"from":"test1"}`},
{"block 1/trace 0/action 2", `{"to":1}`},
{"block 1/trace 0/action 3", `{"to":2}`},
{"block 1/trace 0/action 5", `{"quantity":"1.0000 EOS"}`},
{"block 2/trace 0/action 0", `{"from":"test3"}`},
{"block 2/trace 0/action 1", `{"from":"test4"}`},
},
},
{
name: "soft_fail onerror, still records ABI",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, softFailStatus,
actionTrace(t, "eosio:eosio:onerror", 0, 1, nil, ""),
actionTraceSetABI(t, "test", 1, 2, testABI2),
actionTrace(t, "test:test:act2", 2, 3, testABI2, `{"to":1}`),
actionTraceSetABI(t, "test", 3, 4, testABI3),
),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act3", 0, 5, testABI3, `{"quantity":"1.0000 EOS"}`)),
),
),
expectations: []expectation{
{"block 0/trace 0/action 2", `{"to":1}`},
{"block 1/trace 0/action 0", `{"quantity":"1.0000 EOS"}`},
},
},
{
name: "soft_fail, with abi dumps, single action global sequence 0, still records ABI",
abiDumps: map[string]*eos.ABI{
"eosio.token": tokenABI2,
},
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "eosio.token", 0, 1, tokenABI2)),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, softFailStatus,
actionTraceFail(t, "eosio.token:eosio.token:transfer", 0, tokenABI2, `{"from":"bitfinexcw11","memo":"Simple test","quantity":"1.0000 EOS","to":"bitfinexcw12"}`),
),
),
),
expectations: []expectation{
{"block 1/trace 0/action 0", `{"from":"bitfinexcw11","memo":"Simple test","quantity":"1.0000 EOS","to":"bitfinexcw12"}`},
},
},
{
name: "hard_fail onerror, still works from failed transaction but does not record ABI",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, hardFailStatus,
actionTrace(t, "eosio:eosio:onerror", 0, 1, nil, ""),
actionTraceSetABI(t, "test", 1, 2, testABI2),
actionTrace(t, "test:test:act2", 2, 3, testABI2, `{"to":1}`),
actionTraceSetABI(t, "test", 3, 4, testABI3),
actionTraceFail(t, "any:any:any", 4, nil, ""),
),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, actionTrace(t, "test:test:act3", 0, 1, testABI3, `{"quantity":"1.0000 EOS"}`)),
					// Let's assume there are a bunch of transactions in-between, so we test that no recording actually occurred!
trxTrace(t, actionTrace(t, "test:test:act3", 0, 8, testABI3, `{"quantity":"2.0000 EOS"}`)),
),
),
expectations: []expectation{
{"block 0/trace 0/action 2", `{"to":1}`},
{"block 1/trace 0/action 0", `102700000000000004454f5300000000`},
{"block 1/trace 1/action 0", `204e00000000000004454f5300000000`},
},
},
{
name: "dtrx ops are correctly decoded",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t,
actionTraceSetABI(t, "test", 0, 1, testABI1),
actionTraceSetABI(t, "token", 1, 2, tokenABI1),
actionTrace(t, "test:test:act1", 2, 3, testABI1, `{"from":"block1"}`),
// A dtrx op created by action index 2
dtrxOp(t, 2, "create", signedTrx(t,
cfaAction(t, "token:transfer", tokenABI1, `{"to":"someone"}`),
action(t, "test:act1", testABI1, `{"from":"inner1"}`),
)),
),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t,
actionTrace(t, "test:test:act1", 0, 4, testABI1, `{"from":"block2"}`),
// A dtrx op created by action index 0
dtrxOp(t, 0, "create", signedTrx(t,
cfaAction(t, "token:transfer", tokenABI1, `{"to":"somelse"}`),
action(t, "test:act1", testABI1, `{"from":"inner2"}`),
)),
),
),
testBlock(t, "00000004aa", "00000003aa",
trxTrace(t, dtrxOp(t, 0, "push_create", signedTrx(t, action(t, "test:act1", testABI1, `{"from":"push1"}`)))),
),
),
expectations: []expectation{
{"block 0/trace 0/action 2", `{"from":"block1"}`},
{"block 0/trace 0/dtrxOp 0/action 0", `{"from":"inner1"}`},
{"block 0/trace 0/dtrxOp 0/cfaAction 0", `{"to":"someone"}`},
{"block 1/trace 0/dtrxOp 0/action 0", `{"from":"inner2"}`},
{"block 1/trace 0/dtrxOp 0/cfaAction 0", `{"to":"somelse"}`},
{"block 2/trace 0/dtrxOp 0/action 0", `{"from":"push1"}`},
},
},
{
name: "trx ops are correctly decoded",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t,
actionTraceSetABI(t, "test", 0, 1, testABI1),
actionTraceSetABI(t, "token", 1, 2, tokenABI1),
actionTraceSetABI(t, "eosio", 2, 3, systemABI),
),
),
testBlock(t, "00000003aa", "00000002aa",
trxTrace(t, softFailStatus,
actionTrace(t, "test:test:act1", 0, 4, testABI1, `{"from":"block2"}`),
),
trxTrace(t, hardFailStatus,
actionTrace(t, "eosio:eosio:onerror", 0, 5, systemABI, `{"trx_id":"abc"}`),
),
trxOp(t, signedTrx(t,
action(t, "eosio:onblock", systemABI, `{"id":"00000003aa"}`),
cfaAction(t, "test:act1", testABI1, `{"from":"block3"}`),
)),
trxOp(t, signedTrx(t,
action(t, "eosio:onerror", systemABI, `{"trx_id":"abc"}`),
cfaAction(t, "token:transfer", tokenABI1, `{"to":"someone"}`),
)),
),
),
expectations: []expectation{
{"block 1/trace 0/action 0", `{"from":"block2"}`},
{"block 1/trace 1/action 0", `{"trx_id":"abc"}`},
{"block 1/trxOp 0/action 0", `{"id":"00000003aa"}`},
{"block 1/trxOp 0/cfaAction 0", `{"from":"block3"}`},
{"block 1/trxOp 1/action 0", `{"trx_id":"abc"}`},
{"block 1/trxOp 1/cfaAction 0", `{"to":"someone"}`},
},
},
{
name: "native eosio:transfer correctly decoded",
blocks: in(
testBlock(t, "00000002aa", "00000001aa",
trxTrace(t, actionTraceSetABI(t, "eosio.token", 0, 1, tokenABI2)),
trxTrace(t, actionTrace(t, "eosio.token:eosio.token:transfer", 0, 2, tokenABI2, `{"from":"eosio","to":"token","quantity":"1.0000 EOS","memo":""}`)),
trxTrace(t, actionTrace(t, "eosio.token:eosio.token:transfer", 0, 3, tokenABI2, `{"from":"eosio","to":"token","quantity":"1.0000 EOS","memo":"With memo"}`)),
),
),
expectations: []expectation{
{"block 0/trace 1/action 0", `{"from":"eosio","to":"token","quantity":"1.0000 EOS","memo":""}`},
{"block 0/trace 2/action 0", `{"from":"eosio","to":"token","quantity":"1.0000 EOS","memo":"With memo"}`},
},
},
// TODO: Add those tests
// - ensures "hard-coded" system methods like `setabi`, `setcode` always work?
}
toString := func(in proto.Message) string {
out, err := (&jsonpb.Marshaler{}).MarshalToString(in)
require.NoError(t, err)
return out
}
hexRegex := regexp.MustCompile("^[0-9a-fA-F]+$")
actionTraceRegex := regexp.MustCompile("^block (\\d+)/trace (\\d+)/action (\\d+)$")
dtrxOpRegex := regexp.MustCompile("^block (\\d+)/trace (\\d+)/dtrxOp (\\d+)/(action|cfaAction) (\\d+)$")
trxOpRegex := regexp.MustCompile("^block (\\d+)/trxOp (\\d+)/(action|cfaAction) (\\d+)$")
toInt := func(in string) int {
out, err := strconv.ParseInt(in, 10, 32)
require.NoError(t, err)
return int(out)
}
extractTrace := func(testData *testData, regexMatch []string) (block *pbcodec.Block, trace *pbcodec.TransactionTrace) {
block = testData.blocks[toInt(regexMatch[1])]
trace = block.UnfilteredTransactionTraces[toInt(regexMatch[2])]
return
}
assertMatchAction := func(expected string, action *pbcodec.Action) {
if hexRegex.MatchString(expected) {
require.Equal(t, expected, hex.EncodeToString(action.RawData), toString(action))
require.Empty(t, action.JsonData, "JsonData should be empty\n%s", toString(action))
} else {
require.NotEmpty(t, action.RawData, "RawData should still be populated\n%s", toString(action))
require.NotEmpty(t, action.JsonData, "JsonData should not be empty\n%s", toString(action))
assert.JSONEq(t, expected, action.JsonData)
}
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
decoder := newABIDecoder()
for contract, abi := range test.abiDumps {
abiBinary, err := eos.MarshalBinary(abi)
require.NoError(t, err)
decoder.addInitialABI(contract, base64.RawStdEncoding.EncodeToString(abiBinary))
}
for _, block := range test.blocks {
maybePrintBlock(t, block)
err := decoder.startBlock(block.Num())
require.NoError(t, err)
for _, trxTrace := range block.UnfilteredTransactionTraces {
err := decoder.processTransaction(trxTrace)
require.NoError(t, err)
}
// This should wait for all decoding in the block to terminate
err = decoder.endBlock(block)
require.NoError(t, err)
}
for _, expect := range test.expectations {
var match []string
if match = fullMatchRegex(actionTraceRegex, expect.path); match != nil {
_, trace := extractTrace(&test, match)
assertMatchAction(expect.value, trace.ActionTraces[toInt(match[3])].Action)
continue
}
if match = fullMatchRegex(dtrxOpRegex, expect.path); match != nil {
_, trace := extractTrace(&test, match)
dtrxOp := trace.DtrxOps[toInt(match[3])]
if match[4] == "cfaAction" {
assertMatchAction(expect.value, dtrxOp.Transaction.Transaction.ContextFreeActions[toInt(match[5])])
} else if match[4] == "action" {
assertMatchAction(expect.value, dtrxOp.Transaction.Transaction.Actions[toInt(match[5])])
}
continue
}
if match = fullMatchRegex(trxOpRegex, expect.path); match != nil {
block := test.blocks[toInt(match[1])]
trxOp := block.UnfilteredImplicitTransactionOps[toInt(match[2])]
if match[3] == "cfaAction" {
assertMatchAction(expect.value, trxOp.Transaction.Transaction.ContextFreeActions[toInt(match[4])])
} else if match[3] == "action" {
assertMatchAction(expect.value, trxOp.Transaction.Transaction.Actions[toInt(match[4])])
}
continue
}
				assert.Fail(t, "Unable to assert unknown expectation", "Expectation path %q does not match any assertion regex", expect.path)
}
})
}
}
func fullMatchRegex(regex *regexp.Regexp, content string) []string {
match := regex.FindAllStringSubmatch(content, -1)
if match == nil {
return nil
}
return match[0]
}
func testBlock(t *testing.T, blkID string, previousBlkID string, elements ...interface{}) *pbcodec.Block {
pbblock := &pbcodec.Block{
Id: blkID,
Number: eos.BlockNum(blkID),
}
blockTime, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05.5Z")
require.NoError(t, err)
blockTimestamp, err := ptypes.TimestampProto(blockTime)
require.NoError(t, err)
pbblock.DposIrreversibleBlocknum = pbblock.Number - 1
pbblock.Header = &pbcodec.BlockHeader{
Previous: previousBlkID,
Producer: "tester",
Timestamp: blockTimestamp,
}
for _, element := range elements {
switch v := element.(type) {
case *pbcodec.TransactionTrace:
pbblock.UnfilteredTransactionTraceCount++
pbblock.UnfilteredTransactionTraces = append(pbblock.UnfilteredTransactionTraces, v)
case *pbcodec.TrxOp:
pbblock.UnfilteredImplicitTransactionOps = append(pbblock.UnfilteredImplicitTransactionOps, v)
}
}
return pbblock
}
func trxTrace(t *testing.T, elements ...interface{}) *pbcodec.TransactionTrace {
trace := &pbcodec.TransactionTrace{
Receipt: &pbcodec.TransactionReceiptHeader{
Status: pbcodec.TransactionStatus_TRANSACTIONSTATUS_EXECUTED,
},
}
for _, element := range elements {
switch v := element.(type) {
case *pbcodec.ActionTrace:
trace.ActionTraces = append(trace.ActionTraces, v)
case *pbcodec.DBOp:
trace.DbOps = append(trace.DbOps, v)
case *pbcodec.DTrxOp:
trace.DtrxOps = append(trace.DtrxOps, v)
case *pbcodec.TableOp:
trace.TableOps = append(trace.TableOps, v)
case pbcodec.TransactionStatus:
trace.Receipt.Status = v
}
}
return trace
}
func signedTrx(t *testing.T, elements ...interface{}) *pbcodec.SignedTransaction {
signedTrx := &pbcodec.SignedTransaction{}
signedTrx.Transaction = trx(t, elements...)
return signedTrx
}
type ContextFreeAction *pbcodec.Action
func trx(t *testing.T, elements ...interface{}) *pbcodec.Transaction {
trx := &pbcodec.Transaction{}
for _, element := range elements {
switch v := element.(type) {
case *pbcodec.Action:
trx.Actions = append(trx.Actions, v)
case ContextFreeAction:
trx.ContextFreeActions = append(trx.ContextFreeActions, (*pbcodec.Action)(v))
}
}
return trx
}
func actionTrace(t *testing.T, tripletName string, executionIndex uint32, globalSequence uint64, abi *eos.ABI, data string) *pbcodec.ActionTrace {
parts := strings.Split(tripletName, ":")
receiver := parts[0]
account := parts[1]
actionName := parts[2]
return &pbcodec.ActionTrace{
ExecutionIndex: executionIndex,
Receiver: receiver,
Receipt: &pbcodec.ActionReceipt{
Receiver: receiver,
GlobalSequence: globalSequence,
},
Action: action(t, account+":"+actionName, abi, data),
}
}
func actionTraceFail(t *testing.T, tripletName string, executionIndex uint32, abi *eos.ABI, data string) *pbcodec.ActionTrace {
out := actionTrace(t, tripletName, executionIndex, 0, abi, data)
out.Receipt = nil
return out
}
func actionTraceSetABI(t *testing.T, account string, executionIndex uint32, globalSequence uint64, abi *eos.ABI) *pbcodec.ActionTrace {
abiData, err := eos.MarshalBinary(abi)
require.NoError(t, err)
setABI := &system.SetABI{Account: eos.AccountName(account), ABI: eos.HexBytes(abiData)}
rawData, err := eos.MarshalBinary(setABI)
require.NoError(t, err)
return &pbcodec.ActionTrace{
ExecutionIndex: executionIndex,
Receiver: "eosio",
Receipt: &pbcodec.ActionReceipt{
Receiver: "eosio",
GlobalSequence: globalSequence,
},
Action: &pbcodec.Action{
Account: "eosio",
Name: "setabi",
RawData: rawData,
},
}
}
func cfaAction(t *testing.T, pairName string, abi *eos.ABI, data string) ContextFreeAction {
return ContextFreeAction(action(t, pairName, abi, data))
}
func action(t *testing.T, pairName string, abi *eos.ABI, data string) *pbcodec.Action {
parts := strings.Split(pairName, ":")
account := parts[0]
actionName := parts[1]
var rawData []byte
if abi != nil && data != "" {
var err error
rawData, err = abi.EncodeAction(eos.ActionName(actionName), []byte(data))
require.NoError(t, err)
}
return &pbcodec.Action{
Account: account,
Name: actionName,
RawData: rawData,
}
}
func trxOp(t *testing.T, signedTrx *pbcodec.SignedTransaction) *pbcodec.TrxOp {
op := &pbcodec.TrxOp{
Transaction: signedTrx,
}
return op
}
func dtrxOp(t *testing.T, actionIndex uint32, operation string, signedTrx *pbcodec.SignedTransaction) *pbcodec.DTrxOp {
opName := pbcodec.DTrxOp_Operation_value["OPERATION_"+strings.ToUpper(operation)]
op := &pbcodec.DTrxOp{
Operation: pbcodec.DTrxOp_Operation(opName),
ActionIndex: actionIndex,
Transaction: signedTrx,
}
return op
}
func maybePrintBlock(t *testing.T, block *pbcodec.Block) {
if os.Getenv("DEBUG") == "" && os.Getenv("TRACE") != "true" {
return
}
marshaler := &jsonpb.Marshaler{}
out, err := marshaler.MarshalToString(block)
require.NoError(t, err)
// We re-normalize to a plain map[string]interface{} so it's printed as JSON and not a proto default String implementation
normalizedOut := map[string]interface{}{}
require.NoError(t, json.Unmarshal([]byte(out), &normalizedOut))
zlog.Debug("processing test block", zap.Any("block", normalizedOut))
}
|
[
"\"DEBUG\"",
"\"TRACE\""
] |
[] |
[
"TRACE",
"DEBUG"
] |
[]
|
["TRACE", "DEBUG"]
|
go
| 2 | 0 | |
torchember/whyhat.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/20_why_hat.ipynb (unless otherwise specified).
__all__ = ['md5hash', 'ModelInput', 'InputEmb', 'InputOneHot', 'InputConti', 'YEncoder', 'YOneHot', 'YConti',
'RichColumn', 'RichDF', 'TabularModel', 'TabularNN']
# Cell
import pandas as pd
import numpy as np
from pathlib import Path
import os
import json
from .core import color
from .helper import tracker
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Cell
from hashlib import md5
from datetime import datetime
from torch import nn
import torch
import numpy as np
def md5hash(x):
return md5(x.encode()).hexdigest()
# Cell
class ModelInput(nn.Module):
def __init__(self,rich_col):
super().__init__()
self.rich_col = rich_col
rich_col.input_module =self
class InputEmb(ModelInput):
def __init__(self,rich_col):
super().__init__(rich_col)
self.emb = nn.Embedding(len(rich_col.top_freq)+1,rich_col.hidden_size)
def forward(self,x):
return self.emb(x)
class InputOneHot(ModelInput):
def __init__(self,rich_col):
super().__init__(rich_col)
self.eye = torch.eye(len(self.rich_col))
def forward(self,x):
return self.eye[x]
class InputConti(ModelInput):
def __init__(self,rich_col):
super().__init__(rich_col)
rich_col.mean = rich_col.col.mean()
rich_col.std = rich_col.col.std()
self.bn=nn.BatchNorm1d(1)
self.tanh = nn.Tanh()
def forward(self,x):
x = self.tanh(self.bn(x))
return x.detach()
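# Illustrative helper (an assumption, not part of the original notebook): pick an
# input module for a column — an embedding or one-hot lookup for discrete columns,
# the BatchNorm+Tanh pathway for continuous ones.
def example_pick_input(rich_col):
    if rich_col.is_conti:
        return InputConti(rich_col)
    return InputEmb(rich_col) if rich_col.is_emb else InputOneHot(rich_col)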
# Cell
class YEncoder:
"""
    Encode the y (target column) into the required shape.
    The input of __call__ is a numpy array.
"""
def __init__(self,rich_col):
super().__init__()
self.rich_col = rich_col
        assert rich_col.is_y, f"{rich_col.name} is not set as a y column"
rich_col.y_encoder = self
def __call__(self,x):
        raise NotImplementedError("Define __call__ of YEncoder first")
class YOneHot(YEncoder):
def __init__(self, rich_col):
super().__init__(rich_col)
        self.eye = np.eye(len(rich_col)).astype(int)
def __call__(self, x):
return self.eye[x]
class YConti(YEncoder):
def __init__(self, rich_col):
super().__init__(rich_col)
self.mean = rich_col.col.mean()
self.std = rich_col.col.std()
def __call__(self,x):
return np.clip((x-self.mean)/self.std,-2,2)
# Cell
class RichColumn(object):
"""
A pandas series manager
"""
def __init__(self,column, is_y = False,min_occur = 5, is_emb = True,hidden_size=20):
self.col = column
self.col.rc = self
self.name = self.col.name
self.min_occur = min_occur
self.hidden_size = hidden_size
self.is_emb = is_emb
self.is_y = is_y
self.use = True
self.is_conti = True
self.defined = False
def kill(self):
"""
        set the column to kill mode so that it will not be involved in the learning
"""
self.defined = True
self.use = False
def conti(self):
"""
        set column to continuous data
"""
self.defined = True
self.is_conti = True
def disc(self):
"""
set column to discrete data
"""
self.defined = True
self.is_conti = False
def is_number(self):
"""
        Is this column's data type any form of number?
"""
return self.col.dtype in (int,float,
np.float16,np.float32,np.float64,np.float64,
np.int0,np.int8,np.int16,np.int32,np.int64)
def __bool__(self):
"""
is this column going to join the learning
"""
return self.use
def __len__(self):
"""
        width of the column when entering the model, or when used as the target
"""
if self.is_conti:
return 1
else:
if self.is_emb and (self.is_y==False):
return self.hidden_size
else:
width = len(self.top_freq)+1
width =1 if width==2 else width
return width
def __repr__(self,):
return f"<Rich Column:{self.name}>"
def top_freq_(self):
freq = self.freq()
self.top_freq = freq[freq[self.name]>=self.min_occur].reset_index()
self.tokens = dict((v,k+1) for k,v in enumerate(self.top_freq["index"]))
self.token_arr = np.array(["<mtk>",]+list(self.top_freq["index"]))
return self.top_freq
def freq(self):
return pd.DataFrame(data=self.col.value_counts())
@property
def conf_dict(self):
return dict((i,getattr(self,i)) for i in ["name","defined","is_conti","is_y","is_emb","use"])
def set_conf(self,conf_dict):
for k,v in conf_dict.items():
setattr(self,k,v)
return self
def encode(self,x):
if self.is_conti:
return x if x else self.mean
else:
try:
return self.tokens[x]
except:
return 0
def decode(self,idx):
return self.token_arr[idx]
def build_learn(self):
"""
prepare the column for learning
"""
if self.is_y == False:
if self.is_conti:
self.mean = self.col.mean()
InputConti(self)
else:
InputEmb(self)
else:
if self.is_conti:
self.mean = self.col.mean()
YConti(self)
else:
YOneHot(self)
return self
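# Minimal usage sketch (assumes an older pandas where value_counts() keeps the
# column name, as the surrounding code already relies on): wrap a small Series in a
# RichColumn, mark it discrete, and tokenize values; token 0 is the "<mtk>" bucket
# for values occurring fewer than min_occur times.
def example_rich_column():
    col = pd.Series(["a", "b", "a", "a", "b", "c"], name="letter")
    rc = RichColumn(col, min_occur=2)
    rc.disc()
    rc.top_freq_()
    return rc.encode("a"), rc.encode("c")  # "c" is rare, so it encodes to 0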
# Cell
class RichDF(object):
"""
A pandas dataframe manager
"""
def __init__(self,df,fname=None):
self.df = df
self.columns = dict()
if fname==None:
fname=f"why_hat_{self.ts_str}"
self.t = tracker("torchember",fname)
self.t.data = self.t.log_path
for colname in self.df:
self.columns.update({colname:RichColumn(df[colname])})
@property
def ts_str(self):
return datetime.now().strftime("%m%d_%H%M%S")
@property
def col_conf(self):
return dict((k,{"use":v.use,"is_cont":v.is_conti}) for k,v in self.columns.items())
def __getitem__(self,col_name):
return self.columns[col_name]
def kill(self,colname):
"""
Not using this column
"""
self.df[colname].rc.kill()
def conti(self,colname):
self.df[colname].rc.conti()
def disc(self,colname):
self.df[colname].rc.disc()
def save_col(self,rcol):
self.t[md5hash(rcol.name)]=rcol.conf_dict
def set_col(self,rcol):
if rcol.defined:
print(f"{rcol.name} defined, use:{rcol.use}, contineus?:{rcol.is_conti}")
print(color.bold("="*30))
print(color.cyan(rcol.name))
print(color.red(f"number? {rcol.is_number()}"))
print(rcol.top_freq_().head(5))
print(color.red("Is this a [C]ontineous, [D]iscrete or a column we do[N]'t need? default N"))
x = input().lower()
if x=="c":
rcol.conti()
print(color.blue(f"{rcol.name} set to contineous data"))
self.save_col(rcol)
elif x =="d":
rcol.disc()
print(color.blue(f"{rcol.name} set to discrite data"))
self.save_col(rcol)
elif (x =="") or (x=="n"):
rcol.kill()
print(color.blue(f"{rcol.name} will not be involved in learning"))
self.save_col(rcol)
else:
print(color.yellow(f"option [{x}] not found, try Again?"))
def save(self,colname):
col=self.df[colname]
self.t[md5hash(colname)] = col.rc.conf_dict
def read(self,colname):
col=self.df[colname]
col.rc.set_conf(self.t[md5hash(colname)])
        if not col.rc.is_conti:
col.rc.top_freq_()
def shuffle_df(self):
self.df = self.df\
.sample(frac=1.)\
.reset_index().drop("index",axis=1)
def tour(self):
"""
        Go through the columns one by one to decide how each one's data is processed
"""
for colname in self.df:
col = self.df[colname]
current = self.t[md5hash(colname)]
if current != None:
col.rc.set_conf(current)
if col.rc.is_conti==False:
col.rc.top_freq_()
if col.rc.defined==False:
self.set_col(col.rc)
def set_y(self, *colnames):
"""
set columns to y
all the columns that use==True and is_y==False will be treated as x
"""
for colname in colnames:
rc = self.columns[colname]
rc.is_y = True
rc.use = True
rc.is_emb = False
self.save(colname)
def set_x(self, *colnames):
"""
set columns to x
        of course, every column's default status is x,
        so you only need this if you accidentally set a column to y
"""
for colname in colnames:
rc = self.columns[colname]
rc.use = True
rc.is_y = False
self.save(colname)
@property
def Xs(self):
"""
        Yield the x rich columns
"""
for col,rc in self.columns.items():
if (rc.is_y) ==False and rc.use:
yield rc
@property
def Ys(self):
"""
        Yield the y rich columns
"""
for col,rc in self.columns.items():
if rc.is_y and rc.use:
yield rc
# Cell
class TabularModel(nn.Module):
def __init__(self,rdf):
super().__init__()
self.rdf=rdf
self.inputs = nn.ModuleDict(modules = dict((x.name,x.input_module) for x in rdf.Xs))
self.build_dial_x()
self.build_dial_y()
self.input_width = len(self.dial)
self.target_width = len(self.dial_y)
self.hidden_size = max(self.input_width,self.target_width,20)
self.dnn = nn.Sequential(*[
nn.Linear(self.input_width,self.hidden_size),
nn.BatchNorm1d(self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size,self.target_width),
nn.BatchNorm1d(self.target_width),
])
def forward(self,Xs):
"""
Xs dictionary of inputs
"""
ipts = list(self.inputs[xcol.name](Xs[xcol.name]) for xcol in self.rdf.Xs)
concat = torch.cat(ipts,dim=1)
return self.dnn(concat)
def build_dial_x(self):
all_width = 0
self.dial = dict()
for x in self.rdf.Xs:
for i in range(len(x)):
self.dial.update({all_width:dict({"colname":x.name,
"rich_col":x,
"sub_idx":i,
"remark":f"input<{i}> of column {x.name}"})})
all_width+=1
return all_width
def build_dial_y(self):
all_width = 0
self.dial_y = dict()
for y in self.rdf.Ys:
for i in range(len(y)):
self.dial_y.update({all_width:dict({"colname":y.name,
"rich_col":y,
"sub_idx":i,
"remark":f"target<{i}> of column {y.name}"})})
all_width+=1
return all_width
class TabularNN:
def __init__(self, rich_df,batch_size=128):
self.rich_df = rich_df
self.l = len(rich_df.df)
self.batch_size = batch_size
self.x = list(x.build_learn() for x in self.rich_df.Xs)
self.y = list(y.build_learn() for y in self.rich_df.Ys)
self.assert_xy()
self.assert_y_consistency()
self.reset_i()
self.epoch = 0
self.rich_df.shuffle_df()
self.model = TabularModel(self.rich_df)
def reset_i(self):
"""reset iterator"""
self.s=0
self.e=1
def __repr__(self):
return f">>TabularNN"
def assert_xy(self):
        assert len(self.x)>0, "You have to set some X"
        assert len(self.y)>0, "You have to set some Y"
def assert_y_consistency(self):
conti_list = list(rc.is_conti for rc in self.rich_df.Ys)
        assert float(sum(conti_list))/len(conti_list) in [1.,0.],"Y has to be all discrete columns, or all continuous columns"
# decide loss function based on Y
if conti_list[0]:
self.crit = nn.MSELoss()
else:
self.crit = nn.BCEWithLogitsLoss()
def build_model_nn(self):
self.nn = TabularModel(self)
def batch_df(self):
start = self.batch_size*self.s
end = self.batch_size*self.e
if start>self.l:
self.epoch+=1
self.reset_i()
start = self.batch_size*self.s
end = self.batch_size*self.e
yield self.rich_df.df[start:end]
def batch_array(self):
df_b = next(self.batch_df())
x_data = dict()
y_data = dict()
for x in self.x:
if x.is_conti:
df_b[x.name]= df_b[x.name].fillna(x.mean)
arr = df_b[x.name].apply(x.encode).values
x_tensor = torch.FloatTensor(arr)[:,None] if x.is_conti else torch.LongTensor(arr)
x_data.update({x.name:x_tensor})
for y in self.y:
arr = df_b[y.name].apply(y.encode).values
y_tensor = torch.FloatTensor(arr) if y.is_conti else torch.LongTensor(arr)
y_data.update({y.name:y_tensor[:,None]})
yield x_data,y_data
def batch_y_pred(self):
x_data,y_data = next(self.batch_array())
yield self.model(x_data)
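# End-to-end sketch in comments only (tour() is interactive, and the column/file
# names below are hypothetical):
#   rdf = RichDF(pd.read_csv("data.csv"))
#   rdf.tour()                         # label each column as continuous/discrete/ignored
#   rdf.set_y("target")
#   tab = TabularNN(rdf, batch_size=64)
#   y_pred = next(tab.batch_y_pred())  # one forward pass on one batch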
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
src/api_utils/gladia_api_utils/model_management.py
|
import inspect
import os
import sys
import threading
from logging import raiseExceptions
from pathlib import Path
from urllib.parse import urlparse
from git import Repo
from icecream import ic
from .file_management import create_directory, download_file, is_archive, uncompress
def download_model(
url: str,
output_path: str,
uncompress_after_download=True,
file_type=None,
reset=True,
branch="origin",
) -> str:
"""download a model and uncompress it if necessary
    reset lets you decide not to force sync between huggingface hub and your local repo (for testing purposes for instance)"""
"""
namespace = sys._getframe(1).f_globals
cwd = os.getcwd()
rel_path = namespace["__file__"]
model_root_path = os.path.dirname(os.path.join(cwd, rel_path))
# check env to see if mutualized_storage had been set
    # os.getenv returns a string when the variable is set, so normalize it to a bool
    mutualized_storage = os.getenv('MODEL_MUTUALIZED_STORAGE', 'True').lower() in ('true', '1')
mutualized_storage_root = os.getenv('MODEL_MUTUALIZED_STORAGE_ROOT', '/tmp/gladia/models/')
if not os.path.isabs(output_path):
        if mutualized_storage:
output_path = os.path.join(mutualized_storage_root, rel_path, output_path)
else:
output_path = os.path.join(model_root_path, output_path)
ic("Downloading model", url, output_path)
url_domain = urlparse(url).netloc
if url_domain == "huggingface.co" or url_domain == "www.huggingface.co":
# check if directory exists if not clone it
# else pull
os.environ["GIT_LFS_SKIP_SMUDGE"] = "1"
if not os.path.isdir(Path(output_path)):
ic("Cloning HuggingFace Model", url)
Repo.clone_from(url, output_path)
os.system(f"cd {output_path} && git lfs pull")
else:
if reset:
ic("Pulling HuggingFace Model", url)
repo = Repo(output_path)
repo.git.reset("--hard", "origin/main")
os.system(f"cd {output_path} && git lfs pull")
else:
ic("Downloading", url)
download_file(url, output_path)
if is_archive(output_path) and uncompress_after_download:
ic("Uncompressing", output_path)
uncompress(output_path)
return output_path
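# Usage sketch (hypothetical URL and output path, for illustration only):
#   download_model(
#       url="https://huggingface.co/bert-base-uncased",
#       output_path="bert-base-uncased",
#   )
# With MODEL_MUTUALIZED_STORAGE left at its default, the files land under
# MODEL_MUTUALIZED_STORAGE_ROOT/<calling module's relative path>/<output_path>.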
def download_models(model_list: dict) -> dict:
"""model_list should be [(url, output_path, uncompression_mode)]"""
# manage relative imports
namespace = sys._getframe(1).f_globals
cwd = os.getcwd()
rel_path = namespace["__file__"]
rel_path = rel_path.lstrip('./')
if ".py" in rel_path:
rel_path = os.path.dirname(rel_path)
# used in case of relative path
model_root_path = os.path.dirname(os.path.join(cwd, rel_path))
ic("Downloading multiple models")
threads = []
output = dict()
# check env to see if mutualized_storage had been set
    # os.getenv returns a string when the variable is set, so normalize it to a bool
    mutualized_storage = os.getenv('MODEL_MUTUALIZED_STORAGE', 'True').lower() in ('true', '1')
mutualized_storage_root = os.getenv('MODEL_MUTUALIZED_STORAGE_ROOT', '/tmp/gladia/models/')
for key, model in model_list.items():
if not os.path.isabs(model["output_path"]):
if mutualized_storage:
model["output_path"] = os.path.join(mutualized_storage_root, rel_path, model["output_path"])
else:
model["output_path"] = os.path.join(model_root_path, model["output_path"])
t = threading.Thread(
target=download_model,
args=(
model["url"],
model["output_path"],
),
)
output[key] = model
threads.append(t)
t.start()
return output
|
[] |
[] |
[
"MODEL_MUTUALIZED_STORAGE",
"MODEL_MUTUALIZED_STORAGE_ROOT",
"GIT_LFS_SKIP_SMUDGE"
] |
[]
|
["MODEL_MUTUALIZED_STORAGE", "MODEL_MUTUALIZED_STORAGE_ROOT", "GIT_LFS_SKIP_SMUDGE"]
|
python
| 3 | 0 | |
tests/unit/gapic/compute_v1/test_firewalls.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.firewalls import FirewallsClient
from google.cloud.compute_v1.services.firewalls import pagers
from google.cloud.compute_v1.services.firewalls import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FirewallsClient._get_default_mtls_endpoint(None) is None
assert FirewallsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
FirewallsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
FirewallsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
FirewallsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert FirewallsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [FirewallsClient,])
def test_firewalls_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name", [(transports.FirewallsRestTransport, "rest"),]
)
def test_firewalls_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [FirewallsClient,])
def test_firewalls_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_firewalls_client_get_transport_class():
transport = FirewallsClient.get_transport_class()
available_transports = [
transports.FirewallsRestTransport,
]
assert transport in available_transports
transport = FirewallsClient.get_transport_class("rest")
assert transport == transports.FirewallsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
@mock.patch.object(
FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)
)
def test_firewalls_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FirewallsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FirewallsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(FirewallsClient, transports.FirewallsRestTransport, "rest", "true"),
(FirewallsClient, transports.FirewallsRestTransport, "rest", "false"),
],
)
@mock.patch.object(
FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_firewalls_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
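# A minimal sketch, not part of the generated tests: it only mirrors the endpoint
# choice the assertions above expect when GOOGLE_API_USE_MTLS_ENDPOINT is "auto".
# The helper name and signature are illustrative assumptions; the real decision
# logic lives inside the generated client.
def _expected_auto_endpoint(use_client_cert_env, cert_available, client_class):
    """Return the endpoint the mtls_env_auto test above expects the client to use."""
    if use_client_cert_env == "true" and cert_available:
        return client_class.DEFAULT_MTLS_ENDPOINT
    return client_class.DEFAULT_ENDPOINT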
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
def test_firewalls_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
def test_firewalls_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete(request)
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", firewall="firewall_value",)
mock_args.update(sample_request)
client.delete(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_delete_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteFirewallRequest(),
project="project_value",
firewall="firewall_value",
)
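# Usage note (illustrative comment, not generated code): the flattened-call
# convention exercised above means a caller passes either a request object or
# keyword fields, never both, e.g.
#   client.delete(project="my-project", firewall="my-rule")
#   client.delete(compute.DeleteFirewallRequest(project="my-project",
#                                               firewall="my-rule"))
# Mixing the two forms raises ValueError, which is what the test above asserts.
# The project/firewall values shown here are placeholder examples.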
def test_get_rest(transport: str = "rest", request_type=compute.GetFirewallRequest):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Firewall(
creation_timestamp="creation_timestamp_value",
description="description_value",
destination_ranges=["destination_ranges_value"],
direction=compute.Firewall.Direction.EGRESS,
disabled=True,
id=205,
kind="kind_value",
name="name_value",
network="network_value",
priority=898,
self_link="self_link_value",
source_ranges=["source_ranges_value"],
source_service_accounts=["source_service_accounts_value"],
source_tags=["source_tags_value"],
target_service_accounts=["target_service_accounts_value"],
target_tags=["target_tags_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Firewall.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Firewall)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.destination_ranges == ["destination_ranges_value"]
assert response.direction == compute.Firewall.Direction.EGRESS
assert response.disabled is True
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.network == "network_value"
assert response.priority == 898
assert response.self_link == "self_link_value"
assert response.source_ranges == ["source_ranges_value"]
assert response.source_service_accounts == ["source_service_accounts_value"]
assert response.source_tags == ["source_tags_value"]
assert response.target_service_accounts == ["target_service_accounts_value"]
assert response.target_tags == ["target_tags_value"]
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Firewall()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Firewall.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", firewall="firewall_value",)
mock_args.update(sample_request)
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetFirewallRequest(),
project="project_value",
firewall="firewall_value",
)
def test_insert_rest(
transport: str = "rest", request_type=compute.InsertFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_rest_bad_request(
transport: str = "rest", request_type=compute.InsertFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert(request)
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
mock_args.update(sample_request)
client.insert(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls"
% client.transport._host,
args[1],
)
def test_insert_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertFirewallRequest(),
project="project_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
def test_list_rest(transport: str = "rest", request_type=compute.ListFirewallsRequest):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.FirewallList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.FirewallList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListFirewallsRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.FirewallList()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.FirewallList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListFirewallsRequest(), project="project_value",
)
def test_list_rest_pager():
client = FirewallsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.FirewallList(
items=[compute.Firewall(), compute.Firewall(), compute.Firewall(),],
next_page_token="abc",
),
compute.FirewallList(items=[], next_page_token="def",),
compute.FirewallList(items=[compute.Firewall(),], next_page_token="ghi",),
compute.FirewallList(items=[compute.Firewall(), compute.Firewall(),],),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.FirewallList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.Firewall) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
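# Illustrative sketch (an assumption, not library code): the pager consumed above
# follows the usual next_page_token loop, roughly equivalent to the generator
# below, where fetch_page stands in for one REST List call.
def _iterate_firewall_pages(fetch_page, request):
    """Yield FirewallList pages until the server returns an empty page token."""
    page = fetch_page(request)
    yield page
    while page.next_page_token:
        page = fetch_page(request, page_token=page.next_page_token)
        yield page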
def test_patch_rest(transport: str = "rest", request_type=compute.PatchFirewallRequest):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_patch_rest_bad_request(
transport: str = "rest", request_type=compute.PatchFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch(request)
def test_patch_rest_from_dict():
test_patch_rest(request_type=dict)
def test_patch_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
mock_args.update(sample_request)
client.patch(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_patch_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch(
compute.PatchFirewallRequest(),
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
def test_update_rest(
transport: str = "rest", request_type=compute.UpdateFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_update_rest_bad_request(
transport: str = "rest", request_type=compute.UpdateFirewallRequest
):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "firewall": "sample2"}
request_init["firewall_resource"] = compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
)
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.update(request)
def test_update_rest_from_dict():
test_update_rest(request_type=dict)
def test_update_rest_flattened(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "firewall": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
mock_args.update(sample_request)
client.update(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}"
% client.transport._host,
args[1],
)
def test_update_rest_flattened_error(transport: str = "rest"):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update(
compute.UpdateFirewallRequest(),
project="project_value",
firewall="firewall_value",
firewall_resource=compute.Firewall(
allowed=[compute.Allowed(I_p_protocol="I_p_protocol_value")]
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FirewallsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FirewallsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FirewallsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FirewallsClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.FirewallsRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_firewalls_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FirewallsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_firewalls_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FirewallsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"patch",
"update",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_firewalls_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FirewallsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_firewalls_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FirewallsTransport()
adc.assert_called_once()
def test_firewalls_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FirewallsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_firewalls_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.FirewallsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_firewalls_host_no_port():
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_firewalls_host_with_port():
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FirewallsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = FirewallsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = FirewallsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = FirewallsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = FirewallsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = FirewallsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = FirewallsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = FirewallsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FirewallsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = FirewallsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FirewallsTransport, "_prep_wrapped_messages"
) as prep:
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FirewallsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FirewallsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = FirewallsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
# python/ray/tests/test_runtime_env_complicated.py
import os
from ray.workers.setup_runtime_env import inject_dependencies
import pytest
import sys
import unittest
import yaml
import time
import subprocess
from unittest import mock
import ray
from ray._private.utils import get_conda_env_dir, get_conda_bin_executable
from ray._private.runtime_env import RuntimeEnvDict
from ray.job_config import JobConfig
from ray.test_utils import (run_string_as_driver,
run_string_as_driver_nonblocking)
@pytest.fixture(scope="session")
def conda_envs():
"""Creates two copies of current conda env with different tf versions."""
ray.init()
conda_path = get_conda_bin_executable("conda")
init_cmd = (f". {os.path.dirname(conda_path)}"
f"/../etc/profile.d/conda.sh")
subprocess.run([f"{init_cmd} && conda activate"], shell=True)
current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
assert current_conda_env is not None
def delete_env(env_name):
subprocess.run(["conda", "remove", "--name", env_name, "--all", "-y"])
# Cloning the env twice may take minutes, so parallelize with Ray.
@ray.remote
def create_tf_env(tf_version: str):
env_name = f"tf-{tf_version}"
delete_env(env_name)
subprocess.run([
"conda", "create", "-n", env_name, "--clone", current_conda_env,
"-y"
])
commands = [
init_cmd, f"conda activate {env_name}",
f"python -m pip install tensorflow=={tf_version}",
"conda deactivate"
]
command_separator = " && "
command_str = command_separator.join(commands)
subprocess.run([command_str], shell=True)
tf_versions = ["2.2.0", "2.3.0"]
ray.get([create_tf_env.remote(version) for version in tf_versions])
ray.shutdown()
yield
ray.init()
for tf_version in tf_versions:
delete_env(env_name=f"tf-{tf_version}")
subprocess.run([f"{init_cmd} && conda deactivate"], shell=True)
ray.shutdown()
check_remote_client_conda = """
import ray
context = ray.client("localhost:24001").env({{"conda" : "tf-{tf_version}"}}).\\
connect()
@ray.remote
def get_tf_version():
import tensorflow as tf
return tf.__version__
assert ray.get(get_tf_version.remote()) == "{tf_version}"
context.disconnect()
"""
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment")
@pytest.mark.skipif(sys.platform == "win32", reason="Unsupported on Windows.")
@pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on MacOS.")
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True)
def test_client_tasks_and_actors_inherit_from_driver(conda_envs,
call_ray_start):
@ray.remote
def get_tf_version():
import tensorflow as tf
return tf.__version__
@ray.remote
class TfVersionActor:
def get_tf_version(self):
import tensorflow as tf
return tf.__version__
tf_versions = ["2.2.0", "2.3.0"]
for i, tf_version in enumerate(tf_versions):
runtime_env = {"conda": f"tf-{tf_version}"}
with ray.client("localhost:24001").env(runtime_env).connect():
assert ray.get(get_tf_version.remote()) == tf_version
actor_handle = TfVersionActor.remote()
assert ray.get(actor_handle.get_tf_version.remote()) == tf_version
# Ensure that we can have a second client connect using the other
# conda environment.
other_tf_version = tf_versions[(i + 1) % 2]
run_string_as_driver(
check_remote_client_conda.format(tf_version=other_tf_version))
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment")
@pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on MacOS.")
@pytest.mark.skipif(sys.platform == "win32", reason="Unsupported on Windows.")
def test_task_conda_env(conda_envs, shutdown_only):
import tensorflow as tf
ray.init()
@ray.remote
def get_tf_version():
return tf.__version__
tf_versions = ["2.2.0", "2.3.0"]
for tf_version in tf_versions:
runtime_env = {"conda": f"tf-{tf_version}"}
task = get_tf_version.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == tf_version
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment")
@pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on MacOS.")
@pytest.mark.skipif(sys.platform == "win32", reason="Unsupported on Windows.")
def test_actor_conda_env(conda_envs, shutdown_only):
import tensorflow as tf
ray.init()
@ray.remote
class TfVersionActor:
def get_tf_version(self):
return tf.__version__
tf_versions = ["2.2.0", "2.3.0"]
for tf_version in tf_versions:
runtime_env = {"conda": f"tf-{tf_version}"}
actor = TfVersionActor.options(runtime_env=runtime_env).remote()
assert ray.get(actor.get_tf_version.remote()) == tf_version
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment")
@pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on MacOS.")
@pytest.mark.skipif(sys.platform == "win32", reason="Unsupported on Windows.")
def test_inheritance_conda_env(conda_envs, shutdown_only):
import tensorflow as tf
ray.init()
@ray.remote
def get_tf_version():
return tf.__version__
@ray.remote
def wrapped_tf_version():
return ray.get(get_tf_version.remote())
@ray.remote
class TfVersionActor:
def get_tf_version(self):
return ray.get(wrapped_tf_version.remote())
tf_versions = ["2.2.0", "2.3.0"]
for tf_version in tf_versions:
runtime_env = {"conda": f"tf-{tf_version}"}
task = wrapped_tf_version.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == tf_version
actor = TfVersionActor.options(runtime_env=runtime_env).remote()
assert ray.get(actor.get_tf_version.remote()) == tf_version
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment")
@pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on MacOS.")
@pytest.mark.skipif(sys.platform == "win32", reason="Unsupported on Windows.")
def test_job_config_conda_env(conda_envs, shutdown_only):
import tensorflow as tf
tf_version = "2.2.0"
@ray.remote
def get_conda_env():
return tf.__version__
for tf_version in ["2.2.0", "2.3.0"]:
runtime_env = {"conda": f"tf-{tf_version}"}
ray.init(job_config=JobConfig(runtime_env=runtime_env))
assert ray.get(get_conda_env.remote()) == tf_version
ray.shutdown()
def test_get_conda_env_dir(tmp_path):
from pathlib import Path
"""
Typical output of `conda env list`, for context:
base /Users/scaly/anaconda3
my_env_1 /Users/scaly/anaconda3/envs/my_env_1
For this test, `tmp_path` is a stand-in for `Users/scaly/anaconda3`.
"""
# Simulate starting in an env named tf1.
d = tmp_path / "envs" / "tf1"
Path.mkdir(d, parents=True)
with mock.patch.dict(os.environ, {
"CONDA_PREFIX": str(d),
"CONDA_DEFAULT_ENV": "tf1"
}):
with pytest.raises(ValueError):
# Env tf2 should not exist.
env_dir = get_conda_env_dir("tf2")
tf2_dir = tmp_path / "envs" / "tf2"
Path.mkdir(tf2_dir, parents=True)
env_dir = get_conda_env_dir("tf2")
assert (env_dir == str(tmp_path / "envs" / "tf2"))
# Simulate starting in (base) conda env.
with mock.patch.dict(os.environ, {
"CONDA_PREFIX": str(tmp_path),
"CONDA_DEFAULT_ENV": "base"
}):
with pytest.raises(ValueError):
# Env tf3 should not exist.
env_dir = get_conda_env_dir("tf3")
# Env tf2 still should exist.
env_dir = get_conda_env_dir("tf2")
assert (env_dir == str(tmp_path / "envs" / "tf2"))
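# Rough sketch (an assumption for illustration, not Ray's implementation) of the
# path logic the test above relies on: resolve "<conda_root>/envs/<name>" from
# CONDA_PREFIX / CONDA_DEFAULT_ENV and raise ValueError when it does not exist.
def _conda_env_dir_sketch(env_name):
    prefix = os.environ["CONDA_PREFIX"]
    # In the base env the prefix is the conda root; otherwise it is <root>/envs/<env>.
    if os.environ.get("CONDA_DEFAULT_ENV") == "base":
        root = prefix
    else:
        root = os.path.dirname(os.path.dirname(prefix))
    env_dir = os.path.join(root, "envs", env_name)
    if not os.path.isdir(env_dir):
        raise ValueError(f"conda env {env_name} not found at {env_dir}")
    return env_dir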
"""
Note(architkulkarni):
These tests only run on Buildkite in a special job that runs
after the wheel is built, because the tests pass in the wheel as a dependency
in the runtime env. Buildkite only supports Linux for now.
"""
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
def test_conda_create_task(shutdown_only):
"""Tests dynamic creation of a conda env in a task's runtime env."""
ray.init()
runtime_env = {
"conda": {
"dependencies": ["pip", {
"pip": ["pip-install-test==0.5"]
}]
}
}
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(f.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
assert ray.get(f.options(runtime_env=runtime_env).remote())
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
def test_conda_create_job_config(shutdown_only):
"""Tests dynamic conda env creation in a runtime env in the JobConfig."""
runtime_env = {
"conda": {
"dependencies": ["pip", {
"pip": ["pip-install-test==0.5"]
}]
}
}
ray.init(job_config=JobConfig(runtime_env=runtime_env))
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
def test_inject_dependencies():
num_tests = 4
conda_dicts = [None] * num_tests
outputs = [None] * num_tests
conda_dicts[0] = {}
outputs[0] = {
"dependencies": ["python=7.8", "pip", {
"pip": ["ray==1.2.3"]
}]
}
conda_dicts[1] = {"dependencies": ["blah"]}
outputs[1] = {
"dependencies": ["blah", "python=7.8", "pip", {
"pip": ["ray==1.2.3"]
}]
}
conda_dicts[2] = {"dependencies": ["blah", "pip"]}
outputs[2] = {
"dependencies": ["blah", "pip", "python=7.8", {
"pip": ["ray==1.2.3"]
}]
}
conda_dicts[3] = {"dependencies": ["blah", "pip", {"pip": ["some_pkg"]}]}
outputs[3] = {
"dependencies": [
"blah", "pip", {
"pip": ["ray==1.2.3", "some_pkg"]
}, "python=7.8"
]
}
for i in range(num_tests):
output = inject_dependencies(conda_dicts[i], "7.8", ["ray==1.2.3"])
        error_msg = (f"failed on input {i}. "
                     f"Output: {output}\n"
                     f"Expected output: {outputs[i]}")
assert (output == outputs[i]), error_msg
@pytest.mark.skipif(
os.environ.get("CI") is None, reason="This test is only run on CI.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run for Linux.")
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True)
def test_conda_create_ray_client(call_ray_start):
"""Tests dynamic conda env creation in RayClient."""
runtime_env = {
"conda": {
"dependencies": ["pip", {
"pip": ["pip-install-test==0.5"]
}]
}
}
@ray.remote
def f():
import pip_install_test # noqa
return True
with ray.client("localhost:24001").env(runtime_env).connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
with ray.client("localhost:24001").connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed in a client that doesn't
# use the runtime_env
ray.get(f.remote())
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
@pytest.mark.parametrize("pip_as_str", [True, False])
def test_pip_task(shutdown_only, pip_as_str, tmp_path):
"""Tests pip installs in the runtime env specified in f.options()."""
ray.init()
if pip_as_str:
d = tmp_path / "pip_requirements"
d.mkdir()
p = d / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
p.write_text(requirements_txt)
runtime_env = {"pip": str(p)}
else:
runtime_env = {"pip": ["pip-install-test==0.5"]}
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(f.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
assert ray.get(f.options(runtime_env=runtime_env).remote())
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
def test_pip_ray_serve(shutdown_only):
"""Tests that ray[serve] can be included as a pip dependency."""
ray.init()
runtime_env = {"pip": ["pip-install-test==0.5", "ray[serve]"]}
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(f.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
assert ray.get(f.options(runtime_env=runtime_env).remote())
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
@pytest.mark.parametrize("pip_as_str", [True, False])
def test_pip_job_config(shutdown_only, pip_as_str, tmp_path):
"""Tests dynamic installation of pip packages in a task's runtime env."""
if pip_as_str:
d = tmp_path / "pip_requirements"
d.mkdir()
p = d / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
p.write_text(requirements_txt)
runtime_env = {"pip": str(p)}
else:
runtime_env = {"pip": ["pip-install-test==0.5"]}
ray.init(job_config=JobConfig(runtime_env=runtime_env))
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Unsupported on Windows.")
@pytest.mark.parametrize("use_working_dir", [True, False])
def test_conda_input_filepath(use_working_dir, tmp_path):
conda_dict = {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
d = tmp_path / "pip_requirements"
d.mkdir()
p = d / "environment.yml"
p.write_text(yaml.dump(conda_dict))
if use_working_dir:
runtime_env_dict = RuntimeEnvDict({
"working_dir": str(d),
"conda": "environment.yml"
})
else:
runtime_env_dict = RuntimeEnvDict({"conda": str(p)})
output_conda_dict = runtime_env_dict.get_parsed_dict().get("conda")
assert output_conda_dict == conda_dict
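# Minimal sketch (an assumption, for illustration only) of the resolution the
# test above depends on: a conda path given relative to working_dir is joined
# onto working_dir before the YAML is loaded.
def _load_conda_yaml_sketch(working_dir, conda_path):
    full_path = (conda_path if os.path.isabs(conda_path) else
                 os.path.join(working_dir, conda_path))
    with open(full_path) as f:
        return yaml.safe_load(f)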
@unittest.skipIf(sys.platform == "win32", "Fail to create temp dir.")
def test_experimental_package(shutdown_only):
ray.init(num_cpus=2)
pkg = ray.experimental.load_package(
os.path.join(
os.path.dirname(__file__),
"../experimental/packaging/example_pkg/ray_pkg.yaml"))
a = pkg.MyActor.remote()
assert ray.get(a.f.remote()) == "hello world"
assert ray.get(pkg.my_func.remote()) == "hello world"
@unittest.skipIf(sys.platform == "win32", "Fail to create temp dir.")
def test_experimental_package_lazy(shutdown_only):
pkg = ray.experimental.load_package(
os.path.join(
os.path.dirname(__file__),
"../experimental/packaging/example_pkg/ray_pkg.yaml"))
ray.init(num_cpus=2)
a = pkg.MyActor.remote()
assert ray.get(a.f.remote()) == "hello world"
assert ray.get(pkg.my_func.remote()) == "hello world"
@unittest.skipIf(sys.platform == "win32", "Fail to create temp dir.")
def test_experimental_package_github(shutdown_only):
ray.init(num_cpus=2)
pkg = ray.experimental.load_package(
"http://raw.githubusercontent.com/ray-project/ray/master/"
"python/ray/experimental/packaging/example_pkg/ray_pkg.yaml")
a = pkg.MyActor.remote()
assert ray.get(a.f.remote()) == "hello world"
assert ray.get(pkg.my_func.remote()) == "hello world"
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True)
def test_client_working_dir_filepath(call_ray_start, tmp_path):
"""Test that pip and conda relative filepaths work with working_dir."""
working_dir = tmp_path / "requirements"
working_dir.mkdir()
pip_file = working_dir / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
pip_file.write_text(requirements_txt)
runtime_env_pip = {
"working_dir": str(working_dir),
"pip": "requirements.txt"
}
conda_file = working_dir / "environment.yml"
conda_dict = {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
conda_str = yaml.dump(conda_dict)
conda_file.write_text(conda_str)
runtime_env_conda = {
"working_dir": str(working_dir),
"conda": "environment.yml"
}
@ray.remote
def f():
import pip_install_test # noqa
return True
with ray.client("localhost:24001").connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed in a client that doesn't
# use the runtime_env
ray.get(f.remote())
for runtime_env in [runtime_env_pip, runtime_env_conda]:
with ray.client("localhost:24001").env(runtime_env).connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
install_env_script = """
import ray
import time
job_config = ray.job_config.JobConfig(runtime_env={env})
ray.init(address="auto", job_config=job_config)
@ray.remote
def f():
return "hello"
f.remote()
# Give the env 5 seconds to begin installing in a new worker.
time.sleep(5)
"""
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
def test_env_installation_nonblocking(shutdown_only):
"""Test fix for https://github.com/ray-project/ray/issues/16226."""
env1 = {"pip": ["pip-install-test==0.5"]}
job_config = ray.job_config.JobConfig(runtime_env=env1)
ray.init(job_config=job_config)
@ray.remote
def f():
return "hello"
# Warm up a worker because it takes time to start.
ray.get(f.remote())
def assert_tasks_finish_quickly(total_sleep_s=0.1):
"""Call f every 0.01 seconds for total time total_sleep_s."""
gap_s = 0.01
for i in range(int(total_sleep_s / gap_s)):
start = time.time()
ray.get(f.remote())
# Env installation takes around 10 to 60 seconds. If we fail the
# below assert, we can be pretty sure an env installation blocked
# the task.
assert time.time() - start < 0.1
time.sleep(gap_s)
assert_tasks_finish_quickly()
env2 = {"pip": ["pip-install-test==0.5", "requests"]}
f.options(runtime_env=env2).remote()
# Check that installing env2 above does not block tasks using env1.
assert_tasks_finish_quickly()
proc = run_string_as_driver_nonblocking(
install_env_script.format(env=env1))
# Check that installing env1 in a new worker in the script above does not
# block other tasks that use env1.
assert_tasks_finish_quickly(total_sleep_s=5)
proc.kill()
proc.wait()
@pytest.mark.skipif(
os.environ.get("CI") is None,
reason="This test is only run on CI because it uses the built Ray wheel.")
@pytest.mark.skipif(
sys.platform != "linux", reason="This test is only run on Buildkite.")
def test_simultaneous_install(shutdown_only):
"""Test that two envs can be installed without affecting each other."""
ray.init()
@ray.remote
class TensorflowWorker:
def __init__(self):
import tensorflow as tf
self.version = tf.__version__
def get_version(self):
return self.version
# Before we used a global lock on conda installs, these two envs would be
# installed concurrently, leading to errors:
# https://github.com/ray-project/ray/issues/17086
# Now we use a global lock, so the envs are installed sequentially.
tf1 = TensorflowWorker.options(runtime_env={
"pip": ["tensorflow==2.4.2"]
}).remote()
tf2 = TensorflowWorker.options(runtime_env={
"pip": ["tensorflow==2.5.0"]
}).remote()
assert ray.get(tf1.get_version.remote()) == "2.4.2"
assert ray.get(tf2.get_version.remote()) == "2.5.0"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
|
[] |
[] |
[
"CONDA_DEFAULT_ENV",
"CI"
] |
[]
|
["CONDA_DEFAULT_ENV", "CI"]
|
python
| 2 | 0 | |
osa/tests/test_jobs.py
|
import os
from pathlib import Path
from textwrap import dedent
import pytest
from osa.configs import options
from osa.configs.config import cfg
extra_files = Path(os.getenv("OSA_TEST_DATA", "extra"))
datasequence_history_file = extra_files / "history_files/sequence_LST1_04185.0010.history"
calibration_history_file = extra_files / "history_files/sequence_LST1_04183.history"
options.date = "2020-01-17"
options.tel_id = "LST1"
options.prod_id = "v0.1.0"
def test_historylevel():
from osa.job import historylevel
options.dl1_prod_id = "tailcut84"
options.dl2_prod_id = "model1"
level, rc = historylevel(datasequence_history_file, "DATA")
assert level == 0
assert rc == 0
level, rc = historylevel(calibration_history_file, "PEDCALIB")
assert level == 0
assert rc == 0
options.dl1_prod_id = "tailcut84"
options.dl2_prod_id = "model2"
level, rc = historylevel(datasequence_history_file, "DATA")
assert level == 1
assert rc == 0
def test_preparejobs(running_analysis_dir, sequence_list):
from osa.job import prepare_jobs
options.simulate = False
options.directory = running_analysis_dir
prepare_jobs(sequence_list)
expected_calib_script = os.path.join(running_analysis_dir, "sequence_LST1_01809.py")
expected_data_script = os.path.join(running_analysis_dir, "sequence_LST1_01807.py")
assert os.path.isfile(os.path.abspath(expected_calib_script))
assert os.path.isfile(os.path.abspath(expected_data_script))
def test_sequence_filenames(running_analysis_dir, sequence_list):
from osa.job import sequence_filenames
for sequence in sequence_list:
sequence_filenames(sequence)
assert sequence.script == running_analysis_dir / f"sequence_LST1_{sequence.run:05d}.py"
def test_scheduler_env_variables(sequence_list, running_analysis_dir):
from osa.job import scheduler_env_variables
# Extract the first sequence
first_sequence = sequence_list[0]
env_variables = scheduler_env_variables(first_sequence)
assert env_variables == [
'#SBATCH --job-name=LST1_01809',
'#SBATCH --time=1:15:00',
f'#SBATCH --chdir={running_analysis_dir}',
'#SBATCH --output=log/Run01809.%4a_jobid_%A.out',
'#SBATCH --error=log/Run01809.%4a_jobid_%A.err',
f'#SBATCH --partition={cfg.get("SLURM", "PARTITION_PEDCALIB")}',
'#SBATCH --mem-per-cpu=3GB',
]
# Extract the second sequence
second_sequence = sequence_list[1]
env_variables = scheduler_env_variables(second_sequence)
assert env_variables == [
'#SBATCH --job-name=LST1_01807',
'#SBATCH --time=1:15:00',
f'#SBATCH --chdir={running_analysis_dir}',
'#SBATCH --output=log/Run01807.%4a_jobid_%A.out',
'#SBATCH --error=log/Run01807.%4a_jobid_%A.err',
'#SBATCH --array=0-10',
f'#SBATCH --partition={cfg.get("SLURM", "PARTITION_DATA")}',
'#SBATCH --mem-per-cpu=16GB',
]
def test_job_header_template(sequence_list, running_analysis_dir):
"""Extract and check the header for the first two sequences."""
from osa.job import job_header_template
# Extract the first sequence
first_sequence = sequence_list[0]
header = job_header_template(first_sequence)
output_string1 = dedent(
f"""\
#!/bin/env python
#SBATCH --job-name=LST1_01809
#SBATCH --time=1:15:00
#SBATCH --chdir={running_analysis_dir}
#SBATCH --output=log/Run01809.%4a_jobid_%A.out
#SBATCH --error=log/Run01809.%4a_jobid_%A.err
#SBATCH --partition={cfg.get('SLURM', 'PARTITION_PEDCALIB')}
#SBATCH --mem-per-cpu=3GB"""
)
assert header == output_string1
# Extract the second sequence
second_sequence = sequence_list[1]
header = job_header_template(second_sequence)
output_string2 = dedent(
f"""\
#!/bin/env python
#SBATCH --job-name=LST1_01807
#SBATCH --time=1:15:00
#SBATCH --chdir={running_analysis_dir}
#SBATCH --output=log/Run01807.%4a_jobid_%A.out
#SBATCH --error=log/Run01807.%4a_jobid_%A.err
#SBATCH --array=0-10
#SBATCH --partition={cfg.get('SLURM', 'PARTITION_DATA')}
#SBATCH --mem-per-cpu=16GB"""
)
assert header == output_string2
def test_create_job_template_scheduler(
sequence_list,
drs4_time_calibration_files,
drs4_baseline_file,
calibration_file,
run_summary_file,
pedestal_ids_file,
):
from osa.job import data_sequence_job_template
assert pedestal_ids_file.exists()
options.test = False
options.simulate = False
content1 = data_sequence_job_template(sequence_list[1])
expected_content1 = dedent(
f"""\
#!/bin/env python
#SBATCH --job-name=LST1_01807
#SBATCH --time=1:15:00
#SBATCH --chdir={Path.cwd()}/test_osa/test_files0/running_analysis/20200117/v0.1.0
#SBATCH --output=log/Run01807.%4a_jobid_%A.out
#SBATCH --error=log/Run01807.%4a_jobid_%A.err
#SBATCH --array=0-10
#SBATCH --partition={cfg.get('SLURM', 'PARTITION_DATA')}
#SBATCH --mem-per-cpu={cfg.get('SLURM', 'MEMSIZE_DATA')}
import os
import subprocess
import sys
import tempfile
os.environ['CTAPIPE_CACHE'] = '/fefs/aswg/lstanalyzer/.ctapipe/ctapipe_cache'
os.environ['CTAPIPE_SVC_PATH'] = '/fefs/aswg/lstanalyzer/.ctapipe/service'
os.environ['MPLCONFIGDIR'] = '/fefs/aswg/lstanalyzer/.cache/matplotlib'
subruns = int(os.getenv('SLURM_ARRAY_TASK_ID'))
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['NUMBA_CACHE_DIR'] = tmpdirname
proc = subprocess.run([
'datasequence',
'--config',
'{Path.cwd()}/osa/configs/sequencer.cfg',
'--date=2020-01-17',
'--prod-id=v0.1.0',
'--drs4-pedestal-file={drs4_baseline_file}',
'--time-calib-file={drs4_time_calibration_files[0]}',
'--pedcal-file={calibration_file}',
'--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
'--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
'--run-summary={run_summary_file}',
f'01807.{{subruns:04d}}',
'LST1'
])
sys.exit(proc.returncode)"""
)
content2 = data_sequence_job_template(sequence_list[2])
expected_content2 = dedent(
f"""\
#!/bin/env python
#SBATCH --job-name=LST1_01808
#SBATCH --time=1:15:00
#SBATCH --chdir={Path.cwd()}/test_osa/test_files0/running_analysis/20200117/v0.1.0
#SBATCH --output=log/Run01808.%4a_jobid_%A.out
#SBATCH --error=log/Run01808.%4a_jobid_%A.err
#SBATCH --array=0-8
#SBATCH --partition={cfg.get('SLURM', 'PARTITION_DATA')}
#SBATCH --mem-per-cpu={cfg.get('SLURM', 'MEMSIZE_DATA')}
import os
import subprocess
import sys
import tempfile
os.environ['CTAPIPE_CACHE'] = '/fefs/aswg/lstanalyzer/.ctapipe/ctapipe_cache'
os.environ['CTAPIPE_SVC_PATH'] = '/fefs/aswg/lstanalyzer/.ctapipe/service'
os.environ['MPLCONFIGDIR'] = '/fefs/aswg/lstanalyzer/.cache/matplotlib'
subruns = int(os.getenv('SLURM_ARRAY_TASK_ID'))
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['NUMBA_CACHE_DIR'] = tmpdirname
proc = subprocess.run([
'datasequence',
'--config',
'{Path.cwd()}/osa/configs/sequencer.cfg',
'--date=2020-01-17',
'--prod-id=v0.1.0',
'--drs4-pedestal-file={drs4_baseline_file}',
'--time-calib-file={drs4_time_calibration_files[0]}',
'--pedcal-file={calibration_file}',
'--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
'--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
'--run-summary={run_summary_file}',
f'--pedestal-ids-file={Path.cwd()}/test_osa/test_files0/auxiliary/PedestalFinder/20200117/pedestal_ids_Run01808.{{subruns:04d}}.h5',
f'01808.{{subruns:04d}}',
'LST1'
])
sys.exit(proc.returncode)"""
)
options.simulate = True
assert content1 == expected_content1
assert content2 == expected_content2
def test_create_job_template_local(
sequence_list,
drs4_time_calibration_files,
drs4_baseline_file,
calibration_file,
systematic_correction_files,
run_summary_file,
pedestal_ids_file,
r0_data,
):
"""Check the job file in local mode (assuming no scheduler)."""
from osa.job import data_sequence_job_template
for file in drs4_time_calibration_files:
assert file.exists()
for file in systematic_correction_files:
assert file.exists()
for file in r0_data:
assert file.exists()
assert pedestal_ids_file.exists()
options.test = True
options.simulate = False
content1 = data_sequence_job_template(sequence_list[1])
expected_content1 = dedent(
f"""\
#!/bin/env python
import os
import subprocess
import sys
import tempfile
subruns = 0
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['NUMBA_CACHE_DIR'] = tmpdirname
proc = subprocess.run([
'datasequence',
'--config',
'{Path.cwd()}/osa/configs/sequencer.cfg',
'--date=2020-01-17',
'--prod-id=v0.1.0',
'--drs4-pedestal-file={drs4_baseline_file}',
'--time-calib-file={drs4_time_calibration_files[0]}',
'--pedcal-file={calibration_file}',
'--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
'--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
'--run-summary={run_summary_file}',
f'01807.{{subruns:04d}}',
'LST1'
])
sys.exit(proc.returncode)"""
)
content2 = data_sequence_job_template(sequence_list[2])
expected_content2 = dedent(
f"""\
#!/bin/env python
import os
import subprocess
import sys
import tempfile
subruns = 0
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['NUMBA_CACHE_DIR'] = tmpdirname
proc = subprocess.run([
'datasequence',
'--config',
'{Path.cwd()}/osa/configs/sequencer.cfg',
'--date=2020-01-17',
'--prod-id=v0.1.0',
'--drs4-pedestal-file={drs4_baseline_file}',
'--time-calib-file={drs4_time_calibration_files[0]}',
'--pedcal-file={calibration_file}',
'--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
'--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
'--run-summary={run_summary_file}',
f'--pedestal-ids-file={Path.cwd()}/test_osa/test_files0/auxiliary/PedestalFinder/20200117/pedestal_ids_Run01808.{{subruns:04d}}.h5',
f'01808.{{subruns:04d}}',
'LST1'
])
sys.exit(proc.returncode)"""
)
options.simulate = True
assert content1 == expected_content1
assert content2 == expected_content2
def test_create_job_scheduler_calibration(sequence_list):
"""Check the pilot job file for the calibration pipeline."""
from osa.job import calibration_sequence_job_template
options.test = True
options.simulate = False
content = calibration_sequence_job_template(sequence_list[0])
expected_content = dedent(
f"""\
#!/bin/env python
import os
import subprocess
import sys
import tempfile
subruns = 0
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['NUMBA_CACHE_DIR'] = tmpdirname
proc = subprocess.run([
'calibration_pipeline',
'--config',
'{Path.cwd()}/osa/configs/sequencer.cfg',
'--date=2020-01-17',
'--drs4-pedestal-run=01804',
'--pedcal-run=01809',
'LST1'
])
sys.exit(proc.returncode)"""
)
options.simulate = True
assert content == expected_content
def test_set_cache_dirs():
from osa.job import set_cache_dirs
cache = set_cache_dirs()
cache_dirs = dedent(
f"""\
os.environ['CTAPIPE_CACHE'] = '{cfg.get('CACHE', 'CTAPIPE_CACHE')}'
os.environ['CTAPIPE_SVC_PATH'] = '{cfg.get('CACHE', 'CTAPIPE_SVC_PATH')}'
os.environ['MPLCONFIGDIR'] = '{cfg.get('CACHE', 'MPLCONFIGDIR')}'"""
)
assert cache_dirs == cache
def test_calibration_history_level():
from osa.job import check_history_level
levels = {"onsite_create_drs4_pedestal_file": 1, "onsite_create_calibration_file": 0}
level, exit_status = check_history_level(calibration_history_file, levels)
assert level == 0
assert exit_status == 0
@pytest.fixture
def mock_sacct_output():
"""Mock output of sacct to be able to use it in get_squeue_output function."""
return Path("./extra") / 'sacct_output.csv'
@pytest.fixture
def mock_squeue_output():
"""Mock output of squeue to be able to use it in get_squeue_output function."""
return Path("./extra") / 'squeue_output.csv'
@pytest.fixture
def sacct_output(mock_sacct_output):
from osa.job import get_sacct_output
return get_sacct_output(mock_sacct_output)
@pytest.fixture
def squeue_output(mock_squeue_output):
from osa.job import get_squeue_output
return get_squeue_output(mock_squeue_output)
def test_set_queue_values(sacct_output, squeue_output, sequence_list):
from osa.job import set_queue_values
set_queue_values(
sacct_info=sacct_output,
squeue_info=squeue_output,
sequence_list=sequence_list,
)
# Running calibration sequence
assert sequence_list[0].state == "RUNNING"
assert sequence_list[0].exit is None
assert sequence_list[0].jobid == 12951086
assert sequence_list[0].cputime == "00:36:00"
assert sequence_list[0].tries == 4
# Pending DATA sequences
assert sequence_list[1].state == "PENDING"
assert sequence_list[1].tries == 2
assert sequence_list[1].exit is None
assert sequence_list[2].state == "PENDING"
assert sequence_list[2].exit is None
assert sequence_list[2].tries == 1
def test_plot_job_statistics(sacct_output, running_analysis_dir):
from osa.job import plot_job_statistics
log_dir = running_analysis_dir / "log"
log_dir.mkdir(parents=True, exist_ok=True)
assert log_dir.exists()
plot_job_statistics(sacct_output, log_dir)
plot_file = log_dir / "job_statistics.pdf"
assert plot_file.exists()
def test_run_program_with_history_logging(running_analysis_dir):
from osa.job import run_program_with_history_logging
options.simulate = False
cmd = ["echo", "Testing"]
history_file = running_analysis_dir / "test.history"
run = "01140"
prod_id = "v0.2.0"
command = "echo"
config_file = "config_test.json"
rc = run_program_with_history_logging(
command_args=cmd,
history_file=history_file,
run=run,
prod_id=prod_id,
command=command,
config_file=config_file,
)
options.simulate = True
assert rc == 0
assert history_file.exists()
|
[] |
[] |
[
"MPLCONFIGDIR",
"SLURM_ARRAY_TASK_ID",
"CTAPIPE_SVC_PATH",
"NUMBA_CACHE_DIR",
"OSA_TEST_DATA",
"CTAPIPE_CACHE"
] |
[]
|
["MPLCONFIGDIR", "SLURM_ARRAY_TASK_ID", "CTAPIPE_SVC_PATH", "NUMBA_CACHE_DIR", "OSA_TEST_DATA", "CTAPIPE_CACHE"]
|
python
| 6 | 0 | |
pkg/redis/session_service_test.go
|
package redis
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"doublequote/pkg/mock"
"doublequote/pkg/utils"
"github.com/stretchr/testify/assert"
tmock "github.com/stretchr/testify/mock"
)
func NewTestSessionService() (*SessionService, *mock.CacheService) {
cache := mock.CacheService{}
s := NewSessionService(&cache)
return s, &cache
}
func TestNewSessionService(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
cache := mock.CacheService{}
_ = NewSessionService(&cache)
})
}
func TestSessionService_Create(t *testing.T) {
t.Run("OK", func(t *testing.T) {
svc, store := NewTestSessionService()
req, err := http.NewRequest("POST", "/", nil)
rr := httptest.NewRecorder()
store.On(
"Set",
req.Context(),
tmock.IsType(""),
1,
utils.TimeYear,
).Return(nil)
sess, err := svc.Create(rr, req, 1)
assert.Nil(t, err)
assert.Equal(t, 1, sess.UserID())
})
t.Run("StoreErr", func(t *testing.T) {
svc, cache := NewTestSessionService()
req, err := http.NewRequest("POST", "/", nil)
rr := httptest.NewRecorder()
cache.On(
"Set",
req.Context(),
tmock.IsType(""),
1,
utils.TimeYear,
).Return(fmt.Errorf("what is life? for I am just a humble cache mock"))
sess, err := svc.Create(rr, req, 1)
assert.Nil(t, sess)
assert.Equal(t, "what is life? for I am just a humble cache mock", err.Error())
cache.AssertExpectations(t)
})
}
func TestSessionService_Get(t *testing.T) {
t.Run("OK", func(t *testing.T) {
svc, cache := NewTestSessionService()
req, err := http.NewRequest("POST", "/", nil)
req.AddCookie(&http.Cookie{
Name: "session-id",
Value: "abc",
})
cache.On(
"GetInt",
req.Context(),
"abc",
).Return(1, nil)
sess, err := svc.Get(req)
assert.Nil(t, err)
assert.Equal(t, 1, sess.UserID())
cache.AssertExpectations(t)
})
t.Run("StoreErr", func(t *testing.T) {
svc, cache := NewTestSessionService()
req, err := http.NewRequest("POST", "/", nil)
req.AddCookie(&http.Cookie{
Name: "session-id",
Value: "abc",
})
cache.On(
"GetInt",
req.Context(),
"abc",
).Return(1, fmt.Errorf("error"))
sess, err := svc.Get(req)
assert.Nil(t, sess)
assert.Equal(t, "error", err.Error())
cache.AssertExpectations(t)
})
}
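// Illustrative summary of the cache contract assumed by the mocks above (not
// additional tests): Set(ctx, sessionID string, userID int, utils.TimeYear)
// stores the session under a generated ID, and GetInt(ctx, sessionID) resolves
// the "session-id" cookie value back to the user ID.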
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
pkg/imgpkg/cmd/registry_flags.go
|
// Copyright 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"fmt"
"os"
"time"
"github.com/spf13/cobra"
"github.com/vmware-tanzu/carvel-imgpkg/pkg/imgpkg/registry"
)
type RegistryFlags struct {
CACertPaths []string
VerifyCerts bool
Insecure bool
Username string
Password string
Token string
Anon bool
RetryCount int
ResponseHeaderTimeout time.Duration
}
// Set registers the flags available to the provided command
func (r *RegistryFlags) Set(cmd *cobra.Command) {
cmd.Flags().StringSliceVar(&r.CACertPaths, "registry-ca-cert-path", nil, "Add CA certificates for registry API (format: /tmp/foo) (can be specified multiple times)")
cmd.Flags().BoolVar(&r.VerifyCerts, "registry-verify-certs", true, "Set whether to verify server's certificate chain and host name")
cmd.Flags().BoolVar(&r.Insecure, "registry-insecure", false, "Allow the use of http when interacting with registries")
cmd.Flags().StringVar(&r.Username, "registry-username", "", "Set username for auth ($IMGPKG_USERNAME)")
cmd.Flags().StringVar(&r.Password, "registry-password", "", "Set password for auth ($IMGPKG_PASSWORD)")
cmd.Flags().StringVar(&r.Token, "registry-token", "", "Set token for auth ($IMGPKG_TOKEN)")
cmd.Flags().BoolVar(&r.Anon, "registry-anon", false, "Set anonymous auth ($IMGPKG_ANON)")
cmd.Flags().DurationVar(&r.ResponseHeaderTimeout, "registry-response-header-timeout", 30*time.Second, "Maximum time to allow a request to wait for a server's response headers from the registry (ms|s|m|h)")
cmd.Flags().IntVar(&r.RetryCount, "registry-retry-count", 5, "Set the number of times imgpkg retries to send requests to the registry in case of an error")
cmd.Flags().String("registry-azure-cr-config", "", "Path to the file containing Azure container registry configuration information. ($IMGPKG_REGISTRY_AZURE_CR_CONFIG)")
err := cmd.LocalFlags().MarkHidden("azure-container-registry-config")
if err != nil {
panic(fmt.Sprintf("Unable to hide flag: %s", err))
}
if cmd.PersistentPreRunE != nil {
panic("Internal inconsistency: PersistentPreRunE was already set")
}
cmd.PersistentPreRunE = func(subCmd *cobra.Command, args []string) error {
registryAzureContainerConfigFlag := subCmd.Flag("registry-azure-cr-config")
if registryAzureContainerConfigFlag == nil {
return nil
}
if registryAzureContainerConfigFlag.Value.String() != "" {
return cmd.Flags().Set("azure-container-registry-config", registryAzureContainerConfigFlag.Value.String())
}
return cmd.Flags().Set("azure-container-registry-config", os.Getenv("IMGPKG_REGISTRY_AZURE_CR_CONFIG"))
}
}
func (r *RegistryFlags) AsRegistryOpts() registry.Opts {
opts := registry.Opts{
CACertPaths: r.CACertPaths,
VerifyCerts: r.VerifyCerts,
Insecure: r.Insecure,
Username: r.Username,
Password: r.Password,
Token: r.Token,
Anon: r.Anon,
RetryCount: r.RetryCount,
ResponseHeaderTimeout: r.ResponseHeaderTimeout,
EnvironFunc: os.Environ,
}
if len(opts.Username) == 0 {
opts.Username = os.Getenv("IMGPKG_USERNAME")
}
if len(opts.Password) == 0 {
opts.Password = os.Getenv("IMGPKG_PASSWORD")
}
if len(opts.Token) == 0 {
opts.Token = os.Getenv("IMGPKG_TOKEN")
}
if os.Getenv("IMGPKG_ANON") == "true" {
opts.Anon = true
}
return opts
}
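// Illustrative precedence, based on the fallbacks above (example invocations,
// not part of the original source): explicit flag values win, the
// IMGPKG_USERNAME/IMGPKG_PASSWORD/IMGPKG_TOKEN variables are only consulted
// when the corresponding flag is left empty, and IMGPKG_ANON=true forces
// anonymous auth, e.g.
//   imgpkg pull --registry-username alice ...  -> Opts.Username == "alice"
//   IMGPKG_USERNAME=bob imgpkg pull ...        -> Opts.Username == "bob"
//   IMGPKG_ANON=true imgpkg pull ...           -> Opts.Anon == true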
|
[
"\"IMGPKG_REGISTRY_AZURE_CR_CONFIG\"",
"\"IMGPKG_USERNAME\"",
"\"IMGPKG_PASSWORD\"",
"\"IMGPKG_TOKEN\"",
"\"IMGPKG_ANON\""
] |
[] |
[
"IMGPKG_PASSWORD",
"IMGPKG_ANON",
"IMGPKG_USERNAME",
"IMGPKG_TOKEN",
"IMGPKG_REGISTRY_AZURE_CR_CONFIG"
] |
[]
|
["IMGPKG_PASSWORD", "IMGPKG_ANON", "IMGPKG_USERNAME", "IMGPKG_TOKEN", "IMGPKG_REGISTRY_AZURE_CR_CONFIG"]
|
go
| 5 | 0 | |
test/smoke/interior/interior_multicast_test.go
|
package interior
import (
"fmt"
. "github.com/onsi/ginkgo"
"os"
)
var _ = Describe("Exchange MultiCast messages across all nodes", func() {
const (
totalSmall = 100000
totalMedium = 10000
totalLarge = 1000
)
var (
allRouterNames = TopologySmoke.AllRouterNames()
)
var testSufix string
if os.Getenv("IMAGE_QDROUTERD_INTEROP") != "" {
testSufix = " - Using Interoperability mode"
}
It(fmt.Sprintf("exchanges %d small messages with 1kb using senders and receivers across all router nodes%s", totalSmall, testSufix), func() {
runSmokeTest("multicast/smoke/interior", totalSmall, 1024, allRouterNames)
})
It(fmt.Sprintf("exchanges %d medium messages with 100kb using senders and receivers across all router nodes%s", totalMedium, testSufix), func() {
runSmokeTest("multicast/smoke/interior", totalMedium, 1024*100, allRouterNames)
})
It(fmt.Sprintf("exchanges %d large messages with 500kb using senders and receivers across all router nodes%s", totalLarge, testSufix), func() {
runSmokeTest("multicast/smoke/interior", totalLarge, 1024*500, allRouterNames)
})
})
|
[
"\"IMAGE_QDROUTERD_INTEROP\""
] |
[] |
[
"IMAGE_QDROUTERD_INTEROP"
] |
[]
|
["IMAGE_QDROUTERD_INTEROP"]
|
go
| 1 | 0 | |
python/ray/tune/trial_runner.py
|
from typing import Any, List, Mapping, Optional, Union
import click
from datetime import datetime
import json
import logging
import os
import time
import traceback
import warnings
import ray
from ray.tune.impl.out_of_band_serialize_dataset import out_of_band_serialize_dataset
from ray.util import get_node_ip_address
from ray.tune import TuneError
from ray.tune.callback import CallbackList
from ray.tune.experiment import Experiment
from ray.tune.insufficient_resources_manager import InsufficientResourcesManager
from ray.tune.ray_trial_executor import RayTrialExecutor, ExecutorEventType
from ray.tune.result import (
DEBUG_METRICS,
DEFAULT_METRIC,
DONE,
TIME_THIS_ITER_S,
RESULT_DUPLICATE,
SHOULD_CHECKPOINT,
)
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.stopper import NoopStopper
from ray.tune.suggest import BasicVariantGenerator, SearchAlgorithm
from ray.tune.syncer import CloudSyncer, get_cloud_syncer, SyncConfig
from ray.tune.trial import Checkpoint, Trial
from ray.tune.utils import warn_if_slow, flatten_dict
from ray.tune.utils.log import Verbosity, has_verbosity
from ray.tune.utils.placement_groups import PlacementGroupFactory
from ray.tune.utils.serialization import TuneFunctionDecoder, TuneFunctionEncoder
from ray.tune.web_server import TuneServer
from ray.util.debug import log_once
MAX_DEBUG_TRIALS = 20
logger = logging.getLogger(__name__)
def find_newest_experiment_checkpoint(ckpt_dir) -> Optional[str]:
"""Returns path to most recently modified checkpoint."""
full_paths = [
os.path.join(ckpt_dir, fname)
for fname in os.listdir(ckpt_dir)
if fname.startswith("experiment_state") and fname.endswith(".json")
]
if not full_paths:
return None
# Checkpoint file names embed the session timestamp, so the lexicographically
# greatest path is also the newest experiment checkpoint.
return max(full_paths)
def load_trials_from_experiment_checkpoint(
experiment_checkpoint: Mapping[str, Any], stub: bool = False
) -> List[Trial]:
"""Create trial objects from experiment checkpoint.
Given an experiment checkpoint (TrialRunner state dict), return
list of trials."""
checkpoints = [
json.loads(cp, cls=TuneFunctionDecoder) if isinstance(cp, str) else cp
for cp in experiment_checkpoint["checkpoints"]
]
trials = []
for trial_cp in checkpoints:
new_trial = Trial(trial_cp["trainable_name"], stub=stub)
new_trial.__setstate__(trial_cp)
trials.append(new_trial)
return trials
class _ExperimentCheckpointManager:
"""Helper class for managing experiment-level checkpoints.
This class implements the ``checkpoint()`` method used to checkpoint
experiment state. When called, this will serialize and write to disk
the state of the trial runner, trial executor, and search algorithm, to
a specified checkpoint file.
The checkpoint period is automatically adjusted to
``max(10, time_per_checkpoint * 19)``. This means that at most 5% of the
time (1/20) will be used for writing checkpoints, while 95% of the time
(19/20) will be used to handle the rest of the training loop.
"""
def __init__(
self,
checkpoint_dir: str,
checkpoint_period: Union[int, float, str],
start_time: float,
session_str: str,
syncer: CloudSyncer,
sync_trial_checkpoints: bool = True,
):
self._checkpoint_dir = checkpoint_dir
self._auto_checkpoint_enabled = checkpoint_period == "auto"
if self._auto_checkpoint_enabled:
self._checkpoint_period = 10.0 # Initial value
else:
self._checkpoint_period = float(checkpoint_period)
self._start_time = start_time
self._session_str = session_str
self._syncer = syncer
self._sync_trial_checkpoints = sync_trial_checkpoints
self._last_checkpoint_time = 0.0
@property
def auto_checkpoint_enabled(self):
return self._auto_checkpoint_enabled
def checkpoint(
self,
checkpoint_file: str,
trial_runner: "TrialRunner",
trial_executor: RayTrialExecutor,
search_alg: SearchAlgorithm,
force=False,
):
"""Saves execution state to `self._local_checkpoint_dir`.
Overwrites the current session checkpoint, which starts when self
is instantiated. Throttle depends on self._checkpoint_period.
Also automatically saves the search algorithm to the local
checkpoint dir.
Args:
force (bool): Forces a checkpoint despite checkpoint_period.
"""
if not self._checkpoint_dir:
return
now = time.time()
if now - self._last_checkpoint_time < self._checkpoint_period and (not force):
return
def _serialize_and_write():
runner_state = {
"checkpoints": list(trial_executor.get_checkpoints().values()),
"runner_data": trial_runner.__getstate__(),
"stats": {
"start_time": self._start_time,
"timestamp": self._last_checkpoint_time,
},
}
tmp_file_name = os.path.join(self._checkpoint_dir, ".tmp_checkpoint")
with open(tmp_file_name, "w") as f:
json.dump(runner_state, f, indent=2, cls=TuneFunctionEncoder)
os.replace(tmp_file_name, checkpoint_file)
search_alg.save_to_dir(self._checkpoint_dir, session_str=self._session_str)
checkpoint_time_start = time.monotonic()
with out_of_band_serialize_dataset():
_serialize_and_write()
if self._sync_trial_checkpoints:
exclude = None
else:
exclude = ["*/checkpoint_*"]
if force:
self._syncer.sync_up(exclude=exclude)
else:
self._syncer.sync_up_if_needed(exclude=exclude)
checkpoint_time_taken = time.monotonic() - checkpoint_time_start
if self._auto_checkpoint_enabled:
# Multiplying this time by 19 means we spend ~5% of the time
# writing global checkpoints and 95% of the time processing trials
self._checkpoint_period = max(10.0, checkpoint_time_taken * 19)
logger.debug(
f"Global experiment checkpointing took "
f"{checkpoint_time_taken:.2f} seconds. "
f"Adjusting checkpoint period to "
f"{self._checkpoint_period:.2f} seconds."
)
self._last_checkpoint_time = time.time()
return self._checkpoint_dir
class TrialRunner:
"""A TrialRunner implements the event loop for scheduling trials on Ray.
.. code-block:: python
runner = TrialRunner()
runner.add_trial(Trial(...))
runner.add_trial(Trial(...))
while not runner.is_finished():
runner.step()
print(runner.debug_string())
The main job of TrialRunner is scheduling trials to efficiently use cluster
resources, without overloading the cluster.
While Ray itself provides resource management for tasks and actors, this is
not sufficient when scheduling trials that may instantiate multiple actors.
This is because if insufficient resources are available, concurrent trials
could deadlock waiting for new resources to become available. Furthermore,
oversubscribing the cluster could degrade training performance, leading to
misleading benchmark results.
Args:
search_alg (SearchAlgorithm): SearchAlgorithm for generating
Trial objects.
scheduler (TrialScheduler): Defaults to FIFOScheduler.
local_checkpoint_dir (str): Path where
global checkpoints are stored and restored from.
remote_checkpoint_dir (str): Remote path where
global checkpoints are stored and restored from. Used
if `resume` == REMOTE.
sync_config (SyncConfig): See `tune.py:run`.
stopper: Custom class for stopping whole experiments. See
``Stopper``.
resume (str|False): see `tune.py:run`.
server_port (int): Port number for launching TuneServer.
fail_fast (bool | str): Finishes as soon as a trial fails if True.
If fail_fast='raise' provided, Tune will automatically
raise the exception received by the Trainable. fail_fast='raise'
can easily leak resources and should be used with caution.
checkpoint_period (int|str): Trial runner checkpoint periodicity in
seconds. Defaults to ``"auto"``, which adjusts checkpointing
time so that at most 5% of the time is spent on writing
checkpoints.
trial_executor (TrialExecutor): Defaults to RayTrialExecutor.
callbacks (list): List of callbacks that will be called at different
times in the training loop. Must be instances of the
``ray.tune.trial_runner.Callback`` class.
metric (str): Metric used to check received results. If a result is
reported without this metric, an error will be raised. The check
can be disabled by not providing a metric or by setting the env
variable ``TUNE_DISABLE_STRICT_METRIC_CHECKING=1``.
"""
CKPT_FILE_TMPL = "experiment_state-{}.json"
VALID_RESUME_TYPES = [True, "LOCAL", "REMOTE", "PROMPT", "ERRORED_ONLY", "AUTO"]
RAISE = "RAISE"
def __init__(
self,
search_alg=None,
scheduler=None,
local_checkpoint_dir=None,
remote_checkpoint_dir=None,
sync_config=None,
stopper=None,
resume=False,
server_port=None,
fail_fast=False,
checkpoint_period=None,
trial_executor=None,
callbacks=None,
metric=None,
# Deprecate on next refactor
driver_sync_trial_checkpoints=False,
):
self._search_alg = search_alg or BasicVariantGenerator()
self._scheduler_alg = scheduler or FIFOScheduler()
self.trial_executor = trial_executor or RayTrialExecutor()
self._insufficient_resources_manager = InsufficientResourcesManager()
self._pending_trial_queue_times = {}
# Set the number of maximum pending trials
max_pending_trials = os.getenv("TUNE_MAX_PENDING_TRIALS_PG", "auto")
if max_pending_trials == "auto":
# Auto detect
if isinstance(self._search_alg, BasicVariantGenerator):
# Use a minimum of 16 to trigger fast autoscaling
# Scale up to at most the number of available cluster CPUs
cluster_cpus = ray.cluster_resources().get("CPU", 1.0)
self._max_pending_trials = max(16, int(cluster_cpus * 1.1))
if self._max_pending_trials > 128:
logger.warning(
f"The maximum number of pending trials has been "
f"automatically set to the number of available "
f"cluster CPUs, which is high "
f"({self._max_pending_trials} CPUs/pending trials). "
f"If you're running an experiment with a large number "
f"of trials, this could lead to scheduling overhead. "
f"In this case, consider setting the "
f"`TUNE_MAX_PENDING_TRIALS_PG` environment variable "
f"to the desired maximum number of concurrent trials."
)
else:
self._max_pending_trials = 1
else:
# Manual override
self._max_pending_trials = int(max_pending_trials)
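# Illustrative outcome of the auto-detection above (example numbers only):
# with a BasicVariantGenerator and 8 cluster CPUs the limit is
# max(16, int(8 * 1.1)) = 16 pending trials; with 200 CPUs it is 220, which
# triggers the >128 warning. Setting TUNE_MAX_PENDING_TRIALS_PG=32 overrides
# the heuristic entirely.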
self.trial_executor.set_max_pending_trials(self._max_pending_trials)
self._metric = metric
if "TRIALRUNNER_WALLTIME_LIMIT" in os.environ:
raise ValueError(
"The TRIALRUNNER_WALLTIME_LIMIT environment variable is "
"deprecated. "
"Use `tune.run(time_budget_s=limit)` instead."
)
self._total_time = 0
self._iteration = 0
self._has_errored = False
self._fail_fast = fail_fast
if isinstance(self._fail_fast, str):
self._fail_fast = self._fail_fast.upper()
if self._fail_fast == TrialRunner.RAISE:
warnings.warn(
"fail_fast='raise' detected. Be careful when using this "
"mode as resources (such as Ray processes, "
"file descriptors, and temporary files) may not be "
"cleaned up properly. To use "
"a safer mode, use fail_fast=True."
)
else:
raise ValueError(
"fail_fast must be one of {bool, RAISE}. " f"Got {self._fail_fast}."
)
self._server = None
self._server_port = server_port
if server_port is not None:
self._server = TuneServer(self, self._server_port)
self._trials = []
self._live_trials = set() # Set of non-terminated trials
self._cached_trial_decisions = {}
self._queued_trial_decisions = {}
self._updated_queue = False
self._stop_queue = []
self._should_stop_experiment = False # used by TuneServer
self._local_checkpoint_dir = local_checkpoint_dir
if self._local_checkpoint_dir:
os.makedirs(self._local_checkpoint_dir, exist_ok=True)
sync_config = sync_config or SyncConfig()
self._remote_checkpoint_dir = remote_checkpoint_dir
self._syncer = get_cloud_syncer(
local_checkpoint_dir, remote_checkpoint_dir, sync_config.syncer
)
self._stopper = stopper or NoopStopper()
self._resumed = False
if self._validate_resume(
resume_type=resume,
driver_sync_trial_checkpoints=driver_sync_trial_checkpoints,
):
errored_only = False
if isinstance(resume, str):
errored_only = resume.upper() == "ERRORED_ONLY"
try:
self.resume(run_errored_only=errored_only)
self._resumed = True
except Exception as e:
if has_verbosity(Verbosity.V3_TRIAL_DETAILS):
logger.error(str(e))
logger.exception("Runner restore failed.")
if self._fail_fast:
raise
logger.info("Restarting experiment.")
else:
logger.debug("Starting a new experiment.")
self._start_time = time.time()
self._last_checkpoint_time = -float("inf")
self._session_str = datetime.fromtimestamp(self._start_time).strftime(
"%Y-%m-%d_%H-%M-%S"
)
self.checkpoint_file = None
if self._local_checkpoint_dir:
self.checkpoint_file = os.path.join(
self._local_checkpoint_dir,
TrialRunner.CKPT_FILE_TMPL.format(self._session_str),
)
self._callbacks = CallbackList(callbacks or [])
if checkpoint_period is None:
checkpoint_period = os.getenv("TUNE_GLOBAL_CHECKPOINT_S", "auto")
self._checkpoint_period = checkpoint_period
self._checkpoint_manager = self._create_checkpoint_manager(
driver_sync_trial_checkpoints
)
def setup_experiments(
self, experiments: List[Experiment], total_num_samples: int
) -> None:
"""Obtains any necessary information from experiments.
Mainly used to setup callbacks.
Args:
experiments (List[Experiment]): List of Experiments
to use.
total_num_samples (int): Total number of samples
factoring in grid search samplers.
"""
experiment = experiments[0]
spec = experiment.public_spec if experiment else {}
spec["total_num_samples"] = total_num_samples
self._callbacks.setup(**spec)
def end_experiment_callbacks(self) -> None:
"""Calls ``on_experiment_end`` method in callbacks."""
self._callbacks.on_experiment_end(trials=self._trials)
def _create_checkpoint_manager(self, sync_trial_checkpoints: bool = True):
return _ExperimentCheckpointManager(
checkpoint_dir=self._local_checkpoint_dir,
checkpoint_period=self._checkpoint_period,
start_time=self._start_time,
session_str=self._session_str,
syncer=self._syncer,
sync_trial_checkpoints=sync_trial_checkpoints,
)
@property
def resumed(self):
return self._resumed
@property
def search_alg(self):
return self._search_alg
@property
def scheduler_alg(self):
return self._scheduler_alg
def _validate_resume(self, resume_type, driver_sync_trial_checkpoints=True):
"""Checks whether to resume experiment.
Args:
resume_type: One of True, "REMOTE", "LOCAL",
"PROMPT", "ERRORED_ONLY", "AUTO".
driver_sync_trial_checkpoints: Boolean indicating if the driver
should sync trial checkpoints from the driver node to cloud.
"""
# TODO: Consider supporting ERRORED_ONLY+REMOTE?
if not resume_type:
return False
assert (
resume_type in self.VALID_RESUME_TYPES
), "resume_type {} is not one of {}".format(
resume_type, self.VALID_RESUME_TYPES
)
# Not clear if we need this assertion, since we should always have a
# local checkpoint dir.
assert self._local_checkpoint_dir or self._remote_checkpoint_dir
if resume_type == "AUTO":
if self._remote_checkpoint_dir:
logger.info(
f"Trying to find and download experiment checkpoint at "
f"{self._remote_checkpoint_dir}"
)
# Todo: This syncs the entire experiment including trial
# checkpoints. We should exclude these in the future.
try:
self._syncer.sync_down_if_needed()
self._syncer.wait()
except TuneError as e:
logger.warning(
f"Got error when trying to sync down: {e} "
f"\nPlease check this error message for potential "
f"access problems - if a directory was not found, "
f"that is expected at this stage when you're starting "
f"a new experiment."
)
logger.info(
"No remote checkpoint was found or an error occurred "
"when trying to download the experiment checkpoint. "
"Please check the previous warning message for more "
"details. "
"Ray Tune will now start a new experiment."
)
return False
if not self.checkpoint_exists(self._local_checkpoint_dir):
logger.warning(
"A remote checkpoint was fetched, but no checkpoint "
"data was found. This can happen when e.g. the cloud "
"bucket exists but does not contain any data. "
"Ray Tune will start a new, fresh run."
)
return False
logger.info(
"A remote experiment checkpoint was found and will be "
"used to restore the previous experiment state."
)
return True
elif not self.checkpoint_exists(self._local_checkpoint_dir):
logger.info(
"No local checkpoint was found. "
"Ray Tune will now start a new experiment."
)
return False
logger.info(
"A local experiment checkpoint was found and will be used "
"to restore the previous experiment state."
)
return True
if resume_type in [True, "LOCAL", "PROMPT", "ERRORED_ONLY"]:
if not self.checkpoint_exists(self._local_checkpoint_dir):
raise ValueError(
f"You called resume ({resume_type}) when no checkpoint "
f"exists in local directory "
f"({self._local_checkpoint_dir}). If you want to start "
f'a new experiment, use `resume="AUTO"` or '
f"`resume=None`. If you expected an experiment to "
f"already exist, check if you supplied the correct "
f"`local_dir` to `tune.run()`."
)
elif resume_type == "PROMPT":
if click.confirm(
f"Resume from local directory? " f"({self._local_checkpoint_dir})"
):
return True
if resume_type in ["REMOTE", "PROMPT"]:
if resume_type == "PROMPT" and not click.confirm(
f"Try downloading from remote directory? "
f"({self._remote_checkpoint_dir})"
):
return False
if not self._remote_checkpoint_dir:
raise ValueError(
"Called resume from remote without remote directory. "
"Fix this by passing a `SyncConfig` object with "
"`upload_dir` set to `tune.run(sync_config=...)`."
)
# Try syncing down the upload directory.
logger.info(
f"Downloading experiment checkpoint from "
f"{self._remote_checkpoint_dir}"
)
if driver_sync_trial_checkpoints:
exclude = None
else:
exclude = ["*/checkpoint_*"]
try:
self._syncer.sync_down_if_needed(exclude=exclude)
self._syncer.wait()
except TuneError as e:
raise RuntimeError(
"Syncing the remote experiment checkpoint to the driver "
"failed. Please check the error message. If you want to "
'start a new experiment, use `resume="AUTO"` or '
"`resume=None`. If you expected an experiment to "
"already exist, check if you supplied the correct "
"`upload_dir` to the `tune.SyncConfig` passed to "
"`tune.run()`."
) from e
if not self.checkpoint_exists(self._local_checkpoint_dir):
raise ValueError(
"Called resume when no checkpoint exists "
"in remote or local directory."
)
return True
@classmethod
def checkpoint_exists(cls, directory):
if not os.path.exists(directory):
return False
return any(
(fname.startswith("experiment_state") and fname.endswith(".json"))
for fname in os.listdir(directory)
)
def checkpoint(self, force=False):
"""Saves execution state to `self._local_checkpoint_dir`.
Overwrites the current session checkpoint, which starts when self
is instantiated. Throttle depends on self._checkpoint_period.
Also automatically saves the search algorithm to the local
checkpoint dir.
Args:
force (bool): Forces a checkpoint despite checkpoint_period.
"""
with warn_if_slow(
"experiment_checkpoint",
message="Checkpointing the experiment state took "
"{duration:.3f} s, which may be a performance "
"bottleneck. Please ensure the "
"`TUNE_GLOBAL_CHECKPOINT_S` environment variable is "
"something significantly higher than this duration "
"to ensure compute time is mostly spent on the main "
"training loop.",
disable=self._checkpoint_manager.auto_checkpoint_enabled,
):
self._checkpoint_manager.checkpoint(
checkpoint_file=self.checkpoint_file,
trial_runner=self,
trial_executor=self.trial_executor,
search_alg=self._search_alg,
force=force,
)
def resume(self, run_errored_only=False):
"""Resumes all checkpointed trials from previous run.
Requires user to manually re-register their objects. Also stops
all ongoing trials.
"""
newest_ckpt_path = find_newest_experiment_checkpoint(self._local_checkpoint_dir)
if not newest_ckpt_path:
raise ValueError(
f"Tried to resume from checkpoint dir "
f"`{self._local_checkpoint_dir}`, but no "
f"experiment checkpoint data was found."
)
with open(newest_ckpt_path, "r") as f:
runner_state = json.load(f, cls=TuneFunctionDecoder)
self.checkpoint_file = newest_ckpt_path
logger.warning(
"".join(
[
"Attempting to resume experiment from {}. ".format(
self._local_checkpoint_dir
),
"This will ignore any new changes to the specification.",
]
)
)
self.__setstate__(runner_state["runner_data"])
if self._search_alg.has_checkpoint(self._local_checkpoint_dir):
self._search_alg.restore_from_dir(self._local_checkpoint_dir)
trials = load_trials_from_experiment_checkpoint(runner_state)
for trial in sorted(trials, key=lambda t: t.last_update_time, reverse=True):
if run_errored_only and trial.status == Trial.ERROR:
new_trial = trial.reset()
self.add_trial(new_trial)
else:
self.add_trial(trial)
def update_pending_trial_resources(
self, resources: Union[dict, PlacementGroupFactory]
):
"""Update trial resources when resuming from checkpoint.
Only updating the pending ones.
"""
assert resources
if isinstance(resources, dict) and "gpu" not in resources:
resources["gpu"] = 0
for trial in self._trials:
if trial.status == Trial.PENDING:
trial.update_resources(resources=resources)
def is_finished(self):
"""Returns whether all trials have finished running."""
# The checks here are partly redundant but optimized for quick
# evaluation. Specifically, if there are live trials, we check
# these live trials first. Only once all of the live trials have
# finished do we loop over all trials for a final check.
trials_done = (
len(self._live_trials) == 0
or all(trial.is_finished() for trial in self._live_trials)
) and all(trial.is_finished() for trial in self._trials)
return trials_done and self._search_alg.is_finished()
def _update_trial_queue_and_get_next_trial(self) -> Optional[Trial]:
"""Adding suggested trials to the live queue of trials (they start as PENDING trials).
Returns:
next_trial: Trial
"""
self._updated_queue = False
# This will contain the next trial to start
next_trial = self._get_next_trial() # blocking
# Create pending trials. If the queue was updated before, only
# continue updating if this was successful (next_trial is not None)
if not self._updated_queue or (self._updated_queue and next_trial):
num_pending_trials = len(
[t for t in self._live_trials if t.status == Trial.PENDING]
)
while num_pending_trials < self._max_pending_trials:
if not self._update_trial_queue(blocking=False):
break
num_pending_trials += 1
return next_trial
def _wait_and_handle_event(self, next_trial: Optional[Trial]):
try:
# Single wait of entire tune loop.
future_result = self.trial_executor.get_next_executor_event(
self._live_trials, next_trial is not None
)
if future_result.type == ExecutorEventType.PG_READY:
self._on_pg_ready(next_trial)
elif future_result.type == ExecutorEventType.NO_RUNNING_TRIAL_TIMEOUT:
self._insufficient_resources_manager.on_no_available_trials(
self.get_trials()
)
elif future_result.type == ExecutorEventType.YIELD:
pass
else:
trial = future_result.trial
result = future_result.result
if future_result.type == ExecutorEventType.ERROR:
self._on_executor_error(trial, result)
elif future_result.type == ExecutorEventType.RESTORING_RESULT:
self._on_restoring_result(trial)
else:
assert future_result.type in (
ExecutorEventType.SAVING_RESULT,
ExecutorEventType.TRAINING_RESULT,
), f"Unexpected future type - {future_result.type}"
if future_result.type == ExecutorEventType.TRAINING_RESULT:
self._on_training_result(trial, result)
else:
self._on_saving_result(trial, result)
self._post_process_on_training_saving_result(trial)
except Exception as e:
if isinstance(e, TuneError) or self._fail_fast == TrialRunner.RAISE:
raise e
else:
raise TuneError(traceback.format_exc())
def step(self):
"""Runs one step of the trial event loop.
Callers should typically run this method repeatedly in a loop. They
may inspect or modify the runner's state in between calls to step().
"""
if self.is_finished():
raise TuneError("Called step when all trials finished?")
with warn_if_slow("on_step_begin"):
self.trial_executor.on_step_begin(self.get_trials())
with warn_if_slow("callbacks.on_step_begin"):
self._callbacks.on_step_begin(
iteration=self._iteration, trials=self._trials
)
next_trial = self._update_trial_queue_and_get_next_trial()
self._wait_and_handle_event(next_trial)
self._stop_experiment_if_needed()
try:
self.checkpoint()
except Exception as e:
logger.warning(f"Trial Runner checkpointing failed: {str(e)}")
self._iteration += 1
if self._server:
with warn_if_slow("server"):
self._process_stop_requests()
if self.is_finished():
self._server.shutdown()
self._reconcile_live_trials()
with warn_if_slow("on_step_end"):
self.trial_executor.on_step_end(self.get_trials())
with warn_if_slow("callbacks.on_step_end"):
self._callbacks.on_step_end(iteration=self._iteration, trials=self._trials)
def _on_pg_ready(self, next_trial: Optional[Trial]):
def _start_trial(trial: Trial) -> bool:
"""Helper function to start trial and call callbacks"""
with warn_if_slow("start_trial"):
if self.trial_executor.start_trial(trial):
self._callbacks.on_trial_start(
iteration=self._iteration, trials=self._trials, trial=trial
)
return True
return False
assert next_trial is not None
logger.info(f"starting {next_trial}")
if not _start_trial(next_trial) and next_trial.status != Trial.ERROR:
# Only try to start another trial if previous trial startup
# did not error (e.g. it just didn't start because its
# placement group is not ready, yet).
# Without this clause, this test fails:
# test_trial_runner_pg.py::
# TrialRunnerPlacementGroupHeterogeneousTest::
# testResourceDeadlock
next_trial = self.trial_executor.get_staged_trial()
if next_trial is not None:
# Must be able to start.
assert _start_trial(next_trial)
else:
logger.info(f"reconciling {self.get_trials()}")
self.trial_executor._pg_manager.reconcile_placement_groups(
self.get_trials()
)
def _on_saving_result(self, trial, result):
with warn_if_slow("process_trial_save") as _profile:
self._process_trial_save(trial, result)
with warn_if_slow("callbacks.on_trial_save"):
self._callbacks.on_trial_save(
iteration=self._iteration, trials=self._trials, trial=trial
)
if _profile.too_slow and trial.sync_on_checkpoint:
# TODO(ujvl): Suggest using cloud checkpointing once
# API has converged.
msg = (
"Consider turning off forced head-worker trial "
"checkpoint syncs by setting sync_on_checkpoint=False"
". Note that this may result in faulty trial "
"restoration if a failure occurs while the checkpoint "
"is being synced from the worker to the head node."
)
if trial.location.hostname and (
trial.location.hostname != get_node_ip_address()
):
if log_once("tune_head_worker_checkpoint"):
logger.warning(msg)
def _on_restoring_result(self, trial):
with warn_if_slow("process_trial_restore"):
self._process_trial_restore(trial)
with warn_if_slow("callbacks.on_trial_restore"):
self._callbacks.on_trial_restore(
iteration=self._iteration, trials=self._trials, trial=trial
)
def _on_training_result(self, trial, result):
if not isinstance(result, list):
result = [result]
with warn_if_slow("process_trial_result"):
self._process_trial_results(trial, result)
def _post_process_on_training_saving_result(self, trial):
# `self._queued_trial_decisions` now contains a final decision
# based on all results
if trial not in self._cached_trial_decisions:
final_decision = self._queued_trial_decisions.pop(trial.trial_id, None)
if final_decision:
self._execute_action(trial, final_decision)
def _on_executor_error(self, trial, result):
error_msg = f"Trial {trial}: Error processing event."
if self._fail_fast == TrialRunner.RAISE:
logger.error(error_msg)
assert isinstance(result[0], Exception)
raise result[0]
else:
logger.exception(error_msg)
assert isinstance(result[1], str)
self._process_trial_failure(trial, result[1])
def get_trial(self, tid):
trial = [t for t in self._trials if t.trial_id == tid]
return trial[0] if trial else None
def get_trials(self):
"""Returns the list of trials managed by this TrialRunner.
Note that the caller usually should not mutate trial state directly.
"""
return self._trials
def get_live_trials(self):
"""Returns the set of trials that are not in Trial.TERMINATED state."""
return self._live_trials
def add_trial(self, trial):
"""Adds a new trial to this TrialRunner.
Trials may be added at any time.
Args:
trial (Trial): Trial to queue.
"""
self._trials.append(trial)
if trial.status != Trial.TERMINATED:
self._live_trials.add(trial)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(
TrialRunnerWrapper(self, runner_whitelist_attr={"search_alg"}), trial
)
self.trial_executor.mark_trial_to_checkpoint(trial)
def debug_string(self, delim="\n"):
from ray.tune.progress_reporter import trial_progress_str
result_keys = [list(t.last_result) for t in self.get_trials() if t.last_result]
metrics = set().union(*result_keys)
messages = [
self._scheduler_alg.debug_string(),
self.trial_executor.debug_string(),
trial_progress_str(self.get_trials(), metrics, force_table=True),
]
return delim.join(messages)
def _stop_experiment_if_needed(self):
"""Stops all trials."""
fail_fast = self._fail_fast and self._has_errored
if self._stopper.stop_all() or fail_fast or self._should_stop_experiment:
self._search_alg.set_finished()
[
self.trial_executor.stop_trial(t)
for t in self._trials
if t.status is not Trial.ERROR
]
def _get_next_trial(self):
"""Replenishes queue.
Blocks if all trials queued have finished, but search algorithm is
still not finished.
"""
no_trials_unfinished = True
no_trials_pending = True
for trial in self._live_trials:
if not trial.is_finished():
no_trials_unfinished = False
if trial.status == Trial.PENDING:
no_trials_pending = False
if not no_trials_unfinished and not no_trials_pending:
break
wait_for_trial = no_trials_unfinished and not self._search_alg.is_finished()
# Only fetch a new trial if we have no pending trial
if wait_for_trial or no_trials_pending:
self._update_trial_queue(blocking=wait_for_trial)
with warn_if_slow("choose_trial_to_run"):
trial = self._scheduler_alg.choose_trial_to_run(self)
if trial:
logger.debug("Running trial {}".format(trial))
return trial
def _process_trial_results(self, trial, results):
logger.debug(f"process_trial_results {results}")
with warn_if_slow(
"process_trial_results",
message="Processing trial results took {duration:.3f} s, "
"which may be a performance bottleneck. Please consider "
"reporting results less frequently to Ray Tune.",
):
for i, result in enumerate(results):
with warn_if_slow("process_trial_result"):
decision = self._process_trial_result(trial, result)
if decision is None:
# If we didn't get a decision, this means a
# non-training future (e.g. a save) was scheduled.
# We do not allow processing more results then.
if i < len(results) - 1:
if log_once("trial_runner_buffer_checkpoint"):
logger.warning(
f"Trial {trial} has a non-training future "
f"scheduled but {len(results) - i} results "
f"left to process. This means that a "
f"checkpoint was requested, but buffered "
f"training was continued before it was "
f"saved. Consider using non-buffered "
f"training by setting the env variable "
f"`TUNE_RESULT_BUFFER_LENGTH=1`."
)
elif decision == TrialScheduler.STOP:
# If the decision is to stop the trial,
# ignore all results that came after that.
break
def _process_trial_result(self, trial, result):
result.update(trial_id=trial.trial_id)
is_duplicate = RESULT_DUPLICATE in result
force_checkpoint = result.get(SHOULD_CHECKPOINT, False)
# TrialScheduler and SearchAlgorithm still receive a
# notification because there may be special handling for
# the `on_trial_complete` hook.
if is_duplicate:
logger.debug("Trial finished without logging 'done'.")
result = trial.last_result
result.update(done=True)
self._total_time += result.get(TIME_THIS_ITER_S, 0)
flat_result = flatten_dict(result)
self._validate_result_metrics(flat_result)
if self._stopper(trial.trial_id, result) or trial.should_stop(flat_result):
decision = TrialScheduler.STOP
else:
with warn_if_slow("scheduler.on_trial_result"):
decision = self._scheduler_alg.on_trial_result(self, trial, flat_result)
if decision == TrialScheduler.STOP:
result.update(done=True)
else:
# Only updating search alg if the trial is not to be stopped.
with warn_if_slow("search_alg.on_trial_result"):
self._search_alg.on_trial_result(trial.trial_id, flat_result)
# If this is not a duplicate result, the callbacks should
# be informed about the result.
if not is_duplicate:
with warn_if_slow("callbacks.on_trial_result"):
self._callbacks.on_trial_result(
iteration=self._iteration,
trials=self._trials,
trial=trial,
result=result.copy(),
)
trial.update_last_result(result)
# Include in next experiment checkpoint
self.trial_executor.mark_trial_to_checkpoint(trial)
# Checkpoints to disk. This should be checked even if
# the scheduler decision is STOP or PAUSE. Note that
# PAUSE only checkpoints to memory and does not update
# the global checkpoint state.
self._checkpoint_trial_if_needed(trial, force=force_checkpoint)
if trial.is_saving:
logger.debug(f"caching trial decision {trial}")
# Cache decision to execute on after the save is processed.
# This prevents changing the trial's state or kicking off
# another training step prematurely.
self._cached_trial_decisions[trial.trial_id] = decision
return None
else:
self._queue_decision(trial, decision)
return decision
def _validate_result_metrics(self, result):
"""
Check if any of the required metrics was not reported
in the last result. If the only items are ``done`` or any of
DEBUG_METRICS, this means that no result was ever received and
the trial just returned. This is also okay and will not raise
an error.
This will ignore checking for the DEFAULT_METRIC.
"""
if int(os.environ.get("TUNE_DISABLE_STRICT_METRIC_CHECKING", 0)) != 1 and (
len({k for k in result if k not in list(DEBUG_METRICS) + [DONE]}) > 1
):
base_metric = self._metric if self._metric != DEFAULT_METRIC else None
scheduler_metric = (
self._scheduler_alg.metric
if self._scheduler_alg.metric != DEFAULT_METRIC
else None
)
search_metrics = (
self._search_alg.metric
if self._search_alg.metric != DEFAULT_METRIC
else None
)
if isinstance(search_metrics, str):
search_metrics = [search_metrics]
if base_metric and base_metric not in result:
report_metric = base_metric
location = "tune.run()"
elif scheduler_metric and scheduler_metric not in result:
report_metric = scheduler_metric
location = type(self._scheduler_alg).__name__
elif search_metrics and any(
search_metric not in result for search_metric in search_metrics
):
report_metric = list(
filter(
lambda search_metric: search_metric not in result,
search_metrics,
)
)
if len(report_metric) == 1:
report_metric = report_metric[0]
location = type(self._search_alg).__name__
else:
report_metric = None
location = None
if report_metric:
raise ValueError(
"Trial returned a result which did not include the "
"specified metric(s) `{}` that `{}` expects. "
"Make sure your calls to `tune.report()` include the "
"metric, or set the "
"TUNE_DISABLE_STRICT_METRIC_CHECKING "
"environment variable to 1. Result: {}".format(
report_metric, location, result
)
)
def _process_trial_save(self, trial, result):
"""Processes a trial save.
Acts on the decision cached during the last `_process_trial` call.
Args:
trial (Trial): Trial being saved.
"""
logger.debug("Trial %s: Processing trial save.", trial)
try:
trial.saving_to.value = result
self._callbacks.on_checkpoint(
iteration=self._iteration,
trials=self._trials,
trial=trial,
checkpoint=trial.saving_to,
)
trial.on_checkpoint(trial.saving_to)
if trial.checkpoint.storage != Checkpoint.MEMORY:
self.trial_executor.mark_trial_to_checkpoint(trial)
except Exception:
logger.exception("Trial %s: Error handling checkpoint %s", trial, result)
if self._fail_fast == TrialRunner.RAISE:
raise
trial.saving_to = None
decision = self._cached_trial_decisions.pop(trial.trial_id, None)
if decision and result:
self._queue_decision(trial, decision)
def _process_trial_restore(self, trial):
"""Processes a trial restore.
Args:
trial (Trial): Trial being restored.
"""
logger.debug("Trial %s: Processing trial restore.", trial)
trial.on_restore()
logger.debug("Trial %s: Restore processed successfully", trial)
self.trial_executor.set_status(trial, Trial.RUNNING)
self.trial_executor.continue_training(trial)
self._live_trials.add(trial)
def _process_trial_failure(self, trial, error_msg):
"""Handle trial failure.
Attempt trial recovery if possible, clean up state otherwise.
Args:
trial (Trial): Failed trial.
error_msg (str): Error message prior to invoking this method.
"""
self._has_errored = True
if trial.status == Trial.RUNNING:
if trial.should_recover():
self._try_recover(trial, error_msg)
else:
self._scheduler_alg.on_trial_error(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, error=True)
self._callbacks.on_trial_error(
iteration=self._iteration, trials=self._trials, trial=trial
)
self.trial_executor.stop_trial(trial, error=True, error_msg=error_msg)
def _queue_decision(self, trial, decision):
# Get old decision, setting it to the current decision if it isn't set
old_decision = self._queued_trial_decisions.setdefault(trial.trial_id, decision)
# Stopping always takes precedence. If we decided to stop, just quit
if old_decision is TrialScheduler.STOP:
return
# The old decision wasn't STOP. We update the decision only if it is
# STOP or PAUSE. The action will only be CONTINUE if it was set by
# the first received result and was never updated after that.
if decision is TrialScheduler.STOP or decision is TrialScheduler.PAUSE:
self._queued_trial_decisions[trial.trial_id] = decision
def _execute_action(self, trial, decision):
"""Executes action based on decision.
Args:
trial (Trial): Trial to act on.
decision (str): Scheduling decision to undertake.
"""
if decision == TrialScheduler.CONTINUE:
self.trial_executor.continue_training(trial)
elif decision == TrialScheduler.PAUSE:
self.trial_executor.pause_trial(trial)
elif decision == TrialScheduler.STOP:
self.stop_trial(trial)
elif decision == TrialScheduler.NOOP:
pass
else:
raise ValueError("Invalid decision: {}".format(decision))
def _checkpoint_trial_if_needed(self, trial, force=False):
"""Checkpoints trial based off trial.last_result."""
if trial.should_checkpoint() or force:
# Save trial runtime if possible.
if trial.runner:
self.trial_executor.save(trial, storage=Checkpoint.PERSISTENT)
def _try_recover(self, trial, error_msg):
"""Tries to recover trial.
Notifies SearchAlgorithm and Scheduler if failure to recover.
Args:
trial (Trial): Trial to recover.
error_msg (str): Error message from prior to invoking this method.
"""
self._cached_trial_decisions.pop(trial.trial_id, None)
# Reset this in case the trial was in the saving state when it crashed.
if trial.is_saving:
trial.saving_to = None
if trial.is_restoring:
# Restore was unsuccessful, try again without checkpoint.
trial.clear_checkpoint()
self.trial_executor.stop_trial(
trial, error=error_msg is not None, error_msg=error_msg
)
if self.trial_executor.has_resources_for_trial(trial):
requeue_trial = False
logger.info(
"Trial %s: Attempting to restore trial state from last checkpoint.",
trial,
)
# TODO(xwjiang): For better consistency, consider not starting
# trials here. Instead rely on requeuing the trial.
started = self.trial_executor.start_trial(trial)
if not started:
requeue_trial = True
elif trial.status == Trial.ERROR:
logger.exception(
"Trial %s: Error restoring trial from checkpoint, abort.", trial
)
if started:
# Clean up again if an actor was launched
self.trial_executor.stop_trial(trial, error=True)
self._scheduler_alg.on_trial_error(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, error=True)
self._callbacks.on_trial_error(
iteration=self._iteration, trials=self._trials, trial=trial
)
else:
logger.debug("Trial %s: Restore dispatched correctly.", trial)
else:
requeue_trial = True
if requeue_trial:
logger.debug("Trial %s: Notifying Scheduler and requeueing.", trial)
self._requeue_trial(trial)
def _requeue_trial(self, trial):
"""Notification to TrialScheduler and requeue trial.
This does not notify the SearchAlgorithm because the function
evaluation is still in progress.
"""
self._scheduler_alg.on_trial_error(self, trial)
self.trial_executor.set_status(trial, Trial.PENDING)
# TODO(rliaw): Right now, this pushes the trial to the end of queue
# because restoration can be expensive. However, this is not
# ideal since it just hides the issue - a better fix would
# be to use an actor table to detect the IP of the Trainable
# and rsync the files there.
# See https://github.com/ray-project/ray/issues/5168
self._trials.pop(self._trials.index(trial))
self._trials.append(trial)
self._live_trials.add(trial)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(
TrialRunnerWrapper(self, runner_whitelist_attr={"search_alg"}), trial
)
def _update_trial_queue(self, blocking: bool = False, timeout: int = 600) -> bool:
"""Adds next trials to queue if possible.
Note that the timeout is currently unexposed to the user.
Args:
blocking (bool): Blocks until either a trial is available
or is_finished (timeout or search algorithm finishes).
timeout (int): Seconds before blocking times out.
Returns:
Boolean indicating if a new trial was created or not.
"""
self._updated_queue = True
trial = self._search_alg.next_trial()
if blocking and not trial:
start = time.time()
# Checking `is_finished` instead of _search_alg.is_finished
# is fine because blocking only occurs if all trials are
# finished and search_algorithm is not yet finished
while (
not trial and not self.is_finished() and time.time() - start < timeout
):
logger.info("Blocking for next trial...")
trial = self._search_alg.next_trial()
time.sleep(1)
if trial:
self.add_trial(trial)
return True
return False
def request_stop_trial(self, trial):
self._stop_queue.append(trial)
def request_stop_experiment(self):
self._should_stop_experiment = True
def _process_stop_requests(self):
while self._stop_queue:
t = self._stop_queue.pop()
self.stop_trial(t)
def stop_trial(self, trial):
"""The canonical implementation of stopping a trial.
Trials may be in any external status when this function is called.
If trial is in state PENDING or PAUSED, calls `on_trial_remove` for
scheduler and `on_trial_complete()` for search_alg.
If trial is in state RUNNING, calls `on_trial_complete` for both
scheduler and search_alg. The caller must ensure that there is no
outstanding future to be handled for the trial; if there is, that future
will be discarded.
"""
try:
if trial.status in [Trial.ERROR, Trial.TERMINATED]:
return
elif trial.status in [Trial.PENDING, Trial.PAUSED]:
self._scheduler_alg.on_trial_remove(self, trial)
self._search_alg.on_trial_complete(trial.trial_id)
elif trial.status is Trial.RUNNING:
# By this time trial.last_result should have been
# updated already.
self._scheduler_alg.on_trial_complete(
self, trial, flatten_dict(trial.last_result)
)
self._search_alg.on_trial_complete(
trial.trial_id, result=flatten_dict(trial.last_result)
)
self._callbacks.on_trial_complete(
iteration=self._iteration, trials=self._trials, trial=trial
)
self.trial_executor.export_trial_if_needed(trial)
self.trial_executor.stop_trial(trial)
self._live_trials.discard(trial)
except Exception:
logger.exception("Trial %s: Error stopping trial.", trial)
if self._fail_fast == TrialRunner.RAISE:
raise
self._process_trial_failure(trial, traceback.format_exc())
def cleanup_trials(self):
self.trial_executor.cleanup(self.get_trials())
def cleanup(self):
"""Cleanup trials and callbacks."""
self.cleanup_trials()
self.end_experiment_callbacks()
def _reconcile_live_trials(self):
"""Loop through live trials and remove if terminated"""
for trial in list(self._live_trials):
# Only for TERMINATED trials. ERRORed trials might be retried.
if trial.status == Trial.TERMINATED:
self._live_trials.remove(trial)
def __getstate__(self):
"""Gets state for trial.
Note that this is not used as a pickling override as
does not have all fields.
"""
state = self.__dict__.copy()
for k in [
"_trials",
"_live_trials",
"_stop_queue",
"_server",
"_search_alg",
"_scheduler_alg",
"_pending_trial_queue_times",
"trial_executor",
"_syncer",
"_callbacks",
"_checkpoint_manager",
]:
del state[k]
state["launch_web_server"] = bool(self._server)
return state
def __setstate__(self, state):
launch_web_server = state.pop("launch_web_server")
# Use session_str from previous checkpoint if it does not exist
session_str = state.pop("_session_str")
self.__dict__.setdefault("_session_str", session_str)
# Use start_time from previous checkpoint if it does not exist
start_time = state.pop("_start_time")
self.__dict__.setdefault("_start_time", start_time)
self.__dict__.update(state)
self._checkpoint_manager = self._create_checkpoint_manager()
if launch_web_server:
self._server = TuneServer(self, self._server_port)
class TrialExecutorWrapper(RayTrialExecutor):
"""Wraps around TrialExecutor class, intercepts API calls and warns users
of restricted API access.
This is meant to facilitate restricting
the current API exposure of TrialExecutor by TrialScheduler.
"""
def __init__(
self, trial_executor: RayTrialExecutor, whitelist_attr: Optional[set] = None
):
self._trial_executor = trial_executor
self._whitelist_attr = whitelist_attr or set()
def __getattr__(self, attr):
if attr not in self._whitelist_attr:
if log_once("restrict_accessing_trial_executor"):
logger.warning(
f"You are trying to access {attr} interface of "
f"TrialExecutor in TrialScheduler, which is being "
f"restricted. If you believe it is reasonable for "
f"your scheduler to access this TrialExecutor API, "
f"please reach out to Ray team on GitHub. A more "
f"strict API access pattern would be enforced "
f"starting 1.12.0"
)
return getattr(self._trial_executor, attr)
class TrialRunnerWrapper(TrialRunner):
"""Wraps around TrialRunner class, intercepts API calls and warns users
of restricted API access.
This is meant to facilitate restricting
the current API exposure of TrialRunner by TrialScheduler.
"""
_EXECUTOR_ATTR = "trial_executor"
def __init__(
self,
trial_runner: TrialRunner,
runner_whitelist_attr: Optional[set] = None,
executor_whitelist_attr: Optional[set] = None,
):
self._trial_runner = trial_runner
self._trial_executor = TrialExecutorWrapper(
trial_runner.trial_executor, executor_whitelist_attr
)
self._runner_whitelist_attr = runner_whitelist_attr or set()
def __getattr__(self, attr):
if attr == self._EXECUTOR_ATTR:
return self._trial_executor
if attr not in self._runner_whitelist_attr:
if log_once("restrict_accessing_trial_runner"):
logger.warning(
f"You are trying to access {attr} interface of "
f"TrialRunner in TrialScheduler, which is being "
f"restricted. If you believe it is reasonable for "
f"your scheduler to access this TrialRunner API, "
f"please reach out to Ray team on GitHub. A more "
f"strict API access pattern would be enforced "
f"starting 1.12s.0"
)
return getattr(self._trial_runner, attr)
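# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained example of the attribute-whitelist proxy pattern used by
# TrialExecutorWrapper and TrialRunnerWrapper above: __getattr__ warns for attributes
# outside the whitelist and then delegates to the wrapped object. The names
# RestrictedProxy and Backend are hypothetical and exist only for illustration.
import logging

_logger = logging.getLogger(__name__)


class RestrictedProxy:
    def __init__(self, wrapped, whitelist=None):
        self._wrapped = wrapped
        self._whitelist = whitelist or set()

    def __getattr__(self, attr):
        # __getattr__ is only invoked for attributes not found on the proxy itself,
        # so every delegated access passes through this check.
        if attr not in self._whitelist:
            _logger.warning("Access to restricted attribute %r", attr)
        return getattr(self._wrapped, attr)


class Backend:
    def save(self):
        return "saved"

    def load(self):
        return "loaded"


proxy = RestrictedProxy(Backend(), whitelist={"load"})
assert proxy.load() == "loaded"   # whitelisted: no warning
assert proxy.save() == "saved"    # not whitelisted: a warning is logged, the call still succeeds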
|
[] |
[] |
[
"TUNE_GLOBAL_CHECKPOINT_S",
"TUNE_DISABLE_STRICT_METRIC_CHECKING",
"TUNE_MAX_PENDING_TRIALS_PG"
] |
[]
|
["TUNE_GLOBAL_CHECKPOINT_S", "TUNE_DISABLE_STRICT_METRIC_CHECKING", "TUNE_MAX_PENDING_TRIALS_PG"]
|
python
| 3 | 0 | |
pkg/pgkit/client_test.go
|
package pgkit
import (
"context"
"os"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/tsw303005/Dcard-URL-Shortener/pkg/logkit"
)
var _ = Describe("PGClient", func() {
var (
ctx context.Context
pgConf *PGConfig
pgClient *PGClient
)
BeforeEach(func() {
ctx = logkit.NewLogger(&logkit.LoggerConfig{
Development: true,
}).WithContext(context.Background())
pgConf = &PGConfig{
URL: "postgres://postgres@postgres:5432/postgres?sslmode=disable",
}
if url := os.Getenv("POSTGRES_URL"); url != "" {
pgConf.URL = url
}
})
JustBeforeEach(func() {
pgClient = NewPGClient(ctx, pgConf)
})
AfterEach(func() {
Expect(pgClient.Close()).NotTo(HaveOccurred())
})
When("success", func() {
It("returns new PGClient without error", func() {
Expect(pgClient).NotTo(BeNil())
})
})
})
|
[
"\"POSTGRES_URL\""
] |
[] |
[
"POSTGRES_URL"
] |
[]
|
["POSTGRES_URL"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
R "Iris-Mini-Social-Network/routes"
"os"
"github.com/kataras/iris"
"github.com/kataras/iris/middleware/recover"
)
func main() {
app := iris.New()
app.Use(recover.New())
app.RegisterView(iris.HTML("./views", ".html"))
app.StaticWeb("/", "./public")
user := app.Party("/user")
{
user.Post("/signup", R.UserSignup)
user.Post("/login", R.UserLogin)
}
app.Get("/", R.Index)
app.Get("/welcome", R.Welcome)
app.Get("/explore", R.Explore)
app.Get("/404", R.NotFound)
app.Get("/signup", R.Signup)
app.Get("/login", R.Login)
app.Get("/logout", R.Logout)
app.Get("/deactivate", R.Deactivate)
app.Get("/edit_profile", R.EditProfile)
app.Get("/create_post", R.CreatePost)
app.Get("/profile/:id", R.Profile)
app.Get("/profile", R.NotFound)
app.Get("/view_post/:id", R.ViewPost)
app.Get("/view_post", R.NotFound)
app.Get("/edit_post/:id", R.EditPost)
app.Get("/edit_post", R.NotFound)
app.Get("/followers/:id", R.Followers)
app.Get("/followers", R.NotFound)
app.Get("/followings/:id", R.Followings)
app.Get("/followings", R.NotFound)
app.Get("/likes/:id", R.Likes)
app.Get("/likes", R.NotFound)
api := app.Party("/api")
{
api.Post("/create_new_post", R.CreateNewPost)
api.Post("/delete_post", R.DeletePost)
api.Post("/update_post", R.UpdatePost)
api.Post("/update_profile", R.UpdateProfile)
api.Post("/change_avatar", R.ChangeAvatar)
api.Post("/follow", R.Follow)
api.Post("/unfollow", R.Unfollow)
api.Post("/like", R.Like)
api.Post("/unlike", R.Unlike)
api.Post("/deactivate-account", R.DeactivateAcc)
}
app.Run(iris.Addr(os.Getenv("PORT")))
}
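// --- Illustrative sketch (not part of the original program) ---
// app.Run above relies on the PORT environment variable being set. The snippet below is
// a minimal, standalone example of guarding that lookup with a default; the helper name
// listenAddr, the ":8080" fallback, and the assumption that PORT holds only a port
// number are illustrative choices, not part of the original application.
package main

import (
	"fmt"
	"os"
)

func listenAddr() string {
	if port := os.Getenv("PORT"); port != "" {
		return ":" + port
	}
	return ":8080" // assumed default when PORT is unset
}

func main() {
	fmt.Println("would listen on", listenAddr())
}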
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
extract_features.py
|
import numpy as np
import os
os.environ['WINNOW_CONFIG'] = os.path.abspath('config.yaml')
from glob import glob
from winnow.feature_extraction import IntermediateCnnExtractor,frameToVideoRepresentation,SimilarityModel
from winnow.utils import create_directory,scan_videos,create_video_list,get_original_fn_from_artifact
from db import *
from db.schema import *
import yaml
if __name__ == '__main__':
representations = ['frame_level','video_level','video_signatures']
with open("config.yaml", 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
DATASET_DIR = cfg['video_source_folder']
DST_DIR = cfg['destination_folder']
VIDEO_LIST_TXT = cfg['video_list_filename']
ROOT_FOLDER_INTERMEDIATE_REPRESENTATION = cfg['root_folder_intermediate']
USE_DB = cfg['use_db']
CONNINFO = cfg['conninfo']
KEEP_FILES = cfg['keep_fileoutput']
FRAME_LEVEL_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[0])
VIDEO_LEVEL_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[1])
VIDEO_SIGNATURES_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[2])
VIDEO_SIGNATURES_FILENAME = 'video_signatures.npy'
print('Creating Intermediate Representations folder on :{}'.format(os.path.abspath(DST_DIR)))
create_directory(representations,DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION)
print('Searching for Dataset Video Files')
videos = scan_videos(DATASET_DIR,'**')
print('Number of files found: {}'.format(len(videos)))
processed_videos = scan_videos(FRAME_LEVEL_SAVE_FOLDER,'**_vgg_features.npy')
print('Found {} videos that have already been processed.'.format(len(processed_videos)))
# Get filenames
processed_filenames = get_original_fn_from_artifact(processed_videos,'_vgg_features')
full_video_names = [os.path.basename(x) for x in videos]
# Check for remaining videos
remaining_videos = [i for i,x in enumerate(full_video_names) if x not in processed_filenames]
remaining_videos_path = np.array(videos)[remaining_videos]
print('There are {} videos left'.format(len(remaining_videos_path)))
VIDEOS_LIST = create_video_list(remaining_videos_path,VIDEO_LIST_TXT)
print('Processed video List saved on :{}'.format(VIDEOS_LIST))
if len(remaining_videos_path) > 0:
# Instantiates the extractor
extractor = IntermediateCnnExtractor(VIDEOS_LIST,FRAME_LEVEL_SAVE_FOLDER)
# Starts Extracting Frame Level Features
extractor.start(batch_size=16,cores=4)
print('Converting Frame by Frame representations to Video Representations')
converter = frameToVideoRepresentation(FRAME_LEVEL_SAVE_FOLDER,VIDEO_LEVEL_SAVE_FOLDER)
converter.start()
print('Extracting Signatures from Video representations')
sm = SimilarityModel()
video_signatures = sm.predict(VIDEO_LEVEL_SAVE_FOLDER)
video_signatures = np.nan_to_num(video_signatures)
print('Saving Video Signatures on :{}'.format(VIDEO_SIGNATURES_SAVE_FOLDER))
if USE_DB:
db_engine,session = create_engine_session(CONNINFO)
create_tables(db_engine)
add_signatures(session,video_signatures,sm.original_filenames)
try:
session.commit()
except Exception:
session.rollback()
print('DB Exception')
# raise
finally:
# Get DB stats
signatures = get_all(session,Signature)
print(f"Signatures table rows:{len(signatures)}")
if KEEP_FILES or USE_DB is False:
np.save(os.path.join(VIDEO_SIGNATURES_SAVE_FOLDER,'{}.npy'.format(VIDEO_SIGNATURES_FILENAME)),video_signatures)
np.save(os.path.join(VIDEO_SIGNATURES_SAVE_FOLDER,'{}-filenames.npy'.format(VIDEO_SIGNATURES_FILENAME)),sm.original_filenames)
print('Signatures of shape {} saved on :{}'.format(video_signatures.shape,VIDEO_SIGNATURES_SAVE_FOLDER))
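# --- Illustrative sketch (not part of the original script) ---
# A standard-library-only example of the "skip already-processed videos" step above:
# compare the basenames of the discovered videos against the filenames recovered from
# the saved feature artifacts and keep only the unprocessed paths. The helper name
# remaining_paths is hypothetical and simplifies the index-based logic used above.
import os


def remaining_paths(video_paths, processed_filenames):
    """Return the video paths whose basenames are not in processed_filenames."""
    done = set(processed_filenames)
    return [p for p in video_paths if os.path.basename(p) not in done]


_videos = ['/data/a.mp4', '/data/b.mp4', '/data/c.mp4']
_already_done = ['a.mp4', 'c.mp4']
print(remaining_paths(_videos, _already_done))  # ['/data/b.mp4']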
|
[] |
[] |
[
"WINNOW_CONFIG"
] |
[]
|
["WINNOW_CONFIG"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# kuras directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "kuras"))
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"bytes"
"encoding/json"
"github.com/gorilla/mux"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
)
func main() {
upstream := os.Getenv("AS_PROXY_TO_ADDR")
bind := os.Getenv("AS_PROXY_BIND")
log.Println("Preparing local server...")
rtr := mux.NewRouter()
rtr.HandleFunc("/_matrix/client/r0/register", func(w http.ResponseWriter, r *http.Request) {
log.Println("Register request received:", r.RequestURI)
defer dumpAndCloseStream(r.Body)
b, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Fatal(err)
}
i := make(map[string]interface{})
err = json.Unmarshal(b, &i)
if err != nil {
log.Fatal(err)
}
if _, ok := i["type"]; !ok {
i["type"] = "m.login.application_service"
}
j, err := json.Marshal(i)
if err != nil {
log.Fatal(err)
}
r2, err := http.NewRequest(r.Method, upstream + r.RequestURI, bytes.NewBuffer(j))
if err != nil {
log.Fatal(err)
}
for k, v := range r.Header {
r2.Header.Set(k, v[0])
}
resp, err := http.DefaultClient.Do(r2)
if err != nil {
log.Fatal(err)
}
for k, v := range resp.Header {
w.Header().Set(k, v[0])
}
defer dumpAndCloseStream(resp.Body)
_, err = io.Copy(w, resp.Body)
if err != nil {
log.Fatal(err)
}
})
srv := &http.Server{Addr: bind, Handler: rtr}
stop := make(chan os.Signal, 1) // buffered channel, as required by signal.Notify
signal.Notify(stop, os.Interrupt, os.Kill)
go func() {
defer close(stop)
<-stop
log.Println("Stopping local server...")
_ = srv.Close()
}()
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
log.Fatal(err)
}
log.Println("Goodbye!")
}
func dumpAndCloseStream(r io.ReadCloser) {
if r == nil {
return // nothing to dump or close
}
_, _ = io.Copy(ioutil.Discard, r)
_ = r.Close()
}
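// --- Illustrative sketch (not part of the original program) ---
// A standalone example of the body-rewriting step performed in the /register handler
// above: decode the JSON payload, add a default "type" field only when it is missing,
// and re-encode. The function name ensureLoginType is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

func ensureLoginType(body []byte) ([]byte, error) {
	payload := make(map[string]interface{})
	if err := json.Unmarshal(body, &payload); err != nil {
		return nil, err
	}
	if _, ok := payload["type"]; !ok {
		payload["type"] = "m.login.application_service"
	}
	return json.Marshal(payload)
}

func main() {
	out, err := ensureLoginType([]byte(`{"username":"bridge_bot"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"type":"m.login.application_service","username":"bridge_bot"}
}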
|
[
"\"AS_PROXY_TO_ADDR\"",
"\"AS_PROXY_BIND\""
] |
[] |
[
"AS_PROXY_BIND",
"AS_PROXY_TO_ADDR"
] |
[]
|
["AS_PROXY_BIND", "AS_PROXY_TO_ADDR"]
|
go
| 2 | 0 | |
tests/test_master.py
|
# -*- coding: utf-8 -*-
import os
import shutil
import unittest
import subprocess
import pipes
import tempfile
import pytaskmaster
help_origin = """usage: master [-h] [-s] [-f FILE] [-t] [-v] [TASK] ...
Run task from script file.
positional arguments:
TASK task for run
args args for task
optional arguments:
-h, --help show this help message and exit
-s, --show-tasks show all tasks from master file
-f FILE, --file FILE use custom FILE for run tasks
-t, --template create `master.py` from template
-v, --version output version information
"""
master_test = """
def task_master_blaster(argv):
pass
"""
master_test_show = """Tasks:
master_blaster
"""
example_test = """
def task_example(argv):
pass
"""
example_test_show = """Tasks:
example
"""
class TestMaster(unittest.TestCase):
def setUp(self):
path = os.pathsep \
+ os.path.abspath(os.path.dirname(__file__)) \
+ "bin"
os.environ["PATH"] += path
self.test_dir = "tmp_test"
self.old_cwd = os.getcwd()
os.mkdir(self.test_dir)
os.chdir(self.test_dir)
os.mkdir("dir")
self.file_name_master = "master.py"
self.file_name_example = "example.py"
with open(self.file_name_master, 'w') as open_file:
open_file.write(master_test)
with open(self.file_name_example, 'w') as open_file:
open_file.write(example_test)
with open("dir/" + self.file_name_example, 'w') as open_file:
open_file.write(example_test)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.test_dir)
def test_help_message(self):
self.assertEqual(self._popen("master"), help_origin)
def _popen(self, command):
master = os.popen(command)
output = master.read()
master.close()
return output
def _call(self, command):
stdout = tempfile.TemporaryFile()
stderr = tempfile.TemporaryFile()
code = subprocess.call(command, shell=True, stdout=stdout, stderr=stderr)
stdout.close()
stderr.close()
return code
def test_show_tasks(self):
self.assertEqual(self._popen("master -s"), master_test_show)
def test_show_tasks_another_file(self):
self.assertEqual(self._popen("master -f example.py -s"), example_test_show)
self.assertEqual(self._popen("master -f dir/example.py -s"), example_test_show)
def test_run_task(self):
self.assertEqual(self._call("master master"), 0)
def test_run_task_another_file(self):
self.assertEqual(self._call("master -f example.py example"), 0)
self.assertEqual(self._call("master -f dir/example.py example"), 0)
def test_create_template(self):
old_cwd = os.getcwd()
os.chdir("dir")
self.assertFalse(os.path.isfile("master.py"))
self.assertEqual(self._call("master -t"), 0)
self.assertTrue(os.path.isfile("master.py"))
os.chdir(self.old_cwd)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
settings/settings.go
|
package settings
import (
"bytes"
"fmt"
"os"
"github.com/BurntSushi/toml"
)
var homedir = os.Getenv("HOME")
var buffer bytes.Buffer
var env = "development"
var environments = map[string]string{
"production": ".config/vantaa/production.toml",
"development": ".config/vantaa/development.toml",
"test": ".config/vantaa/test.toml",
}
var settings Settings = Settings{}
// Settings holds the configuration values loaded from the per-environment TOML files.
type Settings struct {
HashCost int
JWTExpirationDelta int
DbUrl string
PrivateKeyPath string
PublicKeyPath string
isset bool
}
// Init reads the environment from the GO_ENV environment variable; the default fallback is development.
func Init() {
env = os.Getenv("GO_ENV")
if env == "" {
fmt.Println("Warning: Setting development environment due to lack of GO_ENV value")
env = "development"
}
LoadSettingsByEnv(env)
}
// LoadSettingsByEnv loads the .toml settings file from
// ~/.config/vantaa/<environment>.toml.
func LoadSettingsByEnv(env string) {
buffer.WriteString(homedir)
buffer.WriteString("/")
buffer.WriteString(environments[env])
if _, err := toml.DecodeFile(buffer.String(), &settings); err != nil {
fmt.Println("Failed to load configuration file in ~/.config/vantaa/")
panic(err)
}
settings.isset = true
}
// GetEnvironment returns the current environment
func GetEnvironment() string {
return env
}
// Get returns the Settings loaded from the configuration files, initializing them if needed.
func Get() Settings {
if !settings.isset {
Init()
}
return settings
}
// IsTestEnvironment reports whether the current environment is "test".
func IsTestEnvironment() bool {
return env == "test"
}
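// --- Illustrative sketch (not part of the original package) ---
// A standalone example of the configuration-path resolution performed by Init and
// LoadSettingsByEnv above: pick the environment from GO_ENV (falling back to
// "development") and build the expected ~/.config/vantaa/<environment>.toml path.
// The helper name configPath is hypothetical, and filepath.Join is used here instead
// of the bytes.Buffer concatenation in the original code.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func configPath() string {
	env := os.Getenv("GO_ENV")
	if env == "" {
		env = "development" // same fallback as Init above
	}
	return filepath.Join(os.Getenv("HOME"), ".config", "vantaa", env+".toml")
}

func main() {
	fmt.Println("would load settings from:", configPath())
}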
|
[
"\"HOME\"",
"\"GO_ENV\""
] |
[] |
[
"HOME",
"GO_ENV"
] |
[]
|
["HOME", "GO_ENV"]
|
go
| 2 | 0 | |
app/session.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package app
import (
"context"
"errors"
"math"
"net/http"
"os"
"github.com/mattermost/mattermost-server/v6/app/users"
"github.com/mattermost/mattermost-server/v6/audit"
"github.com/mattermost/mattermost-server/v6/model"
"github.com/mattermost/mattermost-server/v6/shared/mlog"
"github.com/mattermost/mattermost-server/v6/store"
)
func (a *App) CreateSession(session *model.Session) (*model.Session, *model.AppError) {
session, err := a.ch.srv.userService.CreateSession(session)
if err != nil {
var invErr *store.ErrInvalidInput
switch {
case errors.As(err, &invErr):
return nil, model.NewAppError("CreateSession", "app.session.save.existing.app_error", nil, invErr.Error(), http.StatusBadRequest)
default:
return nil, model.NewAppError("CreateSession", "app.session.save.app_error", nil, err.Error(), http.StatusInternalServerError)
}
}
return session, nil
}
func (a *App) GetCloudSession(token string) (*model.Session, *model.AppError) {
apiKey := os.Getenv("MM_CLOUD_API_KEY")
if apiKey != "" && apiKey == token {
// Need a bare-bones session object for later checks
session := &model.Session{
Token: token,
IsOAuth: false,
}
session.AddProp(model.SessionPropType, model.SessionTypeCloudKey)
return session, nil
}
return nil, model.NewAppError("GetCloudSession", "api.context.invalid_token.error", map[string]interface{}{"Token": token, "Error": ""}, "The provided token is invalid", http.StatusUnauthorized)
}
func (a *App) GetRemoteClusterSession(token string, remoteId string) (*model.Session, *model.AppError) {
rc, appErr := a.GetRemoteCluster(remoteId)
if appErr == nil && rc.Token == token {
// Need a bare-bones session object for later checks
session := &model.Session{
Token: token,
IsOAuth: false,
}
session.AddProp(model.SessionPropType, model.SessionTypeRemoteclusterToken)
return session, nil
}
return nil, model.NewAppError("GetRemoteClusterSession", "api.context.invalid_token.error", map[string]interface{}{"Token": token, "Error": ""}, "The provided token is invalid", http.StatusUnauthorized)
}
func (a *App) GetSession(token string) (*model.Session, *model.AppError) {
var session *model.Session
// We intentionally skip the error check here, we only want to check if the token is valid.
// If we don't have the session we are going to create one with the token eventually.
if session, _ = a.ch.srv.userService.GetSession(token); session != nil {
if session.Token != token {
return nil, model.NewAppError("GetSession", "api.context.invalid_token.error", map[string]interface{}{"Token": token, "Error": ""}, "session token is different from the one in DB", http.StatusUnauthorized)
}
if !session.IsExpired() {
a.ch.srv.userService.AddSessionToCache(session)
}
}
var appErr *model.AppError
if session == nil || session.Id == "" {
session, appErr = a.createSessionForUserAccessToken(token)
if appErr != nil {
detailedError := ""
statusCode := http.StatusUnauthorized
if appErr.Id != "app.user_access_token.invalid_or_missing" {
detailedError = appErr.Error()
statusCode = appErr.StatusCode
} else {
mlog.Warn("Error while creating session for user access token", mlog.Err(appErr))
}
return nil, model.NewAppError("GetSession", "api.context.invalid_token.error", map[string]interface{}{"Token": token, "Error": detailedError}, "", statusCode)
}
}
if session.Id == "" || session.IsExpired() {
return nil, model.NewAppError("GetSession", "api.context.invalid_token.error", map[string]interface{}{"Token": token, "Error": ""}, "session is either nil or expired", http.StatusUnauthorized)
}
if *a.Config().ServiceSettings.SessionIdleTimeoutInMinutes > 0 &&
!session.IsOAuth && !session.IsMobileApp() &&
session.Props[model.SessionPropType] != model.SessionTypeUserAccessToken &&
!*a.Config().ServiceSettings.ExtendSessionLengthWithActivity {
timeout := int64(*a.Config().ServiceSettings.SessionIdleTimeoutInMinutes) * 1000 * 60
if (model.GetMillis() - session.LastActivityAt) > timeout {
// Revoking the session is an asynchronous task anyways since we are not checking
// for the return value of the call before returning the error.
// So moving this to a goroutine has 2 advantages:
// 1. We are treating this as a proper asynchronous task.
// 2. This also fixes a race condition in the web hub, where GetSession
// gets called from (*WebConn).isMemberOfTeam and revoking a session involves
// clearing the webconn cache, which needs the hub again.
a.Srv().Go(func() {
err := a.RevokeSessionById(session.Id)
if err != nil {
mlog.Warn("Error while revoking session", mlog.Err(err))
}
})
return nil, model.NewAppError("GetSession", "api.context.invalid_token.error", map[string]interface{}{"Token": token, "Error": ""}, "idle timeout", http.StatusUnauthorized)
}
}
return session, nil
}
func (a *App) GetSessions(userID string) ([]*model.Session, *model.AppError) {
sessions, err := a.ch.srv.userService.GetSessions(userID)
if err != nil {
return nil, model.NewAppError("GetSessions", "app.session.get_sessions.app_error", nil, err.Error(), http.StatusInternalServerError)
}
return sessions, nil
}
func (a *App) RevokeAllSessions(userID string) *model.AppError {
if err := a.ch.srv.userService.RevokeAllSessions(userID); err != nil {
switch {
case errors.Is(err, users.GetSessionError):
return model.NewAppError("RevokeAllSessions", "app.session.get_sessions.app_error", nil, err.Error(), http.StatusInternalServerError)
case errors.Is(err, users.DeleteSessionError):
return model.NewAppError("RevokeAllSessions", "app.session.remove.app_error", nil, err.Error(), http.StatusInternalServerError)
default:
return model.NewAppError("RevokeAllSessions", "app.session.remove.app_error", nil, err.Error(), http.StatusInternalServerError)
}
}
return nil
}
func (a *App) AddSessionToCache(session *model.Session) {
a.ch.srv.userService.AddSessionToCache(session)
}
// RevokeSessionsFromAllUsers will go through all the sessions active
// in the server and revoke them
func (a *App) RevokeSessionsFromAllUsers() *model.AppError {
if err := a.ch.srv.userService.RevokeSessionsFromAllUsers(); err != nil {
switch {
case errors.Is(err, users.DeleteAllAccessDataError):
return model.NewAppError("RevokeSessionsFromAllUsers", "app.oauth.remove_access_data.app_error", nil, err.Error(), http.StatusInternalServerError)
default:
return model.NewAppError("RevokeSessionsFromAllUsers", "app.session.remove_all_sessions_for_team.app_error", nil, err.Error(), http.StatusInternalServerError)
}
}
return nil
}
func (a *App) ReturnSessionToPool(session *model.Session) {
a.ch.srv.userService.ReturnSessionToPool(session)
}
func (a *App) ClearSessionCacheForUser(userID string) {
a.ch.srv.userService.ClearUserSessionCache(userID)
}
func (a *App) ClearSessionCacheForAllUsers() {
a.ch.srv.userService.ClearAllUsersSessionCache()
}
func (a *App) ClearSessionCacheForUserSkipClusterSend(userID string) {
a.Srv().clearSessionCacheForUserSkipClusterSend(userID)
}
func (a *App) ClearSessionCacheForAllUsersSkipClusterSend() {
a.Srv().clearSessionCacheForAllUsersSkipClusterSend()
}
func (a *App) RevokeSessionsForDeviceId(userID string, deviceID string, currentSessionId string) *model.AppError {
if err := a.ch.srv.userService.RevokeSessionsForDeviceId(userID, deviceID, currentSessionId); err != nil {
return model.NewAppError("RevokeSessionsForDeviceId", "app.session.get_sessions.app_error", nil, err.Error(), http.StatusInternalServerError)
}
return nil
}
func (a *App) GetSessionById(sessionID string) (*model.Session, *model.AppError) {
session, err := a.ch.srv.userService.GetSessionByID(sessionID)
if err != nil {
return nil, model.NewAppError("GetSessionById", "app.session.get.app_error", nil, err.Error(), http.StatusBadRequest)
}
return session, nil
}
func (a *App) RevokeSessionById(sessionID string) *model.AppError {
session, err := a.GetSessionById(sessionID)
if err != nil {
return model.NewAppError("RevokeSessionById", "app.session.get.app_error", nil, err.Error(), http.StatusBadRequest)
}
return a.RevokeSession(session)
}
func (a *App) RevokeSession(session *model.Session) *model.AppError {
if err := a.ch.srv.userService.RevokeSession(session); err != nil {
switch {
case errors.Is(err, users.DeleteSessionError):
return model.NewAppError("RevokeSession", "app.session.remove.app_error", nil, err.Error(), http.StatusInternalServerError)
default:
return model.NewAppError("RevokeSession", "app.session.remove.app_error", nil, err.Error(), http.StatusInternalServerError)
}
}
return nil
}
func (a *App) AttachDeviceId(sessionID string, deviceID string, expiresAt int64) *model.AppError {
_, err := a.Srv().Store.Session().UpdateDeviceId(sessionID, deviceID, expiresAt)
if err != nil {
return model.NewAppError("AttachDeviceId", "app.session.update_device_id.app_error", nil, err.Error(), http.StatusInternalServerError)
}
return nil
}
func (a *App) UpdateLastActivityAtIfNeeded(session model.Session) {
now := model.GetMillis()
a.UpdateWebConnUserActivity(session, now)
if now-session.LastActivityAt < model.SessionActivityTimeout {
return
}
if err := a.Srv().Store.Session().UpdateLastActivityAt(session.Id, now); err != nil {
mlog.Warn("Failed to update LastActivityAt", mlog.String("user_id", session.UserId), mlog.String("session_id", session.Id), mlog.Err(err))
}
session.LastActivityAt = now
a.ch.srv.userService.AddSessionToCache(&session)
}
// ExtendSessionExpiryIfNeeded extends Session.ExpiresAt based on session lengths in config.
// A new ExpiresAt is only written if enough time has elapsed since last update.
// Returns true only if the session was extended.
func (a *App) ExtendSessionExpiryIfNeeded(session *model.Session) bool {
if !*a.Config().ServiceSettings.ExtendSessionLengthWithActivity {
return false
}
if session == nil || session.IsExpired() {
return false
}
sessionLength := a.GetSessionLengthInMillis(session)
// Only extend the expiry if the lesser of 1% or 1 day has elapsed within the
// current session duration.
threshold := int64(math.Min(float64(sessionLength)*0.01, float64(24*60*60*1000)))
// Minimum session length is 1 day as of this writing, therefore a minimum ~14 minutes threshold.
// However we'll add a sanity check here in case that changes. Minimum 5 minute threshold,
// meaning we won't write a new expiry more than every 5 minutes.
if threshold < 5*60*1000 {
threshold = 5 * 60 * 1000
}
now := model.GetMillis()
elapsed := now - (session.ExpiresAt - sessionLength)
if elapsed < threshold {
return false
}
auditRec := a.MakeAuditRecord("extendSessionExpiry", audit.Fail)
defer a.LogAuditRec(auditRec, nil)
auditRec.AddMeta("session", session)
newExpiry := now + sessionLength
if err := a.ch.srv.userService.ExtendSessionExpiry(session, newExpiry); err != nil {
mlog.Error("Failed to update ExpiresAt", mlog.String("user_id", session.UserId), mlog.String("session_id", session.Id), mlog.Err(err))
auditRec.AddMeta("err", err.Error())
return false
}
mlog.Debug("Session extended", mlog.String("user_id", session.UserId), mlog.String("session_id", session.Id),
mlog.Int64("newExpiry", newExpiry), mlog.Int64("session_length", sessionLength))
auditRec.Success()
auditRec.AddMeta("extended_session", session)
return true
}
// GetSessionLengthInMillis returns the session length, in milliseconds,
// based on the type of session (Mobile, SSO, Web/LDAP).
func (a *App) GetSessionLengthInMillis(session *model.Session) int64 {
if session == nil {
return 0
}
var days int
if session.IsMobileApp() {
days = *a.Config().ServiceSettings.SessionLengthMobileInDays
} else if session.IsSSOLogin() {
days = *a.Config().ServiceSettings.SessionLengthSSOInDays
} else {
days = *a.Config().ServiceSettings.SessionLengthWebInDays
}
return int64(days * 24 * 60 * 60 * 1000)
}
// SetSessionExpireInDays sets the session's expiry the specified number of days
// relative to either the session creation date or the current time, depending
// on the `ExtendSessionOnActivity` config setting.
func (a *App) SetSessionExpireInDays(session *model.Session, days int) {
a.ch.srv.userService.SetSessionExpireInDays(session, days)
}
func (a *App) CreateUserAccessToken(token *model.UserAccessToken) (*model.UserAccessToken, *model.AppError) {
user, nErr := a.ch.srv.userService.GetUser(token.UserId)
if nErr != nil {
var nfErr *store.ErrNotFound
switch {
case errors.As(nErr, &nfErr):
return nil, model.NewAppError("CreateUserAccessToken", MissingAccountError, nil, nfErr.Error(), http.StatusNotFound)
default:
return nil, model.NewAppError("CreateUserAccessToken", "app.user.get.app_error", nil, nErr.Error(), http.StatusInternalServerError)
}
}
if !*a.Config().ServiceSettings.EnableUserAccessTokens && !user.IsBot {
return nil, model.NewAppError("CreateUserAccessToken", "app.user_access_token.disabled", nil, "", http.StatusNotImplemented)
}
token.Token = model.NewId()
token, nErr = a.Srv().Store.UserAccessToken().Save(token)
if nErr != nil {
var appErr *model.AppError
switch {
case errors.As(nErr, &appErr):
return nil, appErr
default:
return nil, model.NewAppError("CreateUserAccessToken", "app.user_access_token.save.app_error", nil, nErr.Error(), http.StatusInternalServerError)
}
}
// Don't send emails to bot users.
if !user.IsBot {
if err := a.Srv().EmailService.SendUserAccessTokenAddedEmail(user.Email, user.Locale, a.GetSiteURL()); err != nil {
a.Log().Error("Unable to send user access token added email", mlog.Err(err), mlog.String("user_id", user.Id))
}
}
return token, nil
}
func (a *App) createSessionForUserAccessToken(tokenString string) (*model.Session, *model.AppError) {
token, nErr := a.Srv().Store.UserAccessToken().GetByToken(tokenString)
if nErr != nil {
return nil, model.NewAppError("createSessionForUserAccessToken", "app.user_access_token.invalid_or_missing", nil, nErr.Error(), http.StatusUnauthorized)
}
if !token.IsActive {
return nil, model.NewAppError("createSessionForUserAccessToken", "app.user_access_token.invalid_or_missing", nil, "inactive_token", http.StatusUnauthorized)
}
user, nErr := a.Srv().Store.User().Get(context.Background(), token.UserId)
if nErr != nil {
var nfErr *store.ErrNotFound
switch {
case errors.As(nErr, &nfErr):
return nil, model.NewAppError("createSessionForUserAccessToken", MissingAccountError, nil, nfErr.Error(), http.StatusNotFound)
default:
return nil, model.NewAppError("createSessionForUserAccessToken", "app.user.get.app_error", nil, nErr.Error(), http.StatusInternalServerError)
}
}
if !*a.Config().ServiceSettings.EnableUserAccessTokens && !user.IsBot {
return nil, model.NewAppError("createSessionForUserAccessToken", "app.user_access_token.invalid_or_missing", nil, "EnableUserAccessTokens=false", http.StatusUnauthorized)
}
if user.DeleteAt != 0 {
return nil, model.NewAppError("createSessionForUserAccessToken", "app.user_access_token.invalid_or_missing", nil, "inactive_user_id="+user.Id, http.StatusUnauthorized)
}
session := &model.Session{
Token: token.Token,
UserId: user.Id,
Roles: user.GetRawRoles(),
IsOAuth: false,
}
session.AddProp(model.SessionPropUserAccessTokenId, token.Id)
session.AddProp(model.SessionPropType, model.SessionTypeUserAccessToken)
if user.IsBot {
session.AddProp(model.SessionPropIsBot, model.SessionPropIsBotValue)
}
if user.IsGuest() {
session.AddProp(model.SessionPropIsGuest, "true")
} else {
session.AddProp(model.SessionPropIsGuest, "false")
}
a.ch.srv.userService.SetSessionExpireInDays(session, model.SessionUserAccessTokenExpiry)
session, nErr = a.Srv().Store.Session().Save(session)
if nErr != nil {
var invErr *store.ErrInvalidInput
switch {
case errors.As(nErr, &invErr):
return nil, model.NewAppError("CreateSession", "app.session.save.existing.app_error", nil, invErr.Error(), http.StatusBadRequest)
default:
return nil, model.NewAppError("CreateSession", "app.session.save.app_error", nil, nErr.Error(), http.StatusInternalServerError)
}
}
a.ch.srv.userService.AddSessionToCache(session)
return session, nil
}
func (a *App) RevokeUserAccessToken(token *model.UserAccessToken) *model.AppError {
var session *model.Session
session, _ = a.ch.srv.userService.GetSessionContext(context.Background(), token.Token)
if err := a.Srv().Store.UserAccessToken().Delete(token.Id); err != nil {
return model.NewAppError("RevokeUserAccessToken", "app.user_access_token.delete.app_error", nil, err.Error(), http.StatusInternalServerError)
}
if session == nil {
return nil
}
return a.RevokeSession(session)
}
func (a *App) DisableUserAccessToken(token *model.UserAccessToken) *model.AppError {
var session *model.Session
session, _ = a.ch.srv.userService.GetSessionContext(context.Background(), token.Token)
if err := a.Srv().Store.UserAccessToken().UpdateTokenDisable(token.Id); err != nil {
return model.NewAppError("DisableUserAccessToken", "app.user_access_token.update_token_disable.app_error", nil, err.Error(), http.StatusInternalServerError)
}
if session == nil {
return nil
}
return a.RevokeSession(session)
}
func (a *App) EnableUserAccessToken(token *model.UserAccessToken) *model.AppError {
var session *model.Session
session, _ = a.ch.srv.userService.GetSessionContext(context.Background(), token.Token)
err := a.Srv().Store.UserAccessToken().UpdateTokenEnable(token.Id)
if err != nil {
return model.NewAppError("EnableUserAccessToken", "app.user_access_token.update_token_enable.app_error", nil, err.Error(), http.StatusInternalServerError)
}
if session == nil {
return nil
}
return nil
}
func (a *App) GetUserAccessTokens(page, perPage int) ([]*model.UserAccessToken, *model.AppError) {
tokens, err := a.Srv().Store.UserAccessToken().GetAll(page*perPage, perPage)
if err != nil {
return nil, model.NewAppError("GetUserAccessTokens", "app.user_access_token.get_all.app_error", nil, err.Error(), http.StatusInternalServerError)
}
for _, token := range tokens {
token.Token = ""
}
return tokens, nil
}
func (a *App) GetUserAccessTokensForUser(userID string, page, perPage int) ([]*model.UserAccessToken, *model.AppError) {
tokens, err := a.Srv().Store.UserAccessToken().GetByUser(userID, page*perPage, perPage)
if err != nil {
return nil, model.NewAppError("GetUserAccessTokensForUser", "app.user_access_token.get_by_user.app_error", nil, err.Error(), http.StatusInternalServerError)
}
for _, token := range tokens {
token.Token = ""
}
return tokens, nil
}
func (a *App) GetUserAccessToken(tokenID string, sanitize bool) (*model.UserAccessToken, *model.AppError) {
token, err := a.Srv().Store.UserAccessToken().Get(tokenID)
if err != nil {
var nfErr *store.ErrNotFound
switch {
case errors.As(err, &nfErr):
return nil, model.NewAppError("GetUserAccessToken", "app.user_access_token.get_by_user.app_error", nil, nfErr.Error(), http.StatusNotFound)
default:
return nil, model.NewAppError("GetUserAccessToken", "app.user_access_token.get_by_user.app_error", nil, err.Error(), http.StatusInternalServerError)
}
}
if sanitize {
token.Token = ""
}
return token, nil
}
func (a *App) SearchUserAccessTokens(term string) ([]*model.UserAccessToken, *model.AppError) {
tokens, err := a.Srv().Store.UserAccessToken().Search(term)
if err != nil {
return nil, model.NewAppError("SearchUserAccessTokens", "app.user_access_token.search.app_error", nil, err.Error(), http.StatusInternalServerError)
}
for _, token := range tokens {
token.Token = ""
}
return tokens, nil
}
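// --- Illustrative sketch (not part of the original file) ---
// A standalone worked example of the extension threshold computed in
// ExtendSessionExpiryIfNeeded above: the lesser of 1% of the session length or one day,
// but never below five minutes. The sample session lengths are arbitrary values chosen
// only to show how the cap and the floor behave.
package main

import (
	"fmt"
	"math"
)

func extensionThreshold(sessionLengthMillis int64) int64 {
	threshold := int64(math.Min(float64(sessionLengthMillis)*0.01, float64(24*60*60*1000)))
	if threshold < 5*60*1000 {
		threshold = 5 * 60 * 1000
	}
	return threshold
}

func main() {
	day := int64(24 * 60 * 60 * 1000)
	fmt.Println(extensionThreshold(1*day))   // 1% of 1 day ≈ 14.4 min -> 864000 ms
	fmt.Println(extensionThreshold(30*day))  // 1% of 30 days = 7.2 h -> 25920000 ms
	fmt.Println(extensionThreshold(200*day)) // capped at 1 day -> 86400000 ms
}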
|
[
"\"MM_CLOUD_API_KEY\""
] |
[] |
[
"MM_CLOUD_API_KEY"
] |
[]
|
["MM_CLOUD_API_KEY"]
|
go
| 1 | 0 | |
config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'YOU_WILL_NEVER_GUESS'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Mail server for reporting errors
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['[email protected]']
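# --- Illustrative sketch (not part of the original module) ---
# The fallbacks above use `os.environ.get(...) or default`, which falls back both when
# the variable is unset and when it is set to an empty string. A minimal demonstration
# with a hypothetical variable name:
import os

os.environ['EXAMPLE_SECRET'] = ''                       # set, but empty
value = os.environ.get('EXAMPLE_SECRET') or 'fallback'
print(value)                                            # 'fallback' -- empty string is treated as missing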
|
[] |
[] |
[
"MAIL_SERVER",
"MAIL_PASSWORD",
"DATABASE_URL",
"MAIL_PORT",
"SECRET_KEY",
"MAIL_USERNAME",
"MAIL_USE_TLS"
] |
[]
|
["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_TLS"]
|
python
| 7 | 0 | |
modin/pandas/test/test_io.py
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.errors import ParserWarning
from collections import OrderedDict
from modin.pandas.utils import to_pandas
from pathlib import Path
import pyarrow as pa
import pyarrow.parquet as pq
import os
import shutil
import sqlalchemy as sa
import csv
from .utils import (
df_equals,
json_short_string,
json_short_bytes,
json_long_string,
json_long_bytes,
)
from modin import execution_engine
if os.environ.get("MODIN_BACKEND", "Pandas").lower() == "pandas":
import modin.pandas as pd
else:
import modin.experimental.pandas as pd
pd.DEFAULT_NPARTITIONS = 4
TEST_PARQUET_FILENAME = "test.parquet"
TEST_CSV_FILENAME = "test.csv"
TEST_JSON_FILENAME = "test.json"
TEST_HTML_FILENAME = "test.html"
TEST_EXCEL_FILENAME = "test.xlsx"
TEST_FEATHER_FILENAME = "test.feather"
TEST_READ_HDF_FILENAME = "test.hdf"
TEST_WRITE_HDF_FILENAME_MODIN = "test_write_modin.hdf"
TEST_WRITE_HDF_FILENAME_PANDAS = "test_write_pandas.hdf"
TEST_STATA_FILENAME = "test.dta"
TEST_PICKLE_FILENAME = "test.pkl"
TEST_SAS_FILENAME = os.getcwd() + "/data/test1.sas7bdat"
TEST_FWF_FILENAME = "test_fwf.txt"
TEST_GBQ_FILENAME = "test_gbq."
SMALL_ROW_SIZE = 2000
@pytest.fixture
def make_parquet_file():
"""Pytest fixture factory that makes a parquet file/dir for testing.
Yields:
Function that generates a parquet file/dir
"""
def _make_parquet_file(
row_size=SMALL_ROW_SIZE, force=False, directory=False, partitioned_columns=[]
):
"""Helper function to generate parquet files/directories.
Args:
row_size: Number of rows for the dataframe.
force: Create a new file/directory even if one already exists.
directory: Create a partitioned directory using pyarrow.
partitioned_columns: Create a partitioned directory using pandas.
Will be ignored if directory=True.
"""
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
if os.path.exists(TEST_PARQUET_FILENAME) and not force:
pass
elif directory:
if os.path.exists(TEST_PARQUET_FILENAME):
shutil.rmtree(TEST_PARQUET_FILENAME)
else:
os.mkdir(TEST_PARQUET_FILENAME)
table = pa.Table.from_pandas(df)
pq.write_to_dataset(table, root_path=TEST_PARQUET_FILENAME)
elif len(partitioned_columns) > 0:
df.to_parquet(TEST_PARQUET_FILENAME, partition_cols=partitioned_columns)
else:
df.to_parquet(TEST_PARQUET_FILENAME)
# Return function that generates parquet files/directories
yield _make_parquet_file
# Delete parquet file that was created
if os.path.exists(TEST_PARQUET_FILENAME):
if os.path.isdir(TEST_PARQUET_FILENAME):
shutil.rmtree(TEST_PARQUET_FILENAME)
else:
os.remove(TEST_PARQUET_FILENAME)
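# --- Illustrative sketch (not part of the original test module) ---
# A minimal, self-contained version of the fixture-factory pattern used by
# make_parquet_file above (and make_csv_file further below): the fixture yields a
# factory function, records what it created, and cleans everything up after the test
# finishes. The fixture name make_text_file is hypothetical and uses pytest's built-in
# tmp_path fixture.
import pytest


@pytest.fixture
def make_text_file(tmp_path):
    created = []

    def _make(name="example.txt", body="hello"):
        path = tmp_path / name
        path.write_text(body)
        created.append(path)
        return path

    yield _make
    # Teardown: remove every file the factory produced during the test.
    for path in created:
        if path.exists():
            path.unlink()


def test_make_text_file(make_text_file):
    path = make_text_file(body="data")
    assert path.read_text() == "data"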
def create_test_modin_dataframe():
df = pd.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
def create_test_pandas_dataframe():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
return df
def assert_files_eq(path1, path2):
with open(path1, "rb") as file1, open(path2, "rb") as file2:
file1_content = file1.read()
file2_content = file2.read()
if file1_content == file2_content:
return True
else:
return False
def teardown_test_file(test_path):
if os.path.exists(test_path):
os.remove(test_path)
@pytest.fixture
def make_csv_file(delimiter=",", compression="infer"):
"""Pytest fixture factory that makes temp csv files for testing.
Yields:
Function that generates csv files
"""
filenames = []
def _make_csv_file(
filename=TEST_CSV_FILENAME,
row_size=SMALL_ROW_SIZE,
force=True,
delimiter=delimiter,
encoding=None,
compression=compression,
):
if os.path.exists(filename) and not force:
pass
else:
dates = pandas.date_range("2000", freq="h", periods=row_size)
df = pandas.DataFrame(
{
"col1": np.arange(row_size),
"col2": [str(x.date()) for x in dates],
"col3": np.arange(row_size),
"col4": [str(x.time()) for x in dates],
}
)
if compression == "gzip":
filename = "{}.gz".format(filename)
elif compression == "zip" or compression == "xz" or compression == "bz2":
filename = "{fname}.{comp}".format(fname=filename, comp=compression)
df.to_csv(
filename, sep=delimiter, encoding=encoding, compression=compression
)
filenames.append(filename)
return df
# Return function that generates csv files
yield _make_csv_file
# Delete csv files that were created
for filename in filenames:
if os.path.exists(filename):
try:
os.remove(filename)
except PermissionError:
pass
def setup_json_file(row_size, force=False):
if os.path.exists(TEST_JSON_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_json(TEST_JSON_FILENAME)
def setup_json_lines_file(row_size, force=False):
if os.path.exists(TEST_JSON_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_json(TEST_JSON_FILENAME, lines=True, orient="records")
def teardown_json_file():
if os.path.exists(TEST_JSON_FILENAME):
os.remove(TEST_JSON_FILENAME)
def setup_html_file(row_size, force=False):
if os.path.exists(TEST_HTML_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_html(TEST_HTML_FILENAME)
def teardown_html_file():
if os.path.exists(TEST_HTML_FILENAME):
os.remove(TEST_HTML_FILENAME)
def setup_clipboard(row_size, force=False):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
def setup_excel_file(row_size, force=False):
if os.path.exists(TEST_EXCEL_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_excel(TEST_EXCEL_FILENAME)
def teardown_excel_file():
if os.path.exists(TEST_EXCEL_FILENAME):
try:
os.remove(TEST_EXCEL_FILENAME)
except PermissionError:
pass
def setup_feather_file(row_size, force=False):
if os.path.exists(TEST_FEATHER_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_feather(TEST_FEATHER_FILENAME)
def teardown_feather_file():
if os.path.exists(TEST_FEATHER_FILENAME):
os.remove(TEST_FEATHER_FILENAME)
def setup_hdf_file(row_size, force=False, format=None):
if os.path.exists(TEST_READ_HDF_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_hdf(TEST_READ_HDF_FILENAME, key="df", format=format)
def teardown_hdf_file():
if os.path.exists(TEST_READ_HDF_FILENAME):
os.remove(TEST_READ_HDF_FILENAME)
def setup_stata_file(row_size, force=False):
if os.path.exists(TEST_STATA_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_stata(TEST_STATA_FILENAME)
def teardown_stata_file():
if os.path.exists(TEST_STATA_FILENAME):
os.remove(TEST_STATA_FILENAME)
def setup_pickle_file(row_size, force=False):
if os.path.exists(TEST_PICKLE_FILENAME) and not force:
pass
else:
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
df.to_pickle(TEST_PICKLE_FILENAME)
def teardown_pickle_file():
if os.path.exists(TEST_PICKLE_FILENAME):
os.remove(TEST_PICKLE_FILENAME)
@pytest.fixture
def make_sql_connection():
"""Sets up sql connections and takes them down after the caller is done.
Yields:
Factory that generates sql connection objects
"""
filenames = []
def _sql_connection(filename, table=""):
# Remove file if exists
if os.path.exists(filename):
os.remove(filename)
filenames.append(filename)
# Create connection and, if needed, table
conn = "sqlite:///{}".format(filename)
if table:
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5, 6],
"col2": [7, 8, 9, 10, 11, 12, 13],
"col3": [14, 15, 16, 17, 18, 19, 20],
"col4": [21, 22, 23, 24, 25, 26, 27],
"col5": [0, 0, 0, 0, 0, 0, 0],
}
)
df.to_sql(table, conn)
return conn
yield _sql_connection
# Takedown the fixture
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
def setup_fwf_file(overwrite=False, fwf_data=None):
if not overwrite and os.path.exists(TEST_FWF_FILENAME):
return
if fwf_data is None:
fwf_data = """ACW000116041961TAVG -142 k 183 k 419 k 720 k 1075 k 1546 k 1517 k 1428 k 1360 k 1121 k 457 k -92 k
ACW000116041962TAVG 60 k 32 k -207 k 582 k 855 k 1328 k 1457 k 1340 k 1110 k 941 k 270 k -179 k
ACW000116041963TAVG -766 k -606 k -152 k 488 k 1171 k 1574 k 1567 k 1543 k 1279 k 887 k 513 k -161 k
ACW000116041964TAVG 9 k -138 k 2 k 685 k 1166 k 1389 k 1453 k 1504 k 1168 k 735 k 493 k 59 k
ACW000116041965TAVG -9 k -158 k -15 k 537 k 934 k 1447 k 1434 k 1424 k 1324 k 921 k -22 k -231 k
ACW000116041966TAVG -490 k -614 k 108 k 246 k 1082 k 1642 k 1620 k 1471 k 1195 k 803 k 329 k 2 k
ACW000116041967TAVG -270 k 36 k 397 k 481 k 1052 k 1373 k 1655 k 1598 k 1318 k 997 k 559 k -96 k
ACW000116041968TAVG -306 k -183 k 220 k 714 k 935 k 1635 k 1572 k 1718 k 1331 k 781 k 180 k -56 k
ACW000116041969TAVG -134 k -494 k -185 k 497 k 962 k 1634 k 1687 k 1773 k 1379 k 932 k 321 k -275 k
ACW000116041970TAVG -483 k -704 k -75 k 261 k 1093 k 1724 k 1470 k 1609 k 1163 k 836 k 300 k 73 k
ACW000116041971TAVG -6 k 83 k -40 k 472 k 1180 k 1411 k 1700 k 1600 k 1165 k 908 k 361 k 383 k
ACW000116041972TAVG -377 k -4 k 250 k 556 k 1117 k 1444 k 1778 k 1545 k 1073 k 797 k 481 k 404 k
ACW000116041973TAVG 61 k 169 k 453 k 472 k 1075 k 1545 k 1866 k 1579 k 1199 k 563 k 154 k 11 k
ACW000116041974TAVG 191 k 209 k 339 k 748 k 1094 k 1463 k 1498 k 1541 k 1319 k 585 k 428 k 335 k
ACW000116041975TAVG 346 k 88 k 198 k 488 k 1165 k 1483 k 1756 k 1906 k 1374 k 845 k 406 k 387 k
ACW000116041976TAVG -163 k -62 k -135 k 502 k 1128 k 1461 k 1822 k 1759 k 1136 k 715 k 458 k -205 k
ACW000116041977TAVG -192 k -279 k 234 k 332 k 1128 k 1566 k 1565 k 1556 k 1126 k 949 k 421 k 162 k
ACW000116041978TAVG 55 k -354 k 66 k 493 k 1155 k 1552 k 1564 k 1555 k 1061 k 932 k 688 k -464 k
ACW000116041979TAVG -618 k -632 k 35 k 474 k 993 k 1566 k 1484 k 1483 k 1229 k 647 k 412 k -40 k
ACW000116041980TAVG -340 k -500 k -35 k 524 k 1071 k 1534 k 1655 k 1502 k 1269 k 660 k 138 k 125 k"""
with open(TEST_FWF_FILENAME, "w") as f:
f.write(fwf_data)
def teardown_fwf_file():
if os.path.exists(TEST_FWF_FILENAME):
try:
os.remove(TEST_FWF_FILENAME)
except PermissionError:
pass
def test_from_parquet(make_parquet_file):
make_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME)
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME)
df_equals(modin_df, pandas_df)
def test_from_parquet_with_columns(make_parquet_file):
make_parquet_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
df_equals(modin_df, pandas_df)
def test_from_parquet_partition(make_parquet_file):
make_parquet_file(SMALL_ROW_SIZE, directory=True)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME)
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME)
df_equals(modin_df, pandas_df)
def test_from_parquet_partition_with_columns(make_parquet_file):
make_parquet_file(SMALL_ROW_SIZE, directory=True)
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
df_equals(modin_df, pandas_df)
def test_from_parquet_partitioned_columns(make_parquet_file):
make_parquet_file(SMALL_ROW_SIZE, partitioned_columns=["col1"])
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME)
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME)
df_equals(modin_df, pandas_df)
def test_from_parquet_partitioned_columns_with_columns(make_parquet_file):
make_parquet_file(SMALL_ROW_SIZE, partitioned_columns=["col1"])
pandas_df = pandas.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
modin_df = pd.read_parquet(TEST_PARQUET_FILENAME, columns=["col1"])
df_equals(modin_df, pandas_df)
def test_from_parquet_pandas_index():
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
filepath = "tmp.parquet"
pandas_df.set_index("idx").to_parquet(filepath)
# read the same parquet using modin.pandas
df_equals(pd.read_parquet(filepath), pandas.read_parquet(filepath))
pandas_df.set_index(["idx", "A"]).to_parquet(filepath)
df_equals(pd.read_parquet(filepath), pandas.read_parquet(filepath))
os.remove(filepath)
def test_from_parquet_pandas_index_partitioned():
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
filepath = "tmp_folder.parquet"
pandas_df.set_index("idx").to_parquet(filepath, partition_cols=["A"])
# read the same parquet using modin.pandas
df_equals(pd.read_parquet(filepath), pandas.read_parquet(filepath))
shutil.rmtree(filepath)
def test_from_parquet_hdfs():
path = "modin/pandas/test/data/hdfs.parquet"
pandas_df = pandas.read_parquet(path)
modin_df = pd.read_parquet(path)
df_equals(modin_df, pandas_df)
def test_from_json():
setup_json_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_json(TEST_JSON_FILENAME)
modin_df = pd.read_json(TEST_JSON_FILENAME)
df_equals(modin_df, pandas_df)
teardown_json_file()
def test_from_json_categories():
pandas_df = pandas.read_json(
"modin/pandas/test/data/test_categories.json",
dtype={"one": "int64", "two": "category"},
)
modin_df = pd.read_json(
"modin/pandas/test/data/test_categories.json",
dtype={"one": "int64", "two": "category"},
)
df_equals(modin_df, pandas_df)
def test_from_json_lines():
setup_json_lines_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_json(TEST_JSON_FILENAME, lines=True)
modin_df = pd.read_json(TEST_JSON_FILENAME, lines=True)
df_equals(modin_df, pandas_df)
teardown_json_file()
@pytest.mark.parametrize(
"data", [json_short_string, json_short_bytes, json_long_string, json_long_bytes],
)
def test_read_json_string_bytes(data):
with pytest.warns(UserWarning):
modin_df = pd.read_json(data)
# For I/O objects we need to rewind to reuse the same object.
if hasattr(data, "seek"):
data.seek(0)
df_equals(modin_df, pandas.read_json(data))
def test_from_html():
setup_html_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_html(TEST_HTML_FILENAME)[0]
modin_df = pd.read_html(TEST_HTML_FILENAME)
df_equals(modin_df, pandas_df)
teardown_html_file()
@pytest.mark.skip(reason="No clipboard on Travis")
def test_from_clipboard():
setup_clipboard(SMALL_ROW_SIZE)
pandas_df = pandas.read_clipboard()
modin_df = pd.read_clipboard()
df_equals(modin_df, pandas_df)
@pytest.mark.xfail(reason="read_excel is broken for now, see #1733 for details")
def test_from_excel():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME)
modin_df = pd.read_excel(TEST_EXCEL_FILENAME)
df_equals(modin_df, pandas_df)
teardown_excel_file()
def test_from_excel_engine():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME, engine="xlrd")
with pytest.warns(UserWarning):
modin_df = pd.read_excel(TEST_EXCEL_FILENAME, engine="xlrd")
df_equals(modin_df, pandas_df)
teardown_excel_file()
def test_from_excel_index_col():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME, index_col=0)
with pytest.warns(UserWarning):
modin_df = pd.read_excel(TEST_EXCEL_FILENAME, index_col=0)
df_equals(modin_df, pandas_df)
teardown_excel_file()
def test_from_excel_all_sheets():
setup_excel_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_excel(TEST_EXCEL_FILENAME, sheet_name=None)
modin_df = pd.read_excel(TEST_EXCEL_FILENAME, sheet_name=None)
assert isinstance(pandas_df, (OrderedDict, dict))
assert isinstance(modin_df, type(pandas_df))
assert pandas_df.keys() == modin_df.keys()
for key in pandas_df.keys():
df_equals(modin_df.get(key), pandas_df.get(key))
teardown_excel_file()
# @pytest.mark.skip(reason="Arrow version mismatch between Pandas and Feather")
def test_from_feather():
setup_feather_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_feather(TEST_FEATHER_FILENAME)
modin_df = pd.read_feather(TEST_FEATHER_FILENAME)
df_equals(modin_df, pandas_df)
teardown_feather_file()
@pytest.mark.skipif(os.name == "nt", reason="Windows not supported")
def test_from_hdf():
setup_hdf_file(SMALL_ROW_SIZE, format=None)
pandas_df = pandas.read_hdf(TEST_READ_HDF_FILENAME, key="df")
modin_df = pd.read_hdf(TEST_READ_HDF_FILENAME, key="df")
df_equals(modin_df, pandas_df)
teardown_hdf_file()
@pytest.mark.skipif(os.name == "nt", reason="Windows not supported")
def test_from_hdf_format():
setup_hdf_file(SMALL_ROW_SIZE, format="table")
pandas_df = pandas.read_hdf(TEST_READ_HDF_FILENAME, key="df")
modin_df = pd.read_hdf(TEST_READ_HDF_FILENAME, key="df")
df_equals(modin_df, pandas_df)
teardown_hdf_file()
def test_from_stata():
setup_stata_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_stata(TEST_STATA_FILENAME)
modin_df = pd.read_stata(TEST_STATA_FILENAME)
df_equals(modin_df, pandas_df)
teardown_stata_file()
def test_from_pickle():
setup_pickle_file(SMALL_ROW_SIZE)
pandas_df = pandas.read_pickle(TEST_PICKLE_FILENAME)
modin_df = pd.read_pickle(TEST_PICKLE_FILENAME)
df_equals(modin_df, pandas_df)
teardown_pickle_file()
def test_from_sql(make_sql_connection):
filename = "test_from_sql.db"
table = "test_from_sql"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
modin_df = pd.read_sql(query, conn)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_sql(query, conn, index_col="index")
modin_df = pd.read_sql(query, conn, index_col="index")
df_equals(modin_df, pandas_df)
with pytest.warns(UserWarning):
pd.read_sql_query(query, conn)
with pytest.warns(UserWarning):
pd.read_sql_table(table, conn)
# Test SQLAlchemy engine
conn = sa.create_engine(conn)
pandas_df = pandas.read_sql(query, conn)
modin_df = pd.read_sql(query, conn)
df_equals(modin_df, pandas_df)
# Test SQLAlchemy Connection
conn = conn.connect()
pandas_df = pandas.read_sql(query, conn)
modin_df = pd.read_sql(query, conn)
df_equals(modin_df, pandas_df)
def test_from_sql_with_chunksize(make_sql_connection):
filename = "test_from_sql.db"
table = "test_from_sql"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_gen = pandas.read_sql(query, conn, chunksize=10)
modin_gen = pd.read_sql(query, conn, chunksize=10)
for modin_df, pandas_df in zip(modin_gen, pandas_gen):
df_equals(modin_df, pandas_df)
@pytest.mark.skip(reason="No SAS write methods in Pandas")
def test_from_sas():
pandas_df = pandas.read_sas(TEST_SAS_FILENAME)
modin_df = pd.read_sas(TEST_SAS_FILENAME)
df_equals(modin_df, pandas_df)
def test_from_csv(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME)
modin_df = pd.read_csv(TEST_CSV_FILENAME)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(Path(TEST_CSV_FILENAME))
modin_df = pd.read_csv(Path(TEST_CSV_FILENAME))
df_equals(modin_df, pandas_df)
def test_from_csv_sep_none(make_csv_file):
make_csv_file()
with pytest.warns(ParserWarning):
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, sep=None)
with pytest.warns(ParserWarning):
modin_df = pd.read_csv(TEST_CSV_FILENAME, sep=None)
df_equals(modin_df, pandas_df)
def test_from_csv_bad_quotes():
csv_bad_quotes = """1, 2, 3, 4
one, two, three, four
five, "six", seven, "eight
"""
with open(TEST_CSV_FILENAME, "w") as f:
f.write(csv_bad_quotes)
pandas_df = pandas.read_csv(TEST_CSV_FILENAME)
modin_df = pd.read_csv(TEST_CSV_FILENAME)
df_equals(modin_df, pandas_df)
def test_from_csv_quote_none():
csv_bad_quotes = """1, 2, 3, 4
one, two, three, four
five, "six", seven, "eight
"""
with open(TEST_CSV_FILENAME, "w") as f:
f.write(csv_bad_quotes)
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, quoting=csv.QUOTE_NONE)
modin_df = pd.read_csv(TEST_CSV_FILENAME, quoting=csv.QUOTE_NONE)
df_equals(modin_df, pandas_df)
def test_from_csv_categories():
pandas_df = pandas.read_csv(
"modin/pandas/test/data/test_categories.csv",
names=["one", "two"],
dtype={"one": "int64", "two": "category"},
)
modin_df = pd.read_csv(
"modin/pandas/test/data/test_categories.csv",
names=["one", "two"],
dtype={"one": "int64", "two": "category"},
)
df_equals(modin_df, pandas_df)
def test_from_csv_gzip(make_csv_file):
make_csv_file(compression="gzip")
gzip_path = "{}.gz".format(TEST_CSV_FILENAME)
pandas_df = pandas.read_csv(gzip_path)
modin_df = pd.read_csv(gzip_path)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(gzip_path, compression="gzip")
modin_df = pd.read_csv(gzip_path, compression="gzip")
df_equals(modin_df, pandas_df)
def test_from_csv_bz2(make_csv_file):
make_csv_file(compression="bz2")
bz2_path = "{}.bz2".format(TEST_CSV_FILENAME)
pandas_df = pandas.read_csv(bz2_path)
modin_df = pd.read_csv(bz2_path)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(bz2_path, compression="bz2")
modin_df = pd.read_csv(bz2_path, compression="bz2")
df_equals(modin_df, pandas_df)
def test_from_csv_xz(make_csv_file):
make_csv_file(compression="xz")
xz_path = "{}.xz".format(TEST_CSV_FILENAME)
pandas_df = pandas.read_csv(xz_path)
modin_df = pd.read_csv(xz_path)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(xz_path, compression="xz")
modin_df = pd.read_csv(xz_path, compression="xz")
df_equals(modin_df, pandas_df)
def test_from_csv_zip(make_csv_file):
make_csv_file(compression="zip")
zip_path = "{}.zip".format(TEST_CSV_FILENAME)
pandas_df = pandas.read_csv(zip_path)
modin_df = pd.read_csv(zip_path)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(zip_path, compression="zip")
modin_df = pd.read_csv(zip_path, compression="zip")
df_equals(modin_df, pandas_df)
def test_parse_dates_read_csv():
pandas_df = pandas.read_csv("modin/pandas/test/data/test_time_parsing.csv")
modin_df = pd.read_csv("modin/pandas/test/data/test_time_parsing.csv")
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/test_time_parsing.csv",
names=[
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
header=0,
index_col=0,
encoding="utf-8",
)
modin_df = pd.read_csv(
"modin/pandas/test/data/test_time_parsing.csv",
names=[
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
header=0,
index_col=0,
encoding="utf-8",
)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/test_time_parsing.csv",
names=[
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
header=0,
index_col=0,
parse_dates=["timestamp"],
encoding="utf-8",
)
modin_df = pd.read_csv(
"modin/pandas/test/data/test_time_parsing.csv",
names=[
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
header=0,
index_col=0,
parse_dates=["timestamp"],
encoding="utf-8",
)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/test_time_parsing.csv",
names=[
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
header=0,
index_col=2,
parse_dates=["timestamp"],
encoding="utf-8",
)
modin_df = pd.read_csv(
"modin/pandas/test/data/test_time_parsing.csv",
names=[
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
header=0,
index_col=2,
parse_dates=["timestamp"],
encoding="utf-8",
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"kwargs",
[
{"header": None, "usecols": [0, 7]},
{"usecols": [0, 7]},
{"names": [0, 7], "usecols": [0, 7]},
],
)
def test_from_csv_with_args(kwargs):
file_name = "modin/pandas/test/data/issue_621.csv"
pandas_df = pandas.read_csv(file_name, **kwargs)
modin_df = pd.read_csv(file_name, **kwargs)
df_equals(modin_df, pandas_df)
def test_from_table(make_csv_file):
make_csv_file(delimiter="\t")
pandas_df = pandas.read_table(TEST_CSV_FILENAME)
modin_df = pd.read_table(TEST_CSV_FILENAME)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_table(Path(TEST_CSV_FILENAME))
modin_df = pd.read_table(Path(TEST_CSV_FILENAME))
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("usecols", [["a"], ["a", "b", "e"], [0, 1, 4]])
def test_from_csv_with_usecols(usecols):
fname = "modin/pandas/test/data/test_usecols.csv"
pandas_df = pandas.read_csv(fname, usecols=usecols)
modin_df = pd.read_csv(fname, usecols=usecols)
df_equals(modin_df, pandas_df)
@pytest.mark.skipif(
execution_engine.get().lower() == "python", reason="Using pandas implementation"
)
def test_from_csv_s3(make_csv_file):
dataset_url = "s3://noaa-ghcn-pds/csv/1788.csv"
pandas_df = pandas.read_csv(dataset_url)
# This first load is to trigger all the import deprecation warnings
modin_df = pd.read_csv(dataset_url)
# This will warn if it defaults to pandas behavior, but it shouldn't
with pytest.warns(None) as record:
modin_df = pd.read_csv(dataset_url)
assert not any(
"defaulting to pandas implementation" in str(err) for err in record.list
)
df_equals(modin_df, pandas_df)
def test_from_csv_default(make_csv_file):
# We haven't implemented read_csv from https, but if it's implemented, then this needs to change
dataset_url = "https://raw.githubusercontent.com/modin-project/modin/master/modin/pandas/test/data/blah.csv"
pandas_df = pandas.read_csv(dataset_url)
with pytest.warns(UserWarning):
modin_df = pd.read_csv(dataset_url)
df_equals(modin_df, pandas_df)
def test_from_csv_chunksize(make_csv_file):
make_csv_file()
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=500)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=500)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(TEST_CSV_FILENAME, chunksize=1)
pd_reader = pandas.read_csv(TEST_CSV_FILENAME, chunksize=1)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
def test_from_csv_skiprows(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, skiprows=2)
modin_df = pd.read_csv(TEST_CSV_FILENAME, skiprows=2)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(
TEST_CSV_FILENAME, names=["c1", "c2", "c3", "c4"], skiprows=2
)
modin_df = pd.read_csv(
TEST_CSV_FILENAME, names=["c1", "c2", "c3", "c4"], skiprows=2
)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(
TEST_CSV_FILENAME, names=["c1", "c2", "c3", "c4"], skiprows=lambda x: x % 2
)
modin_df = pd.read_csv(
TEST_CSV_FILENAME, names=["c1", "c2", "c3", "c4"], skiprows=lambda x: x % 2
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"encoding", ["latin8", "ISO-8859-1", "latin1", "iso-8859-1", "cp1252", "utf8"]
)
def test_from_csv_encoding(make_csv_file, encoding):
make_csv_file(encoding=encoding)
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, encoding=encoding)
modin_df = pd.read_csv(TEST_CSV_FILENAME, encoding=encoding)
df_equals(modin_df, pandas_df)
def test_from_csv_default_to_pandas_behavior(make_csv_file):
make_csv_file()
with pytest.warns(UserWarning):
# Test nrows
pd.read_csv(TEST_CSV_FILENAME, nrows=10)
with pytest.warns(UserWarning):
# This tests that we default to pandas on a buffer
from io import StringIO
pd.read_csv(StringIO(open(TEST_CSV_FILENAME, "r").read()))
with pytest.warns(UserWarning):
pd.read_csv(TEST_CSV_FILENAME, skiprows=lambda x: x in [0, 2])
def test_from_csv_index_col(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, index_col="col1")
modin_df = pd.read_csv(TEST_CSV_FILENAME, index_col="col1")
df_equals(modin_df, pandas_df)
def test_from_csv_skipfooter(make_csv_file):
make_csv_file()
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, skipfooter=13)
modin_df = pd.read_csv(TEST_CSV_FILENAME, skipfooter=13)
df_equals(modin_df, pandas_df)
def test_from_csv_parse_dates(make_csv_file):
make_csv_file(force=True)
pandas_df = pandas.read_csv(TEST_CSV_FILENAME, parse_dates=[["col2", "col4"]])
modin_df = pd.read_csv(TEST_CSV_FILENAME, parse_dates=[["col2", "col4"]])
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_csv(
TEST_CSV_FILENAME, parse_dates={"time": ["col2", "col4"]}
)
modin_df = pd.read_csv(TEST_CSV_FILENAME, parse_dates={"time": ["col2", "col4"]})
df_equals(modin_df, pandas_df)
def test_from_csv_newlines_in_quotes():
pandas_df = pandas.read_csv("modin/pandas/test/data/newlines.csv")
modin_df = pd.read_csv("modin/pandas/test/data/newlines.csv")
df_equals(modin_df, pandas_df)
@pytest.mark.skip(reason="No clipboard on Travis")
def test_to_clipboard():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
modin_df.to_clipboard()
modin_as_clip = pandas.read_clipboard()
pandas_df.to_clipboard()
pandas_as_clip = pandas.read_clipboard()
assert modin_as_clip.equals(pandas_as_clip)
def test_dataframe_to_csv():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_CSV_DF_FILENAME = "test_df.csv"
TEST_CSV_pandas_FILENAME = "test_pandas.csv"
modin_df.to_csv(TEST_CSV_DF_FILENAME)
pandas_df.to_csv(TEST_CSV_pandas_FILENAME)
assert assert_files_eq(TEST_CSV_DF_FILENAME, TEST_CSV_pandas_FILENAME)
teardown_test_file(TEST_CSV_pandas_FILENAME)
teardown_test_file(TEST_CSV_DF_FILENAME)
def test_series_to_csv():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_CSV_DF_FILENAME = "test_df.csv"
TEST_CSV_pandas_FILENAME = "test_pandas.csv"
modin_s = modin_df["col1"]
pandas_s = pandas_df["col1"]
modin_s.to_csv(TEST_CSV_DF_FILENAME)
pandas_s.to_csv(TEST_CSV_pandas_FILENAME)
df_equals(modin_s, pandas_s)
assert modin_s.name == pandas_s.name
assert assert_files_eq(TEST_CSV_DF_FILENAME, TEST_CSV_pandas_FILENAME)
teardown_test_file(TEST_CSV_pandas_FILENAME)
teardown_test_file(TEST_CSV_DF_FILENAME)
@pytest.mark.skip(reason="Defaulting to Pandas")
def test_to_dense():
modin_df = create_test_modin_dataframe()
with pytest.raises(NotImplementedError):
modin_df.to_dense()
def test_to_dict():
modin_df = create_test_modin_dataframe()
assert modin_df.to_dict() == to_pandas(modin_df).to_dict()
@pytest.mark.xfail(strict=False, reason="Flaky test, defaults to pandas")
def test_to_excel():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_EXCEL_DF_FILENAME = "test_df.xlsx"
TEST_EXCEL_pandas_FILENAME = "test_pandas.xlsx"
modin_writer = pandas.ExcelWriter(TEST_EXCEL_DF_FILENAME)
pandas_writer = pandas.ExcelWriter(TEST_EXCEL_pandas_FILENAME)
modin_df.to_excel(modin_writer)
pandas_df.to_excel(pandas_writer)
modin_writer.save()
pandas_writer.save()
assert assert_files_eq(TEST_EXCEL_DF_FILENAME, TEST_EXCEL_pandas_FILENAME)
teardown_test_file(TEST_EXCEL_DF_FILENAME)
teardown_test_file(TEST_EXCEL_pandas_FILENAME)
def test_to_feather():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_FEATHER_DF_FILENAME = "test_df.feather"
TEST_FEATHER_pandas_FILENAME = "test_pandas.feather"
modin_df.to_feather(TEST_FEATHER_DF_FILENAME)
pandas_df.to_feather(TEST_FEATHER_pandas_FILENAME)
assert assert_files_eq(TEST_FEATHER_DF_FILENAME, TEST_FEATHER_pandas_FILENAME)
teardown_test_file(TEST_FEATHER_pandas_FILENAME)
teardown_test_file(TEST_FEATHER_DF_FILENAME)
def test_to_html():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_HTML_DF_FILENAME = "test_df.html"
TEST_HTML_pandas_FILENAME = "test_pandas.html"
modin_df.to_html(TEST_HTML_DF_FILENAME)
pandas_df.to_html(TEST_HTML_pandas_FILENAME)
assert assert_files_eq(TEST_HTML_DF_FILENAME, TEST_HTML_pandas_FILENAME)
teardown_test_file(TEST_HTML_pandas_FILENAME)
teardown_test_file(TEST_HTML_DF_FILENAME)
def test_to_json():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_JSON_DF_FILENAME = "test_df.json"
TEST_JSON_pandas_FILENAME = "test_pandas.json"
modin_df.to_json(TEST_JSON_DF_FILENAME)
pandas_df.to_json(TEST_JSON_pandas_FILENAME)
assert assert_files_eq(TEST_JSON_DF_FILENAME, TEST_JSON_pandas_FILENAME)
teardown_test_file(TEST_JSON_pandas_FILENAME)
teardown_test_file(TEST_JSON_DF_FILENAME)
def test_to_latex():
modin_df = create_test_modin_dataframe()
assert modin_df.to_latex() == to_pandas(modin_df).to_latex()
def test_to_parquet():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_PARQUET_DF_FILENAME = "test_df.parquet"
TEST_PARQUET_pandas_FILENAME = "test_pandas.parquet"
modin_df.to_parquet(TEST_PARQUET_DF_FILENAME)
pandas_df.to_parquet(TEST_PARQUET_pandas_FILENAME)
assert assert_files_eq(TEST_PARQUET_DF_FILENAME, TEST_PARQUET_pandas_FILENAME)
teardown_test_file(TEST_PARQUET_pandas_FILENAME)
teardown_test_file(TEST_PARQUET_DF_FILENAME)
@pytest.mark.skip(reason="Defaulting to Pandas")
def test_to_period():
modin_df = create_test_modin_dataframe()
with pytest.raises(NotImplementedError):
modin_df.to_period()
def test_to_pickle():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_PICKLE_DF_FILENAME = "test_df.pkl"
TEST_PICKLE_pandas_FILENAME = "test_pandas.pkl"
modin_df.to_pickle(TEST_PICKLE_DF_FILENAME)
pandas_df.to_pickle(TEST_PICKLE_pandas_FILENAME)
assert assert_files_eq(TEST_PICKLE_DF_FILENAME, TEST_PICKLE_pandas_FILENAME)
teardown_test_file(TEST_PICKLE_pandas_FILENAME)
teardown_test_file(TEST_PICKLE_DF_FILENAME)
pd.to_pickle(modin_df, TEST_PICKLE_DF_FILENAME)
pandas.to_pickle(pandas_df, TEST_PICKLE_pandas_FILENAME)
assert assert_files_eq(TEST_PICKLE_DF_FILENAME, TEST_PICKLE_pandas_FILENAME)
teardown_test_file(TEST_PICKLE_pandas_FILENAME)
teardown_test_file(TEST_PICKLE_DF_FILENAME)
def test_to_sql_without_index(make_sql_connection):
table_name = "tbl_without_index"
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection("test_to_sql.db")
modin_df.to_sql(table_name, conn, index=False)
df_modin_sql = pandas.read_sql(table_name, con=conn)
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection("test_to_sql_pandas.db")
pandas_df.to_sql(table_name, conn, index=False)
df_pandas_sql = pandas.read_sql(table_name, con=conn)
assert df_modin_sql.sort_index().equals(df_pandas_sql.sort_index())
def test_to_sql_with_index(make_sql_connection):
table_name = "tbl_with_index"
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection("test_to_sql.db")
modin_df.to_sql(table_name, conn)
df_modin_sql = pandas.read_sql(table_name, con=conn, index_col="index")
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection("test_to_sql_pandas.db")
pandas_df.to_sql(table_name, conn)
df_pandas_sql = pandas.read_sql(table_name, con=conn, index_col="index")
assert df_modin_sql.sort_index().equals(df_pandas_sql.sort_index())
def test_to_stata():
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
TEST_STATA_DF_FILENAME = "test_df.stata"
TEST_STATA_pandas_FILENAME = "test_pandas.stata"
modin_df.to_stata(TEST_STATA_DF_FILENAME)
pandas_df.to_stata(TEST_STATA_pandas_FILENAME)
assert assert_files_eq(TEST_STATA_DF_FILENAME, TEST_STATA_pandas_FILENAME)
teardown_test_file(TEST_STATA_pandas_FILENAME)
teardown_test_file(TEST_STATA_DF_FILENAME)
@pytest.mark.skipif(os.name == "nt", reason="Windows not supported")
def test_HDFStore():
modin_store = pd.HDFStore(TEST_WRITE_HDF_FILENAME_MODIN)
pandas_store = pandas.HDFStore(TEST_WRITE_HDF_FILENAME_PANDAS)
modin_df = create_test_modin_dataframe()
pandas_df = create_test_pandas_dataframe()
modin_store["foo"] = modin_df
pandas_store["foo"] = pandas_df
assert assert_files_eq(
TEST_WRITE_HDF_FILENAME_MODIN, TEST_WRITE_HDF_FILENAME_PANDAS
)
modin_df = modin_store.get("foo")
pandas_df = pandas_store.get("foo")
df_equals(modin_df, pandas_df)
assert isinstance(modin_store, pd.HDFStore)
hdf_file = "/tmp/test_read_hdf.hdf5"
with pd.HDFStore(hdf_file, mode="w") as store:
store.append("data/df1", pd.DataFrame(np.random.randn(5, 5)))
store.append("data/df2", pd.DataFrame(np.random.randn(4, 4)))
modin_df = pd.read_hdf(hdf_file, key="data/df1", mode="r")
pandas_df = pandas.read_hdf(hdf_file, key="data/df1", mode="r")
df_equals(modin_df, pandas_df)
def test_ExcelFile():
setup_excel_file(SMALL_ROW_SIZE)
modin_excel_file = pd.ExcelFile(TEST_EXCEL_FILENAME)
pandas_excel_file = pandas.ExcelFile(TEST_EXCEL_FILENAME)
df_equals(modin_excel_file.parse(), pandas_excel_file.parse())
assert modin_excel_file.io == TEST_EXCEL_FILENAME
assert isinstance(modin_excel_file, pd.ExcelFile)
modin_excel_file.close()
pandas_excel_file.close()
teardown_excel_file()
def test_fwf_file():
fwf_data = """id8141 360.242940 149.910199 11950.7
id1594 444.953632 166.985655 11788.4
id1849 364.136849 183.628767 11806.2
id1230 413.836124 184.375703 11916.8
id1948 502.953953 173.237159 12468.3"""
setup_fwf_file(True, fwf_data=fwf_data)
colspecs = [(0, 6), (8, 20), (21, 33), (34, 43)]
df = pd.read_fwf(TEST_FWF_FILENAME, colspecs=colspecs, header=None, index_col=0)
assert isinstance(df, pd.DataFrame)
teardown_fwf_file()
@pytest.mark.parametrize(
"kwargs",
[
{
"colspecs": [
(0, 11),
(11, 15),
(19, 24),
(27, 32),
(35, 40),
(43, 48),
(51, 56),
(59, 64),
(67, 72),
(75, 80),
(83, 88),
(91, 96),
(99, 104),
(107, 112),
],
"names": ["stationID", "year", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"na_values": ["-9999"],
"index_col": ["stationID", "year"],
},
{
"widths": [20, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
"names": ["id", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"index_col": [0],
},
],
)
def test_fwf_file_colspecs_widths(kwargs):
setup_fwf_file(overwrite=True)
modin_df = pd.read_fwf(TEST_FWF_FILENAME, **kwargs)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, **kwargs)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("usecols", [["a"], ["a", "b", "d"], [0, 1, 3]])
def test_fwf_file_usecols(usecols):
fwf_data = """a b c d
id8141 360.242940 149.910199 11950.7
id1594 444.953632 166.985655 11788.4
id1849 364.136849 183.628767 11806.2
id1230 413.836124 184.375703 11916.8
id1948 502.953953 173.237159 12468.3"""
setup_fwf_file(overwrite=True, fwf_data=fwf_data)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, usecols=usecols)
modin_df = pd.read_fwf(TEST_FWF_FILENAME, usecols=usecols)
df_equals(modin_df, pandas_df)
teardown_fwf_file()
def test_fwf_file_chunksize():
setup_fwf_file(overwrite=True)
# Tests __next__ and correctness of reader as an iterator
rdf_reader = pd.read_fwf(TEST_FWF_FILENAME, chunksize=5)
pd_reader = pandas.read_fwf(TEST_FWF_FILENAME, chunksize=5)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_fwf(TEST_FWF_FILENAME, chunksize=1)
pd_reader = pandas.read_fwf(TEST_FWF_FILENAME, chunksize=1)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_fwf(TEST_FWF_FILENAME, chunksize=1)
pd_reader = pandas.read_fwf(TEST_FWF_FILENAME, chunksize=1)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
def test_fwf_file_skiprows():
setup_fwf_file(overwrite=True)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, skiprows=2)
modin_df = pd.read_fwf(TEST_FWF_FILENAME, skiprows=2)
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, usecols=[0, 4, 7], skiprows=[2, 5])
modin_df = pd.read_fwf(TEST_FWF_FILENAME, usecols=[0, 4, 7], skiprows=[2, 5])
df_equals(modin_df, pandas_df)
def test_fwf_file_index_col():
fwf_data = """a b c d
id8141 360.242940 149.910199 11950.7
id1594 444.953632 166.985655 11788.4
id1849 364.136849 183.628767 11806.2
id1230 413.836124 184.375703 11916.8
id1948 502.953953 173.237159 12468.3"""
setup_fwf_file(overwrite=True, fwf_data=fwf_data)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, index_col="c")
modin_df = pd.read_fwf(TEST_FWF_FILENAME, index_col="c")
df_equals(modin_df, pandas_df)
teardown_fwf_file()
def test_fwf_file_skipfooter():
setup_fwf_file(overwrite=True)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, skipfooter=2)
modin_df = pd.read_fwf(TEST_FWF_FILENAME, skipfooter=2)
df_equals(modin_df, pandas_df)
def test_fwf_file_parse_dates():
dates = pandas.date_range("2000", freq="h", periods=10)
fwf_data = "col1 col2 col3 col4"
for i in range(10, 20):
fwf_data = fwf_data + "\n{col1} {col2} {col3} {col4}".format(
col1=str(i),
col2=str(dates[i - 10].date()),
col3=str(i),
col4=str(dates[i - 10].time()),
)
setup_fwf_file(overwrite=True, fwf_data=fwf_data)
pandas_df = pandas.read_fwf(TEST_FWF_FILENAME, parse_dates=[["col2", "col4"]])
modin_df = pd.read_fwf(TEST_FWF_FILENAME, parse_dates=[["col2", "col4"]])
df_equals(modin_df, pandas_df)
pandas_df = pandas.read_fwf(
TEST_FWF_FILENAME, parse_dates={"time": ["col2", "col4"]}
)
modin_df = pd.read_fwf(TEST_FWF_FILENAME, parse_dates={"time": ["col2", "col4"]})
df_equals(modin_df, pandas_df)
teardown_fwf_file()
@pytest.mark.skip(reason="Need to verify GBQ access")
def test_from_gbq():
# Test API, but do not supply credentials until credits can be secured.
with pytest.raises(
ValueError, match="Could not determine project ID and one was not supplied."
):
pd.read_gbq("SELECT 1")
@pytest.mark.skip(reason="Need to verify GBQ access")
def test_to_gbq():
modin_df = create_test_modin_dataframe()
# Test API, but do not supply credentials until credits can be secured.
with pytest.raises(
ValueError, match="Could not determine project ID and one was not supplied."
):
modin_df.to_gbq("modin.table")
def test_cleanup():
filenames = [
TEST_PARQUET_FILENAME,
TEST_CSV_FILENAME,
TEST_JSON_FILENAME,
TEST_HTML_FILENAME,
TEST_EXCEL_FILENAME,
TEST_FEATHER_FILENAME,
TEST_READ_HDF_FILENAME,
TEST_WRITE_HDF_FILENAME_MODIN,
TEST_WRITE_HDF_FILENAME_PANDAS,
TEST_STATA_FILENAME,
TEST_PICKLE_FILENAME,
TEST_SAS_FILENAME,
TEST_FWF_FILENAME,
TEST_GBQ_FILENAME,
]
for f in filenames:
if os.path.exists(f):
# Need try..except for Windows
try:
os.remove(f)
except PermissionError:
pass
|
[] |
[] |
[
"MODIN_BACKEND"
] |
[]
|
["MODIN_BACKEND"]
|
python
| 1 | 0 | |
cmd/mdtitle/main.go
|
package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/knaka/mdpp"
)
func waitForDebugger() {
if os.Getenv("WAIT_FOR_DEBUGGER") != "" {
log.Println("PID", os.Getpid())
for {
err := exec.Command("sh", "-c", fmt.Sprintf("ps w | grep '\\b[d]lv\\b.*\\battach\\b.*\\b%d\\b'", os.Getpid())).Run()
time.Sleep(1 * time.Second)
if err == nil {
break
}
}
}
}
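// Debugging sketch (assumed workflow, not part of the original source): run
//   WAIT_FOR_DEBUGGER=1 mdtitle README.md
// note the PID it logs, then attach with `dlv attach <pid>` from another terminal;
// the loop above polls `ps` once per second and continues once a matching dlv attach appears.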
func main() {
waitForDebugger()
for _, inPath := range os.Args[1:] {
title := mdpp.GetMarkdownTitle(inPath)
fmt.Println(title)
}
}
|
[
"\"WAIT_FOR_DEBUGGER\""
] |
[] |
[
"WAIT_FOR_DEBUGGER"
] |
[]
|
["WAIT_FOR_DEBUGGER"]
|
go
| 1 | 0 | |
tests/test-upgrade.py
|
import os
import platform
import time
from validators import (
validate_dns_dashboard,
validate_storage,
validate_ingress,
validate_gpu,
validate_registry,
validate_forward,
validate_metrics_server,
validate_fluentd,
validate_jaeger,
validate_metallb_config,
)
from subprocess import check_call, CalledProcessError
from utils import (
microk8s_enable,
wait_for_pod_state,
wait_for_installation,
run_until_success,
is_container,
)
upgrade_from = os.environ.get("UPGRADE_MICROK8S_FROM", "beta")
# Have UPGRADE_MICROK8S_TO point to a file to upgrade to that file
upgrade_to = os.environ.get("UPGRADE_MICROK8S_TO", "edge")
under_time_pressure = os.environ.get("UNDER_TIME_PRESSURE", "False")
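# Illustrative invocation (assumed, not part of the original suite):
#   UPGRADE_MICROK8S_FROM=1.18/stable UPGRADE_MICROK8S_TO=./microk8s.snap pytest -s tests/test-upgrade.py
# A target ending in ".snap" is installed as a local file; any other value is treated as a snap channel.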
class TestUpgrade(object):
"""
Validates a microk8s upgrade path
"""
def test_upgrade(self):
"""
Deploy, probe, upgrade, validate nothing broke.
"""
print("Testing upgrade from {} to {}".format(upgrade_from, upgrade_to))
cmd = "sudo snap install microk8s --classic --channel={}".format(upgrade_from)
run_until_success(cmd)
wait_for_installation()
if is_container():
# In some setups (e.g. LXC on GCE) the nf_conntrack hashsize file under
# /sys is marked as rw, but any update to it fails, causing kube-proxy
# to fail.
here = os.path.dirname(os.path.abspath(__file__))
apply_patch = os.path.join(here, "patch-kube-proxy.sh")
check_call("sudo {}".format(apply_patch).split())
# Run through the validators and
# select those that were valid for the original snap
test_matrix = {}
try:
enable = microk8s_enable("dns")
wait_for_pod_state("", "kube-system", "running", label="k8s-app=kube-dns")
assert "Nothing to do for" not in enable
enable = microk8s_enable("dashboard")
assert "Nothing to do for" not in enable
validate_dns_dashboard()
test_matrix["dns_dashboard"] = validate_dns_dashboard
except CalledProcessError:
print("Will not test dns-dashboard")
try:
enable = microk8s_enable("storage")
assert "Nothing to do for" not in enable
validate_storage()
test_matrix["storage"] = validate_storage
except CalledProcessError:
print("Will not test storage")
try:
enable = microk8s_enable("ingress")
assert "Nothing to do for" not in enable
validate_ingress()
test_matrix["ingress"] = validate_ingress
except CalledProcessError:
print("Will not test ingress")
try:
enable = microk8s_enable("gpu")
assert "Nothing to do for" not in enable
validate_gpu()
test_matrix["gpu"] = validate_gpu
except CalledProcessError:
print("Will not test gpu")
try:
enable = microk8s_enable("registry")
assert "Nothing to do for" not in enable
validate_registry()
test_matrix["registry"] = validate_registry
except CalledProcessError:
print("Will not test registry")
try:
validate_forward()
test_matrix["forward"] = validate_forward
except CalledProcessError:
print("Will not test port forward")
try:
enable = microk8s_enable("metrics-server")
assert "Nothing to do for" not in enable
validate_metrics_server()
test_matrix["metrics_server"] = validate_metrics_server
except CalledProcessError:
print("Will not test the metrics server")
# AMD64 only tests
if platform.machine() == "x86_64" and under_time_pressure == "False":
"""
# Prometheus operator on our lxc is crashlooping; disabling the test for now.
try:
enable = microk8s_enable("prometheus", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_prometheus()
test_matrix['prometheus'] = validate_prometheus
except:
print('Will not test the prometheus')
"""
try:
enable = microk8s_enable("fluentd", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_fluentd()
test_matrix["fluentd"] = validate_fluentd
except CalledProcessError:
print("Will not test the fluentd")
try:
enable = microk8s_enable("jaeger", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_jaeger()
test_matrix["jaeger"] = validate_jaeger
except CalledProcessError:
print("Will not test the jaeger addon")
# We are not testing cilium because we want to test the upgrade of the default CNI
"""
try:
enable = microk8s_enable("cilium", timeout_insec=300)
assert "Nothing to do for" not in enable
validate_cilium()
test_matrix['cilium'] = validate_cilium
except CalledProcessError:
print('Will not test the cilium addon')
"""
try:
ip_ranges = (
"192.168.0.105-192.168.0.105,192.168.0.110-192.168.0.111,192.168.1.240/28"
)
enable = microk8s_enable("{}:{}".format("metallb", ip_ranges), timeout_insec=500)
assert "MetalLB is enabled" in enable and "Nothing to do for" not in enable
validate_metallb_config(ip_ranges)
test_matrix["metallb"] = validate_metallb_config
except CalledProcessError:
print("Will not test the metallb addon")
# We will not be testing multus because it takes too long for cilium and multus
# to settle after the update, and the multus test needs to be refactored so that we
# delete and recreate the configured networks.
"""
try:
enable = microk8s_enable("multus", timeout_insec=150)
assert "Nothing to do for" not in enable
validate_multus()
test_matrix['multus'] = validate_multus
except CalledProcessError:
print('Will not test the multus addon')
"""
# Refresh the snap to the target
if upgrade_to.endswith(".snap"):
cmd = "sudo snap install {} --classic --dangerous".format(upgrade_to)
else:
cmd = "sudo snap refresh microk8s --channel={}".format(upgrade_to)
run_until_success(cmd)
# Allow for the refresh to be processed
time.sleep(10)
wait_for_installation()
# Test any validations that were valid for the original snap
for test, validation in test_matrix.items():
print("Testing {}".format(test))
validation()
if not is_container():
# On lxc umount docker overlay is not permitted.
check_call("sudo snap remove microk8s".split())
|
[] |
[] |
[
"UPGRADE_MICROK8S_TO",
"UNDER_TIME_PRESSURE",
"UPGRADE_MICROK8S_FROM"
] |
[]
|
["UPGRADE_MICROK8S_TO", "UNDER_TIME_PRESSURE", "UPGRADE_MICROK8S_FROM"]
|
python
| 3 | 0 | |
example/wsgi.py
|
"""
WSGI config for example project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"fmt"
"os"
"io/ioutil"
"encoding/json"
"reflect"
"log"
"github.com/scc300/scc300-network/blockchain"
"github.com/scc300/scc300-network/web"
"github.com/scc300/scc300-network/web/controllers"
)
// Initializes the blockchain network and starts the customer and merchant web applications
func main() {
// Definition of the Fabric SDK properties
fSetup := blockchain.FabricSetup{
// Network parameters
OrdererID: "orderer.hf.scc300.io",
// Channel parameters
ChannelID: "scc300",
ChannelConfig: os.Getenv("GOPATH") + "/src/github.com/scc300/scc300-network/fixtures/artifacts/scc300.channel.tx",
// Chaincode parameters
ChainCodeID: "scc300-network",
ChaincodeGoPath: os.Getenv("GOPATH"),
ChaincodePath: "github.com/scc300/scc300-network/chaincode/",
OrgAdmin: "Admin",
OrgName: "org1",
ConfigFile: "config.yaml",
// User parameters
UserName: "User1",
}
// Initialization of the Fabric SDK from the previously set properties
err := fSetup.Initialize()
if err != nil {
fmt.Printf("Unable to initialize the Fabric SDK: %v\n", err)
return
}
// Close SDK
defer fSetup.CloseSDK()
// Install and instantiate the chaincode
err = fSetup.InstallAndInstantiateCC()
if err != nil {
fmt.Printf("Unable to install and instantiate the chaincode: %v\n", err)
return
}
// Commitment initialisation - get the spec source from a file and initialise it on the chaincode
specSource := getSpecSource("./specs/SellItem.quark")
_, err = fSetup.InvokeInitSpec(specSource)
if err != nil {
log.Fatalf("Unable to initialise SellItem commitment on the chaincode: %v\n", err)
}
// Commitment Data Initialisation - Read JSON file and add initial data to blockchain (because we assume data already exists)
jsonStrs := getJSONObjectStrsFromFile("./specs/test_data.json")
_, err = fSetup.InvokeInitCommitmentData(jsonStrs)
if err != nil {
log.Fatalf("Unable to initialise commitment data on the chaincode: %v\n", err)
}
// Create 2 servers - 1 merchant, 1 customer
web.StartServers(&controllers.Application{
Fabric: &fSetup,
})
}
// Function to obtain the specification source code as a string (input is a filepath to the .quark file)
func getSpecSource(filepath string) (source string) {
data, err := ioutil.ReadFile(filepath)
if (err != nil) {
log.Fatalf("Couldn't read spec file %s", filepath)
}
return string(data)
}
// Obtains JSON strings of data from a given filepath as a string
// Returns a slice of strings - each a JSON object in string form
func getJSONObjectStrsFromFile(filepath string) (strs []string) {
data, err := ioutil.ReadFile(filepath)
if (err != nil) {
log.Fatalf("Couldn't read JSON file %s", filepath)
}
// Parse the JSON
var objs interface{}
json.Unmarshal([]byte(string(data)), &objs)
// Ensure that it is an array of objects.
objArr, ok := objs.([]interface{})
if !ok {
log.Fatal("expected an array of objects")
}
// Handle each object as a map[string]interface{}
jsonStrs := make([]string, 0)
for i, obj := range objArr {
obj, ok := obj.(map[string]interface{})
if !ok {
log.Fatalf("expected type map[string]interface{}, got %s", reflect.TypeOf(objArr[i]))
}
jsonString, _ := json.Marshal(obj)
jsonStrs = append(jsonStrs, string(jsonString))
}
return jsonStrs
}
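// Illustrative input for getJSONObjectStrsFromFile (hypothetical, not the project's actual
// test_data.json): a top-level JSON array of objects such as [{"id": "1"}, {"id": "2"}],
// which would be returned as two independent JSON strings, one per object.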
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
integration/benchmark_test.go
|
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"sync"
"testing"
"time"
)
type result struct {
totalBuildTime float64
resolvingFiles float64
walkingFiles float64
hashingFiles float64
}
func TestSnapshotBenchmark(t *testing.T) {
if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err != nil || !b {
t.SkipNow()
}
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
contextDir := filepath.Join(cwd, "benchmark_fs")
nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000}
var timeMap sync.Map
var wg sync.WaitGroup
for _, num := range nums {
t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) {
wg.Add(1)
var err error
go func(num int, err *error) {
dockerfile := "Dockerfile"
kanikoImage := fmt.Sprintf("%s_%d", GetKanikoImage(config.imageRepo, dockerfile), num)
buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)}
var benchmarkDir string
benchmarkDir, *err = buildKanikoImage("", dockerfile,
buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket,
config.serviceAccount, false)
if *err != nil {
return
}
r := newResult(t, filepath.Join(benchmarkDir, dockerfile))
timeMap.Store(num, r)
wg.Done()
defer os.Remove(benchmarkDir)
}(num, &err)
if err != nil {
t.Errorf("could not run benchmark results for num %d due to %s", num, err)
}
})
}
wg.Wait()
fmt.Println("Number of Files,Total Build Time,Walking Filesystem, Resolving Files")
timeMap.Range(func(key interface{}, value interface{}) bool {
d, _ := key.(int)
v, _ := value.(result)
fmt.Println(fmt.Sprintf("%d,%f,%f,%f", d, v.totalBuildTime, v.walkingFiles, v.resolvingFiles))
return true
})
}
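// Illustrative invocation for the benchmark tests in this file (assumed, not part of the
// original source):
//   BENCHMARK=true go test -run TestSnapshotBenchmark -timeout 60m ./integration/...
// Leaving BENCHMARK unset, or set to anything strconv.ParseBool rejects, skips them.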
func newResult(t *testing.T, f string) result {
var current map[string]time.Duration
jsonFile, err := os.Open(f)
defer jsonFile.Close()
if err != nil {
t.Errorf("could not read benchmark file %s", f)
}
byteValue, _ := ioutil.ReadAll(jsonFile)
if err := json.Unmarshal(byteValue, &current); err != nil {
t.Errorf("could not unmarshal benchmark file")
}
r := result{}
if c, ok := current["Resolving Paths"]; ok {
r.resolvingFiles = c.Seconds()
}
if c, ok := current["Walking filesystem"]; ok {
r.walkingFiles = c.Seconds()
}
if c, ok := current["Total Build Time"]; ok {
r.totalBuildTime = c.Seconds()
}
if c, ok := current["Hashing files"]; ok {
r.hashingFiles = c.Seconds()
}
fmt.Println(r)
return r
}
func TestSnapshotBenchmarkGcloud(t *testing.T) {
if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err != nil || !b {
t.SkipNow()
}
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
contextDir := filepath.Join(cwd, "benchmark_fs")
nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000}
var wg sync.WaitGroup
fmt.Println("Number of Files,Total Build Time,Walking Filesystem, Resolving Files")
for _, num := range nums {
t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) {
wg.Add(1)
go func(num int) {
dir, err := runInGcloud(contextDir, num)
if err != nil {
t.Errorf("error when running in gcloud %v", err)
return
}
r := newResult(t, filepath.Join(dir, "results"))
fmt.Println(fmt.Sprintf("%d,%f,%f,%f, %f", num, r.totalBuildTime, r.walkingFiles, r.resolvingFiles, r.hashingFiles))
wg.Done()
defer os.Remove(dir)
defer os.Chdir(cwd)
}(num)
})
}
wg.Wait()
}
func runInGcloud(dir string, num int) (string, error) {
os.Chdir(dir)
cmd := exec.Command("gcloud", "builds",
"submit", "--config=cloudbuild.yaml",
fmt.Sprintf("--substitutions=_COUNT=%d", num))
_, err := RunCommandWithoutTest(cmd)
if err != nil {
return "", err
}
// grab gcs and to temp dir and return
tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%d", num))
if err != nil {
return "", err
}
src := fmt.Sprintf("%s/gcb/benchmark_file_%d", config.gcsBucket, num)
dest := filepath.Join(tmpDir, "results")
copyCommand := exec.Command("gsutil", "cp", src, dest)
_, err = RunCommandWithoutTest(copyCommand)
if err != nil {
return "", fmt.Errorf("failed to download file to GCS bucket %s: %s", src, err)
}
return tmpDir, nil
}
|
[
"\"BENCHMARK\"",
"\"BENCHMARK\""
] |
[] |
[
"BENCHMARK"
] |
[]
|
["BENCHMARK"]
|
go
| 1 | 0 | |
helper.go
|
// Copyright 2019 HAProxy Technologies LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"log"
"math/rand"
"os"
"runtime"
"strconv"
"strings"
//networking "k8s.io/api/networking/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
)
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
func LogErr(err error) {
if err == nil {
return
}
_, file, no, ok := runtime.Caller(1)
if ok {
file1 := strings.Replace(file, "/src/", "", 1)
log.SetFlags(LogTypeShort)
log.Printf("%s:%d %s\n", file1, no, err.Error())
log.SetFlags(LogType)
}
}
var chars = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
//RandomString returns random string of size n
func RandomString(n int) string {
b := make([]rune, n)
size := len(chars)
for i := range b {
b[i] = chars[rand.Intn(size)]
}
return string(b)
}
//ConvertIngressRules converts data from kubernetes format
func ConvertIngressRules(ingressRules []extensions.IngressRule) map[string]*IngressRule {
rules := make(map[string]*IngressRule)
for _, k8sRule := range ingressRules {
paths := make(map[string]*IngressPath)
for pathIndex, k8sPath := range k8sRule.HTTP.Paths {
paths[k8sPath.Path] = &IngressPath{
PathIndex: pathIndex,
Path: k8sPath.Path,
ServiceName: k8sPath.Backend.ServiceName,
ServicePortInt: int64(k8sPath.Backend.ServicePort.IntValue()),
ServicePortString: k8sPath.Backend.ServicePort.StrVal,
Status: "",
}
}
rules[k8sRule.Host] = &IngressRule{
Host: k8sRule.Host,
Paths: paths,
Status: "",
}
}
return rules
}
//ConvertIngressTLS converts TLS data from kubernetes format
func ConvertIngressTLS(ingressTLS []extensions.IngressTLS) map[string]*IngressTLS {
tls := make(map[string]*IngressTLS)
for _, k8sTLS := range ingressTLS {
for _, host := range k8sTLS.Hosts {
tls[host] = &IngressTLS{
Host: host,
SecretName: StringW{
Value: k8sTLS.SecretName,
},
Status: EMPTY,
}
}
}
return tls
}
func ptrInt64(value int64) *int64 {
return &value
}
//nolint deadcode
func ptrString(value string) *string {
return &value
}
func ParseTimeout(data string) (int64, error) {
var v int64
var err error
switch {
case strings.HasSuffix(data, "ms"):
v, err = strconv.ParseInt(strings.TrimSuffix(data, "ms"), 10, 64)
case strings.HasSuffix(data, "s"):
v, err = strconv.ParseInt(strings.TrimSuffix(data, "s"), 10, 64)
v *= 1000
case strings.HasSuffix(data, "m"):
v, err = strconv.ParseInt(strings.TrimSuffix(data, "m"), 10, 64)
v = v * 1000 * 60
case strings.HasSuffix(data, "h"):
v, err = strconv.ParseInt(strings.TrimSuffix(data, "h"), 10, 64)
v = v * 1000 * 60 * 60
case strings.HasSuffix(data, "d"):
v, err = strconv.ParseInt(strings.TrimSuffix(data, "d"), 10, 64)
v = v * 1000 * 60 * 60 * 24
default:
v, err = strconv.ParseInt(data, 10, 64)
}
return v, err
}
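// Illustrative conversions (not from the original source): ParseTimeout("200ms") == 200,
// ParseTimeout("2s") == 2000, ParseTimeout("5m") == 300000, ParseTimeout("1h") == 3600000,
// and a bare value such as ParseTimeout("250") is already interpreted as milliseconds.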
//annotationConvertTimeToMS converts an annotation time value to a millisecond value
func annotationConvertTimeToMS(data StringW) (int64, error) {
return ParseTimeout(data.Value)
}
|
[
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
docsrc/themes/hugo-material-docs/static/modules/annotatorjs2/annotator-2.0.0-alpha.3/doc/conf.py
|
# -*- coding: utf-8 -*-
#
# Annotator documentation build configuration file
import json
import os
# By default, we do not want to use the RTD theme
sphinx_rtd_theme = None
# If the docs are built on readthedocs, it will be used by default
if os.environ.get('READTHEDOCS') != 'True':
try:
import sphinx_rtd_theme
except ImportError:
# Now we know for sure we do not have it
pass
# -- General configuration
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Annotator'
copyright = u'2014, The Annotator project contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = json.load(open('../package.json'))['version']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The default language to highlight source code in. This should be a valid
# Pygments lexer name.
highlight_language = 'javascript'
# -- Sphinx extension configuration
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'sphinxcontrib.httpdomain',
]
# A dictionary of external sites, mapping unique short alias names to a base
# URL and a prefix.
extlinks = {
'gh': ('https://github.com/openannotation/annotator/%s', ''),
'issue': ('https://github.com/openannotation/annotator/issues/%s',
'issue '),
}
# If this is True, todo and todolist produce output, else they produce nothing.
todo_include_todos = os.environ.get('SPHINX_TODOS') is not None
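# Illustrative usage (assumed): building with `SPHINX_TODOS=1 sphinx-build -b html . _build/html`
# includes todo/todolist output, while leaving the variable unset omits it.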
# -- Options for HTML output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme' if sphinx_rtd_theme else 'default'
# Add any paths that contain custom themes here, relative to this directory.
if sphinx_rtd_theme:
html_theme_path = [
sphinx_rtd_theme.get_html_theme_path()
]
|
[] |
[] |
[
"READTHEDOCS",
"SPHINX_TODOS"
] |
[]
|
["READTHEDOCS", "SPHINX_TODOS"]
|
python
| 2 | 0 | |
middle/browserVfs.go
|
package middle
import (
"os"
"runtime"
"io/ioutil"
"path/filepath"
)
// BrowserLocation represents a location in the browser VFS.
type BrowserLocation struct {
// Location is the actual path (should be passable to file IO functions if not virtual)
Location string
// Dir is true if this is a directory, false otherwise.
Dir bool
// If not empty, this is a drive, and has a special overridden name
Drive string
}
// BrowserVFSPathDefault is the path for the 'drives panel'. Should be virtual.
const BrowserVFSPathDefault = "computer://"
// BrowserVFSLocationReal returns true if the path is a real file or directory that can be accessed using standard file IO.
func BrowserVFSLocationReal(vfsPath string) bool {
return vfsPath != BrowserVFSPathDefault
}
// BrowserVFSList lists the potentially-virtual entries (files, directories, or drives) at the given path.
func BrowserVFSList(vfsPath string) []BrowserLocation {
vfsEntries := []BrowserLocation{}
if vfsPath == BrowserVFSPathDefault {
// Determine drives (OS-dependent)
if runtime.GOOS == "windows" {
// we can get away with this, right?
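// Probe each drive letter A: through Z: with a directory read; only letters that can actually be listed are reported as drives.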
for i := 0; i < 26; i++ {
drive := string(rune('A' + i)) + ":\\"
_, err := ioutil.ReadDir(drive)
if err == nil {
vfsEntries = append(vfsEntries, BrowserLocation {
Drive: drive,
Location: drive,
Dir: true,
})
}
}
return vfsEntries
}
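// Non-Windows: expose the home directory, the filesystem root, and the working directory as pseudo-drives.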
home := os.Getenv("HOME")
return []BrowserLocation {
BrowserLocation {
Drive: "Home",
Location: home,
Dir: true,
},
BrowserLocation {
Drive: "Root",
Location: "/",
Dir: true,
},
BrowserLocation {
Drive: "PWD",
Location: ".",
Dir: true,
},
}
}
fileInfos, err := ioutil.ReadDir(vfsPath)
if err == nil {
for _, fi := range fileInfos {
vfsEntries = append(vfsEntries, BrowserLocation {
Location: filepath.Join(vfsPath, fi.Name()),
Dir: fi.IsDir(),
})
}
}
// List
return vfsEntries
}
// BrowserVFSAppendDownloads appends the user's Downloads directory to the list if one can be found.
func BrowserVFSAppendDownloads(existing []BrowserLocation) []BrowserLocation {
home := os.Getenv("HOME")
uprof := os.Getenv("USERPROFILE")
locations := []string{
home + "/Downloads",
uprof + "/Downloads",
}
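// Use the first candidate whose Downloads directory can actually be read; at most one entry is appended.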
for _, v := range locations {
_, err := ioutil.ReadDir(v)
if err == nil {
existing = append(existing, BrowserLocation {
Drive: "Downloads",
Location: v,
Dir: true,
})
break
}
}
return existing
}
|
[
"\"HOME\"",
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
src/train.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
from mrc_utils.preprocess import process
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
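# Enable cudnn autotuning unless disabled via the not_cuda_benchmark option or when running in test mode.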
Dataset = get_dataset(opt.dataset, opt.task)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Preprocessing data...')
mean, var = process(opt)
Dataset.mean = mean
Dataset.var = var
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
Dataset(opt, 'val'),
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
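# Test mode: run a single validation pass, evaluate the predictions, and exit without training.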
if opt.test:
_, preds = trainer.val(0, val_loader)
val_loader.dataset.run_eval(preds, opt.save_dir)
return
train_loader = torch.utils.data.DataLoader(
Dataset(opt, 'train'),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
best = 1e10
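# best tracks the lowest validation metric seen so far; model_best.pth is refreshed whenever it improves.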
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
with torch.no_grad():
log_dict_val, preds = trainer.val(epoch, val_loader)
for k, v in log_dict_val.items():
logger.scalar_summary('val_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if log_dict_val[opt.metric] < best:
best = log_dict_val[opt.metric]
save_model(os.path.join(opt.save_dir, 'model_best.pth'),
epoch, model)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
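# Step decay: scale the base learning rate by 0.1 for each lr_step milestone reached so far.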
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logger.close()
if __name__ == '__main__':
opt = opts().parse()
main(opt)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
apiserver/cmd/server/server.go
|
// Copyright © 2016 National Data Service
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/ndslabs/apiserver/pkg/config"
"github.com/ndslabs/apiserver/pkg/email"
"github.com/ndslabs/apiserver/pkg/etcd"
"github.com/ndslabs/apiserver/pkg/kube"
mw "github.com/ndslabs/apiserver/pkg/middleware"
api "github.com/ndslabs/apiserver/pkg/types"
"github.com/ndslabs/apiserver/pkg/validate"
"github.com/ndslabs/apiserver/pkg/version"
"github.com/StephanDollberg/go-json-rest-middleware-jwt"
"github.com/ant0ine/go-json-rest/rest"
jwtbase "github.com/dgrijalva/jwt-go"
"github.com/golang/glog"
"path/filepath"
"k8s.io/api/core/v1"
kuberest "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var adminUser = "admin"
var systemNamespace = "kube-system"
type Server struct {
Config *config.Config
etcd *etcd.EtcdHelper
kube *kube.KubeHelper
Validator *validate.Validator
email *email.EmailHelper
Namespace string
local bool
homePvcSuffix string
hostname string
jwt *jwt.JWTMiddleware
prefix string
ingress config.IngressType
domain string
requireApproval bool
origin string
}
var defaultTimeout = 600
func main() {
var confPath, adminPasswd string
flag.StringVar(&confPath, "conf", "apiserver.json", "Configuration path")
flag.StringVar(&adminPasswd, "passwd", "admin", "Admin user password")
var kubeconfig *string
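// Default the kubeconfig flag to $HOME/.kube/config when HOME is set; an empty path later falls through to the in-cluster config.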
if home := os.Getenv("HOME"); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
cfg, err := readConfig(confPath)
if err != nil {
glog.Error(err)
os.Exit(-1)
}
if cfg.Port == "" {
cfg.Port = "30001"
}
if cfg.Etcd.Address == "" {
cfg.Etcd.Address = "localhost:4001"
}
if cfg.Etcd.MaxMessages <= 0 {
cfg.Etcd.MaxMessages = 100
}
if cfg.Kubernetes.Address == "" {
cfg.Kubernetes.Address = "localhost:6443"
}
if cfg.Kubernetes.TokenPath == "" {
cfg.Kubernetes.TokenPath = "/run/secrets/kubernetes.io/serviceaccount/token"
}
if cfg.Kubernetes.QPS <= 0 {
cfg.Kubernetes.QPS = 50
}
if cfg.Kubernetes.Burst <= 0 {
cfg.Kubernetes.Burst = 100
}
if cfg.DefaultLimits.MemMax <= 0 {
cfg.DefaultLimits.MemMax = 8196 //M
}
if cfg.DefaultLimits.MemDefault <= 0 {
cfg.DefaultLimits.MemDefault = 100 //M
}
if cfg.DefaultLimits.CpuMax <= 0 {
cfg.DefaultLimits.CpuMax = 2000 //m
}
if cfg.DefaultLimits.CpuDefault <= 0 {
cfg.DefaultLimits.CpuDefault = 1000 //m
}
if cfg.DefaultLimits.StorageDefault <= 0 {
cfg.DefaultLimits.StorageDefault = 10
}
if cfg.DefaultLimits.InactiveTimeout <= 0 {
cfg.DefaultLimits.InactiveTimeout = 8 * 60 // minutes
}
hostname, err := os.Hostname()
if err != nil {
glog.Fatal(err)
}
glog.Infof("Connecting to etcd on %s\n", cfg.Etcd.Address)
etcd, err := etcd.NewEtcdHelper(cfg.Etcd.Address, cfg.Etcd.MaxMessages)
if err != nil {
glog.Errorf("Etcd not available: %s\n", err)
glog.Fatal(err)
}
glog.Infof("Connected to etcd\n")
var kConfig *kuberest.Config
if _, err := os.Stat(*kubeconfig); os.IsNotExist(err) {
glog.Infof("File %s does not exist, assuming in-cluster\n", *kubeconfig)
// Assume running in cluster
kConfig, err = kuberest.InClusterConfig()
if err != nil {
panic(err.Error())
}
} else {
glog.Infof("File %s exists, assuming out-of-cluster\n", *kubeconfig)
// Assume running out of cluster
kConfig, err = clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
panic(err.Error())
}
}
kConfig.QPS = cfg.Kubernetes.QPS
kConfig.Burst = cfg.Kubernetes.Burst
kube, err := kube.NewKubeHelper(cfg.Kubernetes.Address,
cfg.Kubernetes.Username, cfg.Kubernetes.Password, cfg.Kubernetes.TokenPath, kConfig, cfg.AuthSignInURL, cfg.AuthURL)
if err != nil {
glog.Errorf("Kubernetes API server not available\n")
glog.Fatal(err)
}
glog.Infof("Connected to Kubernetes\n")
email, err := email.NewEmailHelper(cfg.Email.Host, cfg.Email.Port, cfg.Email.TLS, cfg.Support.Email, cfg.Origin, cfg.Name)
if err != nil {
glog.Errorf("Error in email server configuration\n")
glog.Fatal(err)
}
server := Server{}
server.hostname = hostname
if cfg.Ingress == config.IngressTypeLoadBalancer {
if len(cfg.Domain) > 0 {
server.domain = cfg.Domain
} else {
glog.Error("Domain must be specified for ingress type LoadBalancer")
}
}
server.etcd = etcd
server.kube = kube
server.email = email
server.Config = cfg
server.homePvcSuffix = cfg.HomePvcSuffix
server.requireApproval = cfg.RequireApproval
glog.Info("Checking for TLS issuer...\n")
if cfg.Certmgr.ClusterIssuer != "" {
glog.Infof("Using TLS cluster issuer: %s\n", cfg.Certmgr.ClusterIssuer)
} else if cfg.Certmgr.Issuer != "" {
glog.Infof("Using TLS issuer: %s\n", cfg.Certmgr.Issuer)
}
server.ingress = config.IngressTypeNodePort
if cfg.Ingress != "" {
server.ingress = cfg.Ingress
}
server.prefix = "/api/"
if cfg.Prefix != "" {
server.prefix = cfg.Prefix
}
server.start(cfg, adminPasswd)
}
func (s *Server) start(cfg *config.Config, adminPasswd string) {
glog.Infof("Starting Workbench API server (%s %s)", version.VERSION, version.BUILD_DATE)
glog.Infof("Using etcd %s ", cfg.Etcd.Address)
glog.Infof("Using kube-apiserver %s", cfg.Kubernetes.Address)
glog.Infof("Using home pvc suffix %s", cfg.HomePvcSuffix)
glog.Infof("Using specs dir %s", cfg.Specs.Path)
glog.Infof("Using nodeSelector %s: %s", cfg.Kubernetes.NodeSelectorName, cfg.Kubernetes.NodeSelectorValue)
glog.Infof("Listening on port %s", cfg.Port)
api := rest.NewApi()
api.Use(rest.DefaultDevStack...)
api.Use(&mw.NoCacheMiddleware{})
glog.Infof("prefix %s", s.prefix)
if len(cfg.Origin) > 0 {
glog.Infof("CORS origin %s\n", cfg.Origin)
s.origin = cfg.Origin
api.Use(&rest.CorsMiddleware{
RejectNonCorsRequests: false,
OriginValidator: func(origin string, request *rest.Request) bool {
// NDS-552
return origin == cfg.Origin || origin == cfg.Origin+"."
},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"},
AllowedHeaders: []string{
"Accept", "Content-Type", "X-Custom-Header", "Origin", "accept", "authorization"},
AccessControlAllowCredentials: true,
AccessControlMaxAge: 3600,
})
}
timeout := time.Minute * 30
if cfg.Timeout > 0 {
timeout = time.Minute * time.Duration(cfg.Timeout)
}
glog.Infof("session timeout %s", timeout)
glog.Infof("domain %s", cfg.Domain)
glog.Infof("ingress %s", cfg.Ingress)
jwt := &jwt.JWTMiddleware{
Key: []byte(adminPasswd),
Realm: "ndslabs",
Timeout: timeout,
MaxRefresh: time.Hour * 24,
Authenticator: func(userId string, password string) bool {
if userId == adminUser && password == adminPasswd {
return true
} else {
if strings.Contains(userId, "@") {
account := s.getAccountByEmail(userId)
if account != nil {
userId = account.Namespace
}
}
return s.etcd.CheckPassword(userId, password) && s.etcd.CheckAccess(userId)
}
},
Authorizator: func(userId string, request *rest.Request) bool {
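// A token is only honored by the host that issued it; the issuing hostname is recorded in the payload by PayloadFunc.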
payload := request.Env["JWT_PAYLOAD"].(map[string]interface{})
if payload["server"] == s.hostname {
return true
} else {
return false
}
},
PayloadFunc: func(userId string) map[string]interface{} {
payload := make(map[string]interface{})
if userId == adminUser {
payload[adminUser] = true
}
if strings.Contains(userId, "@") {
account := s.getAccountByEmail(userId)
if account != nil {
userId = account.Namespace
}
}
payload["server"] = s.hostname
payload["user"] = userId
account, err := s.etcd.GetAccount(userId)
if err == nil {
account.LastLogin = time.Now().Unix()
s.etcd.PutAccount(account.Namespace, account, false)
}
return payload
},
}
s.jwt = jwt
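// Require a valid JWT only for the protected route prefixes listed below; authentication, registration, and health endpoints stay open.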
api.Use(&rest.IfMiddleware{
Condition: func(request *rest.Request) bool {
glog.Infof("remoteAddr: %s", request.Request.RemoteAddr)
return strings.HasPrefix(request.URL.Path, s.prefix+"accounts") ||
strings.HasPrefix(request.URL.Path, s.prefix+"change_password") ||
strings.HasPrefix(request.URL.Path, s.prefix+"check_token") ||
strings.HasPrefix(request.URL.Path, s.prefix+"check_console") ||
strings.HasPrefix(request.URL.Path, s.prefix+"configs") ||
strings.HasPrefix(request.URL.Path, s.prefix+"export") ||
strings.HasPrefix(request.URL.Path, s.prefix+"import") ||
strings.HasPrefix(request.URL.Path, s.prefix+"log_level") ||
strings.HasPrefix(request.URL.Path, s.prefix+"logs") ||
strings.HasPrefix(request.URL.Path, s.prefix+"mount") ||
strings.HasPrefix(request.URL.Path, s.prefix+"refresh_token") ||
strings.HasPrefix(request.URL.Path, s.prefix+"services") ||
strings.HasPrefix(request.URL.Path, s.prefix+"shutdown") ||
strings.HasPrefix(request.URL.Path, s.prefix+"stacks") ||
strings.HasPrefix(request.URL.Path, s.prefix+"start") ||
strings.HasPrefix(request.URL.Path, s.prefix+"stop") ||
strings.HasPrefix(request.URL.Path, s.prefix+"support") ||
strings.HasPrefix(request.URL.Path, s.prefix+"volumes")
},
IfTrue: jwt,
})
routes := make([]*rest.Route, 0)
routes = append(routes,
rest.Get(s.prefix, s.GetPaths),
rest.Get(s.prefix+"version", Version),
rest.Post(s.prefix+"authenticate", jwt.LoginHandler),
rest.Delete(s.prefix+"authenticate", s.Logout),
rest.Get(s.prefix+"check_token", s.CheckToken),
rest.Get(s.prefix+"refresh_token", jwt.RefreshHandler),
rest.Get(s.prefix+"accounts", s.GetAllAccounts),
rest.Post(s.prefix+"accounts", s.PostAccount),
rest.Post(s.prefix+"register", s.RegisterAccount),
rest.Put(s.prefix+"register/verify", s.VerifyAccount),
rest.Get(s.prefix+"register/approve", s.ApproveAccount),
rest.Get(s.prefix+"register/deny", s.DenyAccount),
rest.Put(s.prefix+"accounts/:userId", s.PutAccount),
rest.Get(s.prefix+"accounts/:userId", s.GetAccount),
rest.Post(s.prefix+"reset/:userId", s.ResetPassword),
rest.Post(s.prefix+"reset", s.ResetPassword),
rest.Delete(s.prefix+"accounts/:userId", s.DeleteAccount),
rest.Get(s.prefix+"services", s.GetAllServices),
rest.Post(s.prefix+"services", s.PostService),
rest.Put(s.prefix+"services/:key", s.PutService),
rest.Get(s.prefix+"services/:key", s.GetService),
rest.Delete(s.prefix+"services/:key", s.DeleteService),
rest.Get(s.prefix+"configs", s.GetConfigs),
rest.Get(s.prefix+"stacks", s.GetAllStacks),
rest.Post(s.prefix+"stacks", s.PostStack),
rest.Put(s.prefix+"stacks/:sid", s.PutStack),
rest.Get(s.prefix+"stacks/:sid", s.GetStack),
rest.Delete(s.prefix+"stacks/:sid", s.DeleteStack),
rest.Get(s.prefix+"start", s.QuickstartStack),
rest.Get(s.prefix+"start/:sid", s.StartStack),
rest.Get(s.prefix+"stop/:sid", s.StopStack),
rest.Get(s.prefix+"logs/:ssid", s.GetLogs),
rest.Get(s.prefix+"console", s.GetConsole),
rest.Get(s.prefix+"check_console", s.CheckConsole),
rest.Get(s.prefix+"vocabulary/:name", s.GetVocabulary),
rest.Put(s.prefix+"stacks/:sid/rename", s.RenameStack),
rest.Put(s.prefix+"change_password", s.ChangePassword),
rest.Post(s.prefix+"support", s.PostSupport),
rest.Get(s.prefix+"contact", s.GetContact),
rest.Get(s.prefix+"healthz", s.GetHealthz),
rest.Post(s.prefix+"import/:userId", s.ImportAccount),
rest.Get(s.prefix+"export/:userId", s.ExportAccount),
rest.Get(s.prefix+"stop_all", s.StopAllStacks),
rest.Put(s.prefix+"log_level/:level", s.PutLogLevel),
rest.Get(s.prefix+"download", s.DownloadClient),
)
router, err := rest.MakeRouter(routes...)
if err != nil {
glog.Fatal(err)
}
api.SetApp(router)
if len(cfg.Specs.Path) > 0 {
glog.Infof("Loading service specs from %s\n", cfg.Specs.Path)
err = s.loadSpecs(cfg.Specs.Path)
if err != nil {
glog.Warningf("Error loading specs: %s\n", err)
}
s.addVocabulary(cfg.Specs.Path + "/vocab/tags.json")
s.Validator = validate.NewValidator(cfg.Specs.Path + "/schemas/spec-schema.json")
}
s.createAdminUser(adminPasswd)
go s.initExistingAccounts()
go s.kube.WatchEvents(s)
go s.shutdownInactiveServices()
// primary rest api server
httpsrv := &http.Server{
Addr: ":" + cfg.Port,
Handler: api.MakeHandler(),
}
glog.Infof("Listening on %s", cfg.Port)
// internal admin server, currently only handling oauth registration
adminsrv := &http.Server{
Addr: ":" + cfg.AdminPort,
Handler: http.HandlerFunc(s.RegisterUserOauth),
}
glog.Infof("Admin server listening on %s", cfg.AdminPort)
stop := make(chan os.Signal, 2)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
go func() {
httpsrv.ListenAndServe()
}()
go func() {
adminsrv.ListenAndServe()
}()
<-stop
// Handle shutdown
fmt.Println("Shutting down apiserver")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
httpsrv.Shutdown(ctx)
fmt.Println("Apiserver stopped")
}
func (s *Server) CheckConsole(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
ssid := r.Request.FormValue("ssid")
if !s.kube.NamespaceExists(userId) || !s.stackServiceExists(userId, ssid) {
rest.NotFound(w, r)
return
} else {
w.WriteHeader(http.StatusOK)
return
}
}
func (s *Server) GetConsole(w rest.ResponseWriter, r *rest.Request) {
userId := r.Request.FormValue("namespace")
ssid := r.Request.FormValue("ssid")
if !s.kube.NamespaceExists(userId) || !s.stackServiceExists(userId, ssid) {
rest.NotFound(w, r)
return
}
pods, _ := s.kube.GetPods(userId, "name", ssid)
pod := pods.Items[0].Name
container := pods.Items[0].Spec.Containers[0].Name
glog.V(4).Infof("exec called for %s %s %s\n", userId, ssid, pod)
s.kube.Exec(userId, pod, container, s.kube).ServeHTTP(w.(http.ResponseWriter), r.Request)
}
func (s *Server) initExistingAccounts() {
accounts, err := s.etcd.GetAccounts()
if err != nil {
glog.Error(err)
return
}
for _, account := range *accounts {
if !s.kube.NamespaceExists(account.Namespace) && account.Status == api.AccountStatusApproved {
s.kube.CreateNamespace(account.Namespace)
// Create a PVC for this user's data
storageClass := s.Config.Kubernetes.StorageClass
claimName := account.Namespace + s.Config.HomePvcSuffix
s.kube.CreatePersistentVolumeClaim(account.Namespace, claimName, storageClass)
if account.ResourceLimits.CPUMax > 0 &&
account.ResourceLimits.MemoryMax > 0 {
s.kube.CreateResourceQuota(account.Namespace,
account.ResourceLimits.CPUMax,
account.ResourceLimits.MemoryMax)
s.kube.CreateLimitRange(account.Namespace,
account.ResourceLimits.CPUDefault,
account.ResourceLimits.MemoryDefault)
}
}
stacks, err := s.etcd.GetStacks(account.Namespace)
if err != nil {
glog.Error(err)
}
for _, stack := range *stacks {
if stack.Status == "starting" || stack.Status == "started" {
_, err = s.startStack(account.Namespace, &stack)
if err != nil {
glog.Errorf("Error starting stack %s %s\n", account.Namespace, stack.Id)
glog.Error(err)
}
} else if stack.Status == "stopping" {
_, err = s.stopStack(account.Namespace, stack.Id)
if err != nil {
glog.Errorf("Error stopping stack %s %s\n", account.Namespace, stack.Id)
glog.Error(err)
}
}
}
}
}
func (s *Server) GetPaths(w rest.ResponseWriter, r *rest.Request) {
paths := []string{}
paths = append(paths, s.prefix+"accounts")
paths = append(paths, s.prefix+"authenticate")
paths = append(paths, s.prefix+"change_password")
paths = append(paths, s.prefix+"configs")
paths = append(paths, s.prefix+"console")
paths = append(paths, s.prefix+"contact")
paths = append(paths, s.prefix+"healthz")
paths = append(paths, s.prefix+"log_level")
paths = append(paths, s.prefix+"logs")
paths = append(paths, s.prefix+"mount")
paths = append(paths, s.prefix+"register")
paths = append(paths, s.prefix+"reset")
paths = append(paths, s.prefix+"services")
paths = append(paths, s.prefix+"stacks")
paths = append(paths, s.prefix+"start")
paths = append(paths, s.prefix+"stop")
paths = append(paths, s.prefix+"support")
paths = append(paths, s.prefix+"version")
paths = append(paths, s.prefix+"vocabulary")
w.WriteJson(&paths)
}
func Version(w rest.ResponseWriter, r *rest.Request) {
w.WriteJson(fmt.Sprintf("%s %s", version.VERSION, version.BUILD_DATE))
}
func (s *Server) CheckToken(w rest.ResponseWriter, r *rest.Request) {
// Basic token validation is handled by jwt middleware
userId := s.getUser(r)
host := r.Request.FormValue("host")
// Log last activity for user
account, err := s.etcd.GetAccount(userId)
if err == nil {
account.LastLogin = time.Now().Unix()
s.etcd.PutAccount(account.Namespace, account, false)
}
// If host specified, see if it belongs to this namespace
if len(host) > 0 {
ok, err := (s.checkIngress(userId, host))
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
} else {
if ok || s.IsAdmin(r) {
w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusForbidden)
}
}
} else {
w.WriteHeader(http.StatusOK)
}
}
func (s *Server) Logout(w rest.ResponseWriter, r *rest.Request) {
w.WriteHeader(http.StatusOK)
}
func (s *Server) GetAllAccounts(w rest.ResponseWriter, r *rest.Request) {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
accounts, err := s.etcd.GetAccounts()
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
w.WriteJson(&err)
} else {
w.WriteJson(&accounts)
}
}
func (s *Server) getUser(r *rest.Request) string {
if r.Env["JWT_PAYLOAD"] != nil {
payload := r.Env["JWT_PAYLOAD"].(map[string]interface{})
if payload[adminUser] == true {
return ""
} else {
return payload["user"].(string)
}
}
return ""
}
func (s *Server) IsAdmin(r *rest.Request) bool {
payload := r.Env["JWT_PAYLOAD"].(map[string]interface{})
if payload[adminUser] == true {
return true
} else {
return false
}
}
func (s *Server) GetAccount(w rest.ResponseWriter, r *rest.Request) {
userId := r.PathParam("userId")
if strings.Contains(userId, "@") {
account := s.getAccountByEmail(userId)
if account != nil {
userId = account.Namespace
}
}
// Check IsAdmin or userId = current user
if !(s.IsAdmin(r) || s.getUser(r) == userId) {
rest.Error(w, "", http.StatusForbidden)
return
}
glog.V(4).Infof("Getting account %s\n", userId)
account, err := s.etcd.GetAccount(userId)
if err != nil {
rest.NotFound(w, r)
return
} else {
glog.V(4).Infof("Getting quotas for %s\n", userId)
quota, err := s.kube.GetResourceQuota(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
} else {
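// Report usage as absolute quantities and as fractions of the namespace resource quota.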
usedMemory := quota.Items[0].Status.Used[v1.ResourceMemory]
hardMemory := quota.Items[0].Status.Hard[v1.ResourceMemory]
usedCPU := quota.Items[0].Status.Used[v1.ResourceCPU]
hardCPU := quota.Items[0].Status.Hard[v1.ResourceCPU]
glog.V(4).Infof("Usage: %d %d \n", usedMemory.Value(), hardMemory.Value())
account.ResourceUsage = api.ResourceUsage{
CPU: usedCPU.String(),
Memory: usedMemory.String(),
CPUPct: fmt.Sprintf("%f",
float64(usedCPU.Value())/float64(hardCPU.Value())),
MemoryPct: fmt.Sprintf("%f",
float64(usedMemory.Value())/float64(hardMemory.Value())),
}
}
account.Password = ""
if !(s.IsAdmin(r)) {
account.Token = ""
}
w.WriteJson(account)
}
}
func (s *Server) PostAccount(w rest.ResponseWriter, r *rest.Request) {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
account := api.Account{}
err := r.DecodeJsonPayload(&account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if s.accountExists(account.Namespace) {
w.WriteHeader(http.StatusConflict)
return
}
err = s.etcd.PutAccount(account.Namespace, &account, true)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.setupAccount(&account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&account)
}
func (s *Server) updateIngress(uid string) error {
ingresses, err := s.kube.GetIngresses(uid)
if err != nil {
glog.Error(err)
return err
}
if ingresses != nil {
for _, ingress := range ingresses.Items {
glog.V(4).Infof("Touching ingress %s\n", ingress.Name)
_, err = s.kube.CreateUpdateIngress(uid, &ingress, true)
if err != nil {
glog.Error(err)
return err
}
}
}
return nil
}
func (s *Server) createLMABasicAuthSecret() error {
if s.kube.NamespaceExists(systemNamespace) {
account, err := s.etcd.GetAccount(adminUser)
if err != nil {
glog.Error(err)
return err
}
_, err = s.kube.CreateBasicAuthSecret(systemNamespace, adminUser, "", account.Password)
if err != nil {
glog.Error(err)
return err
}
}
err := s.updateIngress(systemNamespace)
if err != nil {
glog.Error(err)
return err
}
return nil
}
func (s *Server) setupAccount(account *api.Account) error {
_, err := s.kube.CreateNamespace(account.Namespace)
if err != nil {
return err
}
// Create a PVC for this user's data
storageClass := s.Config.Kubernetes.StorageClass
claimName := account.Namespace + s.Config.HomePvcSuffix
s.kube.CreatePersistentVolumeClaim(account.Namespace, claimName, storageClass)
if account.ResourceLimits == (api.AccountResourceLimits{}) {
glog.Warningf("No resource limits specified for account %s, using defaults\n", account.Name)
account.ResourceLimits = api.AccountResourceLimits{
CPUMax: s.Config.DefaultLimits.CpuMax,
CPUDefault: s.Config.DefaultLimits.CpuDefault,
MemoryMax: s.Config.DefaultLimits.MemMax,
MemoryDefault: s.Config.DefaultLimits.MemDefault,
StorageQuota: s.Config.DefaultLimits.StorageDefault,
}
}
_, err = s.kube.CreateResourceQuota(account.Namespace,
account.ResourceLimits.CPUMax,
account.ResourceLimits.MemoryMax)
if err != nil {
return err
}
_, err = s.kube.CreateLimitRange(account.Namespace,
account.ResourceLimits.CPUDefault,
account.ResourceLimits.MemoryDefault)
if err != nil {
return err
}
return nil
}
// RegisterAccount registers an account for this user and sends a verification email.
func (s *Server) RegisterAccount(w rest.ResponseWriter, r *rest.Request) {
account := api.Account{}
err := r.DecodeJsonPayload(&account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if s.accountExists(account.Namespace) {
rest.Error(w, "Username is in use", http.StatusConflict)
return
}
if s.emailExists(account.EmailAddress) {
rest.Error(w, "Email address is already associated with another account", http.StatusConflict)
return
}
// Set the account status to unverified
account.Status = api.AccountStatusUnverified
// Put account generates the registration token
err = s.etcd.PutAccount(account.Namespace, &account, true)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
verifyUrl := s.origin + "/landing/?t=" + account.Token + "&u=" + account.Namespace
err = s.email.SendVerificationEmail(account.Name, account.EmailAddress, verifyUrl)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&account)
}
func (s *Server) VerifyAccount(w rest.ResponseWriter, r *rest.Request) {
data := make(map[string]string)
err := r.DecodeJsonPayload(&data)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
userId := data["u"]
token := data["t"]
account, err := s.etcd.GetAccount(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if account.InactiveTimeout == 0 {
account.InactiveTimeout = s.Config.DefaultLimits.InactiveTimeout
}
glog.Infof("Inactive timeout for %s set to %v\n", account.Namespace, account.InactiveTimeout)
if s.requireApproval {
if account.Status == api.AccountStatusUnverified &&
account.Token == token {
account.Status = api.AccountStatusUnapproved
err = s.etcd.PutAccount(userId, account, false)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.email.SendVerifiedEmail(account.Name, account.EmailAddress)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
approveUrl := s.origin + "/api/register/approve?t=" + account.Token + "&u=" + account.Namespace
denyUrl := s.origin + "/api/register/deny?t=" + account.Token + "&u=" + account.Namespace
err = s.email.SendNewAccountEmail(account, approveUrl, denyUrl)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusNotFound)
}
} else {
account.Status = api.AccountStatusApproved
err := s.etcd.PutAccount(userId, account, false)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.setupAccount(account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.email.SendStatusEmail(account.Name, account.Namespace, account.EmailAddress, s.origin, account.NextURL, true)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func (s *Server) ApproveAccount(w rest.ResponseWriter, r *rest.Request) {
userId := r.Request.FormValue("u")
token := r.Request.FormValue("t")
account, err := s.etcd.GetAccount(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if account.Status == api.AccountStatusUnapproved &&
account.Token == token {
account.Status = api.AccountStatusApproved
err = s.etcd.PutAccount(userId, account, false)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.setupAccount(account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.email.SendStatusEmail(account.Name, account.Namespace, account.EmailAddress, s.origin, account.NextURL, true)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
w.WriteJson(map[string]string{"message": "Account has been approved."})
} else {
rest.Error(w, "Token not found", http.StatusNotFound)
}
}
func (s *Server) DenyAccount(w rest.ResponseWriter, r *rest.Request) {
userId := r.Request.FormValue("u")
token := r.Request.FormValue("t")
account, err := s.etcd.GetAccount(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if account.Status == api.AccountStatusUnapproved &&
account.Token == token {
account.Status = api.AccountStatusDenied
err = s.etcd.PutAccount(userId, account, false)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.email.SendStatusEmail(account.Name, account.Namespace, account.EmailAddress, "", "", false)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
w.WriteJson(map[string]string{"message": "Account has been denied."})
} else {
rest.Error(w, "Token not found", http.StatusNotFound)
}
}
func (s *Server) PutAccount(w rest.ResponseWriter, r *rest.Request) {
userId := r.PathParam("userId")
// Check IsAdmin or userId = current user
if !(s.IsAdmin(r) || s.getUser(r) == userId) {
rest.Error(w, "", http.StatusForbidden)
return
}
account := api.Account{}
err := r.DecodeJsonPayload(&account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.etcd.PutAccount(userId, &account, true)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&account)
}
func (s *Server) DeleteAccount(w rest.ResponseWriter, r *rest.Request) {
userId := r.PathParam("userId")
glog.V(4).Infof("DeleteAccount %s", userId)
// Check IsAdmin or userId = current user
if !(s.IsAdmin(r) || s.getUser(r) == userId) {
rest.Error(w, "", http.StatusForbidden)
return
}
if userId == "admin" {
rest.Error(w, "", http.StatusForbidden)
return
}
if !s.accountExists(userId) {
rest.NotFound(w, r)
return
}
if s.kube.NamespaceExists(userId) {
claimName := userId + s.Config.HomePvcSuffix
err := s.kube.DeletePersistentVolumeClaim(userId, claimName)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.kube.DeleteNamespace(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
err := s.etcd.DeleteAccount(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (s *Server) GetAllServices(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
catalog := r.Request.FormValue("catalog")
if catalog == "system" {
services, err := s.etcd.GetGlobalServices()
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&services)
} else if catalog == "all" {
services, err := s.etcd.GetAllServices(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&services)
} else {
services, err := s.etcd.GetServices(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&services)
}
}
func (s *Server) GetService(w rest.ResponseWriter, r *rest.Request) {
key := r.PathParam("key")
catalog := r.Request.FormValue("catalog")
userId := s.getUser(r)
glog.V(4).Infof("GetService %s\n", key)
if catalog == "system" {
if !s.serviceExists(userId, key) {
rest.NotFound(w, r)
return
}
spec, err := s.etcd.GetServiceSpec(userId, key)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
w.WriteJson(&spec)
}
} else {
if !s.serviceExists(userId, key) {
rest.NotFound(w, r)
return
}
spec, err := s.etcd.GetServiceSpec(userId, key)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
w.WriteJson(&spec)
}
}
}
func (s *Server) PostService(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
catalog := r.Request.FormValue("catalog")
service := api.ServiceSpec{}
err := r.DecodeJsonPayload(&service)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ok, err := s.Validator.ValidateSpec(&service)
if !ok {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
if s.serviceExists(userId, service.Key) {
rest.Error(w, "Service exists with key", http.StatusConflict)
return
}
dep, ok := s.checkDependencies(userId, &service)
if !ok {
glog.Warningf("Cannot add service, dependency %s missing\n", dep)
rest.Error(w, fmt.Sprintf("Missing dependency %s", dep), http.StatusNotFound)
return
}
cf, ok := s.checkConfigs(userId, &service)
if !ok {
glog.Warningf("Cannot add service, config dependency %s missing\n", cf)
rest.Error(w, fmt.Sprintf("Missing config dependency %s", cf), http.StatusNotFound)
return
}
if catalog == "system" {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
err = s.etcd.PutGlobalService(service.Key, &service)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
glog.V(1).Infof("Added system service %s\n", service.Key)
} else {
// Don't allow privileged services in user catalogs
service.SecurityContext = v1.SecurityContext{}
// Always require auth on user catalog services
service.AuthRequired = true
err = s.etcd.PutService(userId, service.Key, &service)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
glog.V(1).Infof("Added user %s service %s\n", userId, service.Key)
}
w.WriteHeader(http.StatusOK)
}
func (s *Server) PutService(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
key := r.PathParam("key")
catalog := r.Request.FormValue("catalog")
service := api.ServiceSpec{}
err := r.DecodeJsonPayload(&service)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ok, err := s.Validator.ValidateSpec(&service)
if !ok {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusBadRequest)
return
}
dep, ok := s.checkDependencies(userId, &service)
if !ok {
glog.Warningf("Cannot add service, dependency %s missing\n", dep)
rest.Error(w, fmt.Sprintf("Missing dependency %s", dep), http.StatusNotFound)
return
}
cf, ok := s.checkConfigs(userId, &service)
if !ok {
glog.Warningf("Cannot add service, config dependency %s missing\n", cf)
rest.Error(w, fmt.Sprintf("Missing config dependency %s", cf), http.StatusNotFound)
return
}
if catalog == "system" {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
if s.serviceInUse(key) > 0 {
glog.Warningf("Cannot update service spec %s because it is in use by one or more accounts\n", key)
rest.Error(w, "Service is in use", http.StatusConflict)
return
}
err = s.etcd.PutGlobalService(key, &service)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
glog.V(1).Infof("Updated system service %s\n", key)
} else {
if s.serviceInUse(key) > 0 {
glog.Warningf("Cannot update service spec %s because it is in use by one or more accounts\n", key)
rest.Error(w, "Service is in use", http.StatusConflict)
return
}
// Don't allow privileged services in user catalogs
service.SecurityContext = v1.SecurityContext{}
// Always require auth on user catalog services
service.AuthRequired = true
err = s.etcd.PutService(userId, key, &service)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
glog.V(1).Infof("Updated user %s service %s\n", userId, key)
}
w.WriteJson(&service)
}
func (s *Server) DeleteService(w rest.ResponseWriter, r *rest.Request) {
key := r.PathParam("key")
catalog := r.Request.FormValue("catalog")
userId := s.getUser(r)
glog.V(4).Infof("DeleteService %s %s %s\n", key, catalog, userId)
if catalog == "system" {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
if !s.serviceExists(userId, key) {
rest.Error(w, "No such service", http.StatusNotFound)
return
}
if s.serviceIsDependencyGlobal(key) > 0 {
glog.Warningf("Cannot delete system service spec %s because it is required by one or more services\n", key)
rest.Error(w, "Required by another service", http.StatusConflict)
return
}
if s.serviceInUse(key) > 0 {
glog.Warningf("Cannot delete system service spec %s because it is in use by one or more accounts\n", key)
rest.Error(w, "Service is in use", http.StatusConflict)
return
}
err := s.etcd.DeleteGlobalService(key)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
glog.V(1).Infof("Deleted system service %s\n", key)
} else {
service, _ := s.etcd.GetServiceSpec(userId, key)
if service == nil || service.Catalog != "user" {
rest.Error(w, "No such service", http.StatusNotFound)
return
}
if s.serviceIsDependency(key, userId) > 0 {
glog.Warningf("Cannot delete user service spec %s because it is required by one or more services\n", key)
rest.Error(w, "Required by another service", http.StatusConflict)
return
}
if s.serviceInUse(key) > 0 {
glog.Warningf("Cannot delete user service spec %s because it is in use by one or more accounts\n", key)
rest.Error(w, "Service is in use", http.StatusConflict)
return
}
err := s.etcd.DeleteService(userId, key)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
w.WriteHeader(http.StatusOK)
}
func (s *Server) serviceInUse(sid string) int {
inUse := 0
accounts, err := s.etcd.GetAccounts()
if err != nil {
glog.Errorf("Error getting accounts\n")
}
if accounts != nil {
for _, account := range *accounts {
stacks, _ := s.etcd.GetStacks(account.Namespace)
for _, stack := range *stacks {
for _, service := range stack.Services {
if service.Service == sid {
inUse++
}
}
}
}
}
return inUse
}
func (s *Server) GetAllStacks(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
stacks, err := s.getStacks(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
w.WriteJson(&err)
} else {
w.WriteJson(&stacks)
}
}
func (s *Server) getStacks(userId string) (*[]api.Stack, error) {
stacks := []api.Stack{}
stks, err := s.etcd.GetStacks(userId)
if err == nil {
for _, stack := range *stks {
stack, _ := s.getStackWithStatus(userId, stack.Id)
stacks = append(stacks, *stack)
}
}
return &stacks, nil
}
func (s *Server) getStackByServiceId(userId string, sid string) (*api.Stack, error) {
var stack *api.Stack = nil
stks, err := s.etcd.GetStacks(userId)
if err == nil {
for _, stk := range *stks {
if stk.Key == sid {
stack = &stk
break
}
}
}
return stack, nil
}
func (s *Server) accountExists(userId string) bool {
accounts, _ := s.etcd.GetAccounts()
if accounts == nil {
return false
}
exists := false
for _, account := range *accounts {
if account.Namespace == userId {
exists = true
break
}
}
return exists
}
func (s *Server) emailExists(email string) bool {
accounts, _ := s.etcd.GetAccounts()
if accounts == nil {
return false
}
exists := false
for _, account := range *accounts {
if account.EmailAddress == email {
exists = true
break
}
}
return exists
}
func (s *Server) stackServiceExists(userId string, id string) bool {
stacks, _ := s.getStacks(userId)
if stacks == nil {
return false
}
exists := false
for _, stack := range *stacks {
for _, stackService := range stack.Services {
if stackService.Id == id {
exists = true
break
}
}
}
return exists
}
func (s *Server) serviceIsDependencyGlobal(sid string) int {
services, _ := s.etcd.GetGlobalServices()
dependencies := 0
for _, service := range *services {
for _, dependency := range service.Dependencies {
if dependency.DependencyKey == sid {
dependencies++
}
}
}
return dependencies
}
func (s *Server) serviceIsDependency(sid string, userId string) int {
services, _ := s.etcd.GetServices(userId)
dependencies := 0
for _, service := range *services {
for _, dependency := range service.Dependencies {
if dependency.DependencyKey == sid {
dependencies++
}
}
}
return dependencies
}
func (s *Server) serviceExists(uid string, sid string) bool {
service, _ := s.etcd.GetServiceSpec(uid, sid)
if service != nil {
return true
} else {
return false
}
}
func (s *Server) GetStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
sid := r.PathParam("sid")
stack, err := s.getStackWithStatus(userId, sid)
if stack == nil {
rest.NotFound(w, r)
return
}
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
w.WriteJson(&stack)
}
}
func (s *Server) PostStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
stack := &api.Stack{}
err := r.DecodeJsonPayload(stack)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
stack, err = s.addStack(userId, stack)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&stack)
}
func (s *Server) addStack(userId string, stack *api.Stack) (*api.Stack, error) {
glog.V(4).Infof("Adding stack %s %s\n", stack.Key, stack.Name)
_, err := s.etcd.GetServiceSpec(userId, stack.Key)
if err != nil {
glog.V(4).Infof("Service %s not found for user %s\n", stack.Key, userId)
return nil, err
}
sid := s.kube.GenerateName(5)
stack.Id = sid
stack.Status = stackStatus[Stopped]
for i := range stack.Services {
stackService := &stack.Services[i]
stackService.Id = fmt.Sprintf("%s-%s", sid, stackService.Service)
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
if spec != nil {
for _, mount := range spec.VolumeMounts {
if mount.Type == api.MountTypeDocker {
continue
}
glog.V(4).Infof("Looking for mount %s\n", mount.MountPath)
found := false
for _, toPath := range stackService.VolumeMounts {
if toPath == mount.MountPath && len(toPath) > 0 {
found = true
}
}
if !found {
glog.V(4).Infof("Didn't find mount %s, creating temporary folder\n", mount.MountPath)
// Create a new temporary folder
if stackService.VolumeMounts == nil {
stackService.VolumeMounts = map[string]string{}
}
volPath := s.getVolPath(&mount, stackService.Id)
stackService.VolumeMounts[volPath] = mount.MountPath
}
}
// Start the Kubernetes service and ingress
if len(spec.Ports) > 0 {
_, err := s.createKubernetesService(userId, stack, spec)
if err != nil {
glog.V(4).Infof("Failed to start service service %s-%s\n", stack.Id, spec.Key)
continue
}
}
}
}
err = s.etcd.PutStack(userId, stack.Id, stack)
if err != nil {
glog.Error(err)
return nil, err
}
return stack, nil
}
// Create the Kubernetes service and ingress rules
func (s *Server) createKubernetesService(userId string, stack *api.Stack, spec *api.ServiceSpec) (*v1.Service, error) {
name := fmt.Sprintf("%s-%s", stack.Id, spec.Key)
glog.V(4).Infof("createKubernetesService %s %s\n", userId, name)
template := s.kube.CreateServiceTemplate(name, stack.Id, spec, s.useNodePort())
svc, _ := s.kube.GetService(userId, name)
if svc.Name == "" {
glog.V(4).Infof("Starting Kubernetes service %s\n", name)
svc, err := s.kube.StartService(userId, template)
if err != nil {
glog.Errorf("Error starting service %s\n", name)
glog.Error(err)
return nil, err
}
if s.useLoadBalancer() && spec.Access == api.AccessExternal {
s.createIngressRule(userId, svc, stack)
}
}
return svc, nil
}
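// createIngressRule deletes any stale ingress for the service and recreates it so that TLS and issuer settings take effect.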
func (s *Server) createIngressRule(userId string, svc *v1.Service, stack *api.Stack) error {
clusterIssuer := s.Config.Certmgr.ClusterIssuer
issuer := s.Config.Certmgr.Issuer
delErr := s.kube.DeleteIngress(userId, svc.Name+"-ingress")
if delErr != nil {
glog.Warning(delErr)
}
_, err := s.kube.CreateIngress(userId, s.domain, svc.Name,
svc.Spec.Ports, stack.Secure, clusterIssuer, issuer)
if err != nil {
glog.Errorf("Error creating ingress for %s\n", svc.Name)
glog.Error(err)
return err
}
glog.V(4).Infof("Started ingress for service %s (secure=%t)\n", svc.Name, stack.Secure)
if clusterIssuer != "" {
glog.Infof("Using TLS clsuter issuer: %s\n", clusterIssuer)
} else if issuer != "" {
glog.Infof("Using TLS issuer: %s\n", issuer)
}
return nil
}
func (s *Server) PutStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
sid := r.PathParam("sid")
newStack := api.Stack{}
err := r.DecodeJsonPayload(&newStack)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
oldStack, err := s.etcd.GetStack(userId, sid)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// If the user deleted an optional service, need to stop the
// associated Kubernetes service and ingress rule
for i := range oldStack.Services {
stackService := &oldStack.Services[i]
newStackService := newStack.GetStackService(stackService.Id)
if newStackService == nil {
// User deleted a service
name := fmt.Sprintf("%s-%s", sid, stackService.Service)
glog.V(4).Infof("Stopping service %s\n", name)
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
if len(spec.Ports) > 0 {
err := s.kube.StopService(userId, name)
if err != nil {
glog.Error(err)
}
}
err := s.kube.DeleteIngress(userId, stackService.Id+"-ingress")
if err != nil {
glog.Error(err)
}
}
}
// If the user added an optional service, need to create the
// associated Kubernetes service
for i := range newStack.Services {
stackService := &newStack.Services[i]
oldStackService := oldStack.GetStackService(stackService.Id)
if oldStackService == nil {
// User added a new service
stackService.Id = fmt.Sprintf("%s-%s", sid, stackService.Service)
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
if spec != nil && len(spec.Ports) > 0 {
_, err := s.createKubernetesService(userId, &newStack, spec)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
if oldStack.Secure != newStack.Secure {
// Need to delete and recreate the ingress rule
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
name := fmt.Sprintf("%s-%s", newStack.Id, spec.Key)
svc, _ := s.kube.GetService(userId, name)
if s.useLoadBalancer() && spec.Access == api.AccessExternal {
err := s.createIngressRule(userId, svc, &newStack)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
// User may have changed volume mounts
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
if spec != nil {
for _, mount := range spec.VolumeMounts {
found := 0
for fromPath, toPath := range stackService.VolumeMounts {
if toPath == mount.MountPath && len(toPath) > 0 {
found++
}
if len(fromPath) == 0 {
volPath := s.getVolPath(&mount, stackService.Id)
stackService.VolumeMounts[volPath] = mount.MountPath
}
}
if found > 1 {
glog.Error(fmt.Sprintf("Two volume mounts cannot refer to the same to path\n"))
w.WriteHeader(http.StatusConflict)
return
}
if found == 0 {
// Create a new temporary folder
volPath := s.getVolPath(&mount, stackService.Id)
stackService.VolumeMounts[volPath] = mount.MountPath
}
}
}
}
newStack.Status = stackStatus[Stopped]
err = s.etcd.PutStack(userId, sid, &newStack)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&newStack)
}
func (s *Server) RenameStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
sid := r.PathParam("sid")
data := make(map[string]string)
err := r.DecodeJsonPayload(&data)
stack, err := s.etcd.GetStack(userId, sid)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
stack.Name = data["name"]
err = s.etcd.PutStack(userId, sid, stack)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&stack)
}
func (s *Server) DeleteStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
sid := r.PathParam("sid")
stack, err := s.etcd.GetStack(userId, sid)
if stack == nil {
rest.NotFound(w, r)
return
}
if stack.Status == stackStatus[Started] ||
stack.Status == stackStatus[Starting] {
// Can't delete a running stack
w.WriteHeader(http.StatusConflict)
return
}
// Delete the running kubernetes service and ingress rule
for i := range stack.Services {
stackService := &stack.Services[i]
stackService.Id = fmt.Sprintf("%s-%s", sid, stackService.Service)
name := fmt.Sprintf("%s-%s", stack.Id, stackService.Service)
glog.V(4).Infof("Stopping service %s\n", name)
for j := range stackService.VolumeMounts {
volName := j
volPath := stackService.VolumeMounts[j]
glog.V(4).Infof("Volume name: %s -> %s", volName, volPath)
}
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
if len(spec.Ports) > 0 {
err := s.kube.StopService(userId, name)
// Log and continue
if err != nil {
glog.Error(err)
}
}
if s.useLoadBalancer() {
s.kube.DeleteIngress(userId, stackService.Id+"-ingress")
glog.V(4).Infof("Deleted ingress for service %s\n", stackService.Id)
}
}
err = s.etcd.DeleteStack(userId, sid)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
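// startStackService walks a service's dependency tree, starting controllers for required dependencies and recursing through optional ones.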
func (s *Server) startStackService(serviceKey string, userId string, stack *api.Stack, addrPortMap *map[string]kube.ServiceAddrPort) {
service, _ := s.etcd.GetServiceSpec(userId, serviceKey)
for _, dep := range service.Dependencies {
if dep.Required {
glog.V(4).Infof("Starting required dependency %s\n", dep.DependencyKey)
s.startController(userId, dep.DependencyKey, stack, addrPortMap)
} else {
s.startStackService(dep.DependencyKey, userId, stack, addrPortMap)
}
}
}
func (s *Server) startController(userId string, serviceKey string, stack *api.Stack, addrPortMap *map[string]kube.ServiceAddrPort) (bool, error) {
var stackService *api.StackService
found := false
for i := range stack.Services {
ss := &stack.Services[i]
if ss.Service == serviceKey {
stackService = ss
found = true
}
}
if !found {
return false, nil
}
pods, _ := s.kube.GetPods(userId, "name", fmt.Sprintf("%s-%s", stack.Id, serviceKey))
running := false
for _, pod := range pods.Items {
if pod.Status.Phase == "Running" {
running = true
}
}
if running {
glog.V(4).Infof("Controller %s already running\n", serviceKey)
return true, nil
}
glog.V(4).Infof("Starting controller for %s\n", serviceKey)
spec, _ := s.etcd.GetServiceSpec(userId, serviceKey)
// If useFrom is set on a config for *this* spec, copy the referenced value from the other stack service's config
for _, config := range spec.Config {
if len(config.UseFrom) > 0 {
for i := range stack.Services {
ss := &stack.Services[i]
useFrom := strings.Split(config.UseFrom, ".")
if useFrom[0] == ss.Service {
glog.V(4).Infof("Setting %s %s to %s %s\n", stackService.Id, config.Name, ss.Id, ss.Config[config.Name])
stackService.Config[config.Name] = ss.Config[useFrom[1]]
}
}
}
}
// Enumerate other services in this stack to see if "setTo" is set
// on any configs for this service.
if stackService.Config == nil {
stackService.Config = map[string]string{}
}
for _, ss := range stack.Services {
ssSpec, _ := s.etcd.GetServiceSpec(userId, ss.Service)
for _, config := range ssSpec.Config {
if len(config.SetTo) > 0 {
setTo := strings.Split(config.SetTo, ".")
// Is the setTo key this service?
if setTo[0] == serviceKey {
glog.V(4).Infof("Setting %s.%s to %s.%s value\n", ss.Id, config.Name, setTo[1], ss.Config[setTo[1]])
stackService.Config[setTo[1]] = ss.Config[config.Name]
}
}
}
}
name := fmt.Sprintf("%s-%s", stack.Id, spec.Key)
k8vols := make([]v1.Volume, 0)
extraVols := make([]config.Volume, 0)
// Mount the home directory
k8homeVol := v1.Volume{}
k8homeVol.Name = "home"
k8homeVol.PersistentVolumeClaim = &v1.PersistentVolumeClaimVolumeSource{
ClaimName: userId + s.Config.HomePvcSuffix,
}
k8vols = append(k8vols, k8homeVol)
for _, volume := range s.Config.Volumes {
// TODO: should "shared" volumes continue to use hostPath?
extraVols = append(extraVols, volume)
k8vol := v1.Volume{}
k8vol.Name = volume.Name
k8vol.HostPath = &v1.HostPathVolumeSource{
Path: volume.Path,
}
k8vols = append(k8vols, k8vol)
}
// Create the controller template
account, _ := s.etcd.GetAccount(userId)
cfg := s.Config
nodeSelectorName := cfg.Kubernetes.NodeSelectorName
nodeSelectorValue := cfg.Kubernetes.NodeSelectorValue
template := s.kube.CreateControllerTemplate(userId, name, stack.Id, s.domain, account.EmailAddress, s.email.Server, stackService, spec, addrPortMap, &extraVols, nodeSelectorName, nodeSelectorValue)
jsonTemplate, _ := json.MarshalIndent(template, "", " ")
glog.V(4).Infof("Template:\n%s", jsonTemplate)
//storageClass := s.Config.Kubernetes.StorageClass
if len(stackService.VolumeMounts) > 0 || len(spec.VolumeMounts) > 0 {
for fromPath, toPath := range stackService.VolumeMounts {
found := false
for _, mount := range spec.VolumeMounts {
if mount.MountPath == toPath {
glog.V(4).Info("Found PVC user mount")
volName := "home"
//if vol.Type == api.MountTypeDocker {
// volName = "docker"
//}
k8vm := v1.VolumeMount{Name: volName, MountPath: toPath, SubPath: fromPath}
template.Spec.Template.Spec.Containers[0].VolumeMounts = append(template.Spec.Template.Spec.Containers[0].VolumeMounts, k8vm)
found = true
}
}
if !found {
// Create any user-specified mounts
glog.V(4).Info("Creating user mount\n")
k8vm := v1.VolumeMount{Name: "home", MountPath: toPath, SubPath: fromPath}
if len(template.Spec.Template.Spec.Containers[0].VolumeMounts) == 0 {
template.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{}
}
template.Spec.Template.Spec.Containers[0].VolumeMounts = append(template.Spec.Template.Spec.Containers[0].VolumeMounts, k8vm)
glog.V(4).Info("Added PVC user mount")
}
}
if len(spec.VolumeMounts) > 0 {
// Go back through the spec volume mounts and create emptyDirs where needed
idx := 0
for _, mount := range spec.VolumeMounts {
glog.V(4).Infof("Need volume for %s \n", stackService.Service)
// Docker volume should use HostPath, others can use emptyDir
if mount.Type == api.MountTypeDocker {
// TODO: Need to prevent non-NDS services from mounting the Docker socket
k8vol := v1.Volume{}
k8vol.Name = "docker"
k8hostPath := v1.HostPathVolumeSource{}
k8hostPath.Path = "/var/run/docker.sock"
k8vol.HostPath = &k8hostPath
k8vols = append(k8vols, k8vol)
} else {
found := false
for _, toPath := range stackService.VolumeMounts {
if toPath == mount.MountPath {
found = true
}
}
if !found {
glog.Warningf("Required volume not found, using emptyDir\n")
k8vol := v1.Volume{}
k8empty := v1.EmptyDirVolumeSource{}
k8vol.Name = fmt.Sprintf("empty%d", idx)
idx++
k8vol.EmptyDir = &k8empty
k8vols = append(k8vols, k8vol)
}
}
}
}
}
template.Spec.Template.Spec.Volumes = k8vols
glog.V(4).Infof("Starting controller %s with volumes %s\n", name, template.Spec.Template.Spec.Volumes)
_, err := s.kube.StartController(userId, template)
if err != nil {
stackService.Status = "error"
stackService.StatusMessages = append(stackService.StatusMessages,
fmt.Sprintf("Error starting stack service: %s\n", err))
return false, err
}
// Give Kubernetes time to create the pods for the RC
time.Sleep(time.Second * 3)
// Wait for stack service to be in ready state
ready := 0
failed := 0
timeOut := defaultTimeout
if spec.ReadyProbe.Timeout > 0 {
timeOut = int(spec.ReadyProbe.Timeout)
}
timeWait := time.Second * 0
for (ready + failed) < len(stack.Services) {
stck, _ := s.etcd.GetStack(userId, stack.Id)
for _, ss := range stck.Services {
glog.V(4).Infof("Stack service %s: status=%s\n", ss.Id, ss.Status)
if ss.Status == "ready" {
ready++
} else if ss.Status == "error" {
failed++
}
}
if timeWait > time.Duration(timeOut)*time.Second {
stackService.StatusMessages = append(stackService.StatusMessages,
fmt.Sprintf("Service timed out after %d seconds\n", timeOut))
stackService.Status = "timeout"
// Update stack status
s.etcd.PutStack(userId, stack.Id, stack)
// Service has taken too long to startup
glog.V(4).Infof("Stack service %s reached timeout, stopping\n", stackService.Id)
err := s.kube.StopController(userId, stackService.Id)
if err != nil {
glog.Error(err)
}
failed++
break
}
time.Sleep(time.Second * 3)
timeWait += time.Second * 3
}
if failed > 0 {
return false, nil
} else {
return true, nil
}
}
func (s *Server) QuickstartStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
sid := r.Request.FormValue("key")
if len(sid) == 0 {
rest.Error(w, "You must specify a service key", http.StatusBadRequest)
return
}
if !s.serviceExists(userId, sid) {
rest.Error(w, "No such service", http.StatusNotFound)
return
}
stack, err := s.getStackByServiceId(userId, sid)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if stack == nil {
spec, err := s.etcd.GetServiceSpec(userId, sid)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Restrict to single service specs (i.e., no dependencies)
if s.hasRequiredDependencies(spec) {
rest.Error(w, "Cannot quickstart services with required dependencies", http.StatusUnprocessableEntity) // unprocessable
return
}
stack = &api.Stack{
Key: sid,
Name: spec.Label,
Secure: true,
}
stackService := api.StackService{
Service: sid,
ResourceLimits: api.ResourceLimits{
CPUMax: spec.ResourceLimits.CPUMax,
CPUDefault: spec.ResourceLimits.CPUDefault,
MemoryMax: spec.ResourceLimits.MemoryMax,
MemoryDefault: spec.ResourceLimits.MemoryDefault,
},
}
// Add the default service
stack.Services = append(stack.Services, stackService)
stack, err = s.addStack(userId, stack)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
if stack.Status != "starting" && stack.Status != "started" {
go s.startStack(userId, stack)
}
w.WriteJson(stack)
}
func (s *Server) StartStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
sid := r.PathParam("sid")
stack, _ := s.etcd.GetStack(userId, sid)
if stack == nil {
rest.NotFound(w, r)
return
}
glog.V(4).Infof("Starting stack %s", stack.Id)
glog.V(4).Infof("Stack status %s\n", stack.Status)
if stack.Status != stackStatus[Stopped] && stack.Status != stackStatus[Starting] {
// Can't start a stopping or started service
glog.V(4).Infof("Can't start a service with status %s\n", stack.Status)
w.WriteHeader(http.StatusConflict)
return
}
go s.startStack(userId, stack)
w.WriteHeader(http.StatusAccepted)
}
func (s *Server) startStack(userId string, stack *api.Stack) (*api.Stack, error) {
sid := stack.Id
stack.Status = stackStatus[Starting]
s.etcd.PutStack(userId, sid, stack)
stackServices := stack.Services
	// Get the service/port mappings
addrPortMap := make(map[string]kube.ServiceAddrPort)
for _, stackService := range stackServices {
spec, specErr := s.etcd.GetServiceSpec(userId, stackService.Service)
if specErr != nil {
glog.Error(specErr)
} else {
name := fmt.Sprintf("%s-%s", stack.Id, spec.Key)
svc, svcErr := s.kube.GetService(userId, name)
if svcErr == nil {
addrPort := kube.ServiceAddrPort{
Name: stackService.Service,
Host: svc.Spec.ClusterIP,
Port: svc.Spec.Ports[0].Port,
NodePort: svc.Spec.Ports[0].NodePort,
}
addrPortMap[stackService.Service] = addrPort
} else {
glog.Error(svcErr)
}
}
}
/*
Disabling network policies per https://github.com/nds-org/ndslabs/issues/286
if !s.kube.NetworkPolicyExists(userId, sid) {
glog.V(4).Infof("Creating network policy for %s %s\n", userId, sid)
_, err := s.kube.CreateNetworkPolicy(userId, sid, sid)
if err != nil {
glog.Errorf("Failed to start controller %s: Failed to create NetworkPolicy: %s\n", sid, err)
return stack, err
}
}
*/
// For each stack service, if no dependencies or dependency == started,
// start service. Otherwise wait
started := map[string]int{}
errors := map[string]int{}
glog.V(4).Infof("Starting services for %s %s\n", userId, sid)
for len(started) < len(stackServices) {
if len(errors) > 0 {
// Dependent service is in error, abort
glog.V(4).Infof("Aborting startup due to error\n")
break
}
stack, _ = s.getStackWithStatus(userId, sid)
for _, stackService := range stack.Services {
if stackService.Status == "error" {
errors[stackService.Service] = 1
break
}
if started[stackService.Service] == 1 {
continue
}
svc, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
numDeps := 0
startedDeps := 0
for _, dep := range svc.Dependencies {
for _, ss := range stack.Services {
if dep.DependencyKey == ss.Service {
numDeps++
if ss.Status == "ready" {
startedDeps++
}
}
}
}
if numDeps == 0 || startedDeps == numDeps {
go s.startController(userId, stackService.Service, stack, &addrPortMap)
started[stackService.Service] = 1
}
}
time.Sleep(time.Second * 3)
}
ready := map[string]int{}
for len(ready) < len(started) && len(errors) == 0 {
stack, _ = s.getStackWithStatus(userId, sid)
for _, stackService := range stack.Services {
if stackService.Status == "ready" {
ready[stackService.Service] = 1
}
if stackService.Status == "error" || stackService.Status == "timeout" {
errors[stackService.Service] = 1
}
}
time.Sleep(time.Second * 3)
}
// To overcome the 503 error on ingress, wait 5 seconds before returning the endpoint
time.Sleep(time.Second * 5)
stack, _ = s.getStackWithStatus(userId, sid)
stack.Status = "started"
for _, stackService := range stack.Services {
if stackService.Status == "error" || stackService.Status == "timeout" {
stack.Status = "error"
}
}
glog.V(4).Infof("Stack %s started\n", sid)
s.etcd.PutStack(userId, sid, stack)
return stack, nil
}
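// getStackWithStatus returns the stack from etcd with per-service endpoints,
// internal IPs, and default resource limits filled in from the corresponding
// Kubernetes services and service specs.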
func (s *Server) getStackWithStatus(userId string, sid string) (*api.Stack, error) {
stack, _ := s.etcd.GetStack(userId, sid)
if stack == nil {
return nil, nil
}
for i := range stack.Services {
stackService := &stack.Services[i]
stackService.Endpoints = []api.Endpoint{}
k8service, _ := s.kube.GetService(userId, stackService.Id)
if k8service == nil {
continue
}
glog.V(4).Infof("Stack service %s: status=%s\n", stackService.Id, stackService.Status)
// Get the port protocol for the service endpoint
spec, err := s.etcd.GetServiceSpec(userId, stackService.Service)
if err != nil {
glog.Error(err)
}
stackService.InternalIP = k8service.Spec.ClusterIP
for _, specPort := range spec.Ports {
for _, k8port := range k8service.Spec.Ports {
if specPort.Port == k8port.Port {
endpoint := api.Endpoint{}
endpoint.Port = specPort.Port
endpoint.Protocol = specPort.Protocol
endpoint.NodePort = k8port.NodePort
if s.useLoadBalancer() && spec.Access == api.AccessExternal {
if len(spec.Ports) == 1 {
endpoint.Host = fmt.Sprintf("%s.%s", stackService.Id, s.domain)
} else {
endpoint.Host = fmt.Sprintf("%s-%d.%s", stackService.Id, specPort.Port, s.domain)
}
endpoint.Path = specPort.ContextPath
endpoint.URL = endpoint.Host + specPort.ContextPath
}
stackService.Endpoints = append(stackService.Endpoints, endpoint)
}
}
}
// NDS-1154
if stackService.ResourceLimits == (api.ResourceLimits{}) {
stackService.ResourceLimits.CPUMax = spec.ResourceLimits.CPUMax
stackService.ResourceLimits.MemoryMax = spec.ResourceLimits.MemoryMax
stackService.ResourceLimits.CPUDefault = spec.ResourceLimits.CPUDefault
stackService.ResourceLimits.MemoryDefault = spec.ResourceLimits.MemoryDefault
}
}
return stack, nil
}
func (s *Server) StopStack(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
if s.IsAdmin(r) {
userId = r.Request.FormValue("userId")
_, err := s.etcd.GetAccount(userId)
if err != nil {
rest.NotFound(w, r)
return
}
}
sid := r.PathParam("sid")
stack, err := s.etcd.GetStack(userId, sid)
if stack == nil {
rest.NotFound(w, r)
return
}
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
go s.stopStack(userId, sid)
w.WriteHeader(http.StatusAccepted)
}
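// stopStack stops every controller in the stack, waiting to stop a service
// until all services that depend on it have stopped, then records the final
// pod status for each service and marks the stack as stopped.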
func (s *Server) stopStack(userId string, sid string) (*api.Stack, error) {
path := "/accounts/" + userId + "/stacks/" + sid
glog.V(4).Infof("Stopping stack %s\n", path)
stack, _ := s.etcd.GetStack(userId, sid)
glog.V(4).Infof("Stack status %s\n", stack.Status)
if stack.Status == stackStatus[Stopped] {
// Can't stop a stopped service
glog.V(4).Infof("Can't stop a stopped service")
return stack, nil
}
stack.Status = stackStatus[Stopping]
s.etcd.PutStack(userId, sid, stack)
// For each stack service, stop dependent services first.
stopped := map[string]int{}
for len(stopped) < len(stack.Services) {
stack, _ = s.getStackWithStatus(userId, sid)
for _, stackService := range stack.Services {
spec, _ := s.etcd.GetServiceSpec(userId, stackService.Service)
name := fmt.Sprintf("%s-%s", stack.Id, spec.Key)
if stopped[stackService.Service] == 1 {
continue
}
glog.V(4).Infof("Stopping service %s\n", stackService.Service)
numDeps := 0
stoppedDeps := 0
for _, ss := range stack.Services {
svc, _ := s.etcd.GetServiceSpec(userId, ss.Service)
for _, dep := range svc.Dependencies {
if dep.DependencyKey == stackService.Service {
numDeps++
if ss.Status == "stopped" || ss.Status == "" || ss.Status == "error" {
stoppedDeps++
}
}
}
}
if numDeps == 0 || stoppedDeps == numDeps {
stopped[stackService.Service] = 1
glog.V(4).Infof("Stopping controller %s\n", name)
err := s.kube.StopController(userId, name)
if err != nil {
glog.Error(err)
}
}
}
time.Sleep(time.Second * 3)
}
podStatus := make(map[string]string)
pods, _ := s.kube.GetPods(userId, "stack", stack.Id)
for _, pod := range pods.Items {
label := pod.Labels["service"]
glog.V(4).Infof("Pod %s %d\n", label, len(pod.Status.Conditions))
if len(pod.Status.Conditions) > 0 {
podStatus[label] = string(pod.Status.Phase)
}
}
for i := range stack.Services {
stackService := &stack.Services[i]
stackService.Status = podStatus[stackService.Service]
stackService.StatusMessages = []string{}
stackService.Endpoints = nil
}
stack.Status = stackStatus[Stopped]
s.etcd.PutStack(userId, sid, stack)
stack, _ = s.getStackWithStatus(userId, sid)
/*
Disabling network policies per https://github.com/nds-org/ndslabs/issues/286
err := s.kube.DeleteNetworkPolicy(userId, sid)
if err != nil {
glog.Errorf("Failed to delete network policy: %s\n", err)
}
*/
return stack, nil
}
func (s *Server) GetLogs(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
ssid := r.PathParam("ssid")
lines := r.Request.FormValue("lines")
if !s.stackServiceExists(userId, ssid) {
rest.Error(w, "No such service", http.StatusNotFound)
return
}
tailLines, err := strconv.Atoi(lines)
sid := ssid[0:strings.LastIndex(ssid, "-")]
logs, err := s.getLogs(userId, sid, ssid, tailLines)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
w.WriteJson(&logs)
}
}
func (s *Server) GetConfigs(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
services := r.Request.FormValue("services")
sids := strings.Split(services, ",")
configs := make(map[string][]api.Config)
for _, sid := range sids {
if !s.serviceExists(userId, sid) {
rest.Error(w, "No such service", http.StatusNotFound)
return
}
spec, err := s.etcd.GetServiceSpec(userId, sid)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
configs[sid] = spec.Config
}
}
w.WriteJson(&configs)
}
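// getLogs returns the recorded Kubernetes status messages for a stack service
// followed by the tail of its pod log.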
func (s *Server) getLogs(userId string, sid string, ssid string, tailLines int) (string, error) {
glog.V(4).Infof("Getting logs for %s %s %d", sid, ssid, tailLines)
stack, err := s.etcd.GetStack(userId, sid)
if err != nil {
return "", err
}
pods, err := s.kube.GetPods(userId, "stack", stack.Id)
if err != nil {
return "", err
}
log := ""
for _, ss := range stack.Services {
if ss.Id == ssid {
log += fmt.Sprintf("KUBERNETES LOG\n=====================\n")
for _, msg := range ss.StatusMessages {
log += msg + "\n"
}
log += fmt.Sprintf("\nSERVICE LOG\n=====================\n")
for _, pod := range pods.Items {
if pod.Labels["name"] == ssid {
podLog, err := s.kube.GetLog(userId, pod.Name, tailLines)
if err != nil {
return "", err
} else {
log += podLog
}
}
}
}
}
return log, nil
}
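// addServiceFile loads a single service spec from a JSON file and registers it
// as a global service; files without a json extension are ignored.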
func (s *Server) addServiceFile(path string) error {
if path[len(path)-4:len(path)] != "json" {
return nil
}
glog.V(4).Infof("Adding %s", path)
service := api.ServiceSpec{}
data, err := ioutil.ReadFile(path)
if err != nil {
fmt.Println(err)
return err
}
err = json.Unmarshal(data, &service)
if err != nil {
fmt.Println(err)
return err
}
s.etcd.PutGlobalService(service.Key, &service)
return nil
}
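// loadSpecs recursively loads all service spec files under the given path,
// skipping the "vocab" and "schemas" directories.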
func (s *Server) loadSpecs(path string) error {
files, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, file := range files {
if file.IsDir() {
if file.Name() != "vocab" && file.Name() != "schemas" {
s.loadSpecs(fmt.Sprintf("%s/%s", path, file.Name()))
}
} else {
s.addServiceFile(fmt.Sprintf("%s/%s", path, file.Name()))
}
}
return nil
}
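// HandlePodEvent reacts to watched pod events in user namespaces, updating the
// owning stack service's status (starting, ready, stopped, or error) and its
// status messages based on the pod's conditions and container states.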
func (s *Server) HandlePodEvent(eventType string, pod *v1.Pod) {
if pod.Namespace != "default" && pod.Namespace != systemNamespace {
glog.V(4).Infof("HandlePodEvent %s", eventType)
//name := pod.Name
userId := pod.Namespace
sid := pod.ObjectMeta.Labels["stack"]
ssid := pod.ObjectMeta.Labels["name"]
//phase := pod.Status.Phase
if len(sid) > 0 {
// Get stack service from Pod name
stack, err := s.etcd.GetStack(userId, sid)
if err != nil {
glog.Errorf("Error getting stack: %s\n", err)
return
}
var stackService *api.StackService
for i := range stack.Services {
if stack.Services[i].Id == ssid {
stackService = &stack.Services[i]
}
}
if stackService == nil {
glog.Errorf("No such stack service: %s\n", ssid)
return
}
// This is a Pod event
ready := false
if len(pod.Status.Conditions) > 0 {
for _, condition := range pod.Status.Conditions {
if condition.Type == "Ready" {
ready = (condition.Status == "True")
}
}
if len(pod.Status.ContainerStatuses) > 0 {
// The pod was terminated, this is an error
if pod.Status.ContainerStatuses[0].State.Terminated != nil {
reason := pod.Status.ContainerStatuses[0].State.Terminated.Reason
message := pod.Status.ContainerStatuses[0].State.Terminated.Message
stackService.Status = "error"
stackService.StatusMessages = append(stackService.StatusMessages,
fmt.Sprintf("Reason=%s, Message=%s", reason, message))
}
} else {
reason := pod.Status.Conditions[0].Reason
message := pod.Status.Conditions[0].Message
stackService.StatusMessages = append(stackService.StatusMessages,
fmt.Sprintf("Reason=%s, Message=%s", reason, message))
}
}
fmt.Sprintf("DEBUG READY %v\n", ready)
if ready {
stackService.Status = "ready"
} else {
if eventType == "ADDED" {
stackService.Status = "starting"
} else if eventType == "DELETED" {
if stackService.Status == "timeout" {
stackService.Status = "error"
} else {
stackService.Status = "stopped"
}
}
}
message := ""
if len(stackService.StatusMessages) > 0 {
message = stackService.StatusMessages[len(stackService.StatusMessages)-1]
}
glog.V(4).Infof("Namespace: %s, Pod: %s, Status: %s, StatusMessage: %s\n", userId, pod.Name,
stackService.Status, message)
s.etcd.PutStack(userId, sid, stack)
}
}
}
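// HandleReplicationControllerEvent records replication controller events for a
// stack service, marking the service as in error when a Warning event is seen.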
func (s *Server) HandleReplicationControllerEvent(eventType string, event *v1.Event, rc *v1.ReplicationController) {
if rc.Namespace != "default" && rc.Namespace != systemNamespace {
glog.V(4).Infof("HandleReplicationControllerEvent %s", eventType)
userId := rc.Namespace
sid := rc.ObjectMeta.Labels["stack"]
ssid := rc.ObjectMeta.Labels["name"]
// Get stack service from Pod name
stack, err := s.etcd.GetStack(userId, sid)
if err != nil {
glog.Errorf("Error getting stack: %s\n", err)
return
}
var stackService *api.StackService
for i := range stack.Services {
if stack.Services[i].Id == ssid {
stackService = &stack.Services[i]
}
}
if event != nil {
if event.Type == "Warning" {
// This is an error
stackService.Status = "error"
}
stackService.StatusMessages = append(stackService.StatusMessages,
fmt.Sprintf("Reason=%s, Message=%s", event.Reason, event.Message))
glog.V(4).Infof("Namespace: %s, ReplicationController: %s, Status: %s, StatusMessage: %s\n", userId, rc.Name,
stackService.Status, stackService.StatusMessages[len(stackService.StatusMessages)-1])
}
s.etcd.PutStack(userId, sid, stack)
}
}
func (s *Server) addVocabulary(path string) error {
if path[len(path)-4:len(path)] != "json" {
return nil
}
glog.V(4).Infof("Adding vocabulary %s", path)
vocab := api.Vocabulary{}
data, err := ioutil.ReadFile(path)
if err != nil {
fmt.Println(err)
return err
}
err = json.Unmarshal(data, &vocab)
if err != nil {
fmt.Println(err)
return err
}
s.etcd.PutVocabulary(vocab.Name, &vocab)
return nil
}
func (s *Server) GetVocabulary(w rest.ResponseWriter, r *rest.Request) {
name := r.PathParam("name")
vocab, err := s.etcd.GetVocabulary(name)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
w.WriteJson(&vocab)
}
}
func (s *Server) useNodePort() bool {
return s.ingress == config.IngressTypeNodePort
}
func (s *Server) useLoadBalancer() bool {
return s.ingress == config.IngressTypeLoadBalancer
}
func (s *Server) ChangePassword(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
data := make(map[string]string)
err := r.DecodeJsonPayload(&data)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
}
_, err = s.etcd.ChangePassword(userId, data["password"])
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
}
w.WriteHeader(http.StatusOK)
}
func (s *Server) ResetPassword(w rest.ResponseWriter, r *rest.Request) {
userId := r.PathParam("userId")
if len(userId) == 0 {
userId = r.Request.FormValue("userId")
}
if strings.Contains(userId, "@") {
account := s.getAccountByEmail(userId)
if account != nil {
userId = account.Namespace
}
}
token, err := s.getTemporaryToken(userId)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusOK)
return
}
account, err := s.etcd.GetAccount(userId)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusOK)
return
}
if account.Status == api.AccountStatusUnverified {
verifyUrl := s.origin + "/landing/?t=" + account.Token + "&u=" + account.Namespace
err = s.email.SendVerificationEmail(account.Name, account.EmailAddress, verifyUrl)
} else {
resetUrl := s.origin + "/login/recover?t=" + token
err = s.email.SendRecoveryEmail(account.Name, account.EmailAddress, resetUrl, (account.Status == api.AccountStatusUnapproved))
}
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusOK)
return
}
w.WriteHeader(http.StatusOK)
}
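// getTemporaryToken issues a signed JWT for the given user that expires in 30
// minutes, used for password recovery and OAuth sign-in.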
func (s *Server) getTemporaryToken(userId string) (string, error) {
token := jwtbase.New(jwtbase.GetSigningMethod(s.jwt.SigningAlgorithm))
if s.jwt.PayloadFunc != nil {
for key, value := range s.jwt.PayloadFunc(userId) {
token.Claims[key] = value
}
}
token.Claims["id"] = userId
token.Claims["exp"] = time.Now().Add(time.Minute * 30).Unix()
token.Claims["orig_iat"] = time.Now().Unix()
tokenString, err := token.SignedString(s.jwt.Key)
if err != nil {
return "", err
}
return tokenString, nil
}
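// createAdminUser creates the admin account with the configured default limits
// (or resets its password if the account already exists) and ensures the LMA
// basic-auth secret is in place.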
func (s *Server) createAdminUser(password string) error {
glog.V(4).Infof("Creating admin user")
if !s.accountExists(adminUser) {
account := &api.Account{
Name: adminUser,
Namespace: adminUser,
Description: s.Config.Name + " administrator",
Password: password,
ResourceLimits: api.AccountResourceLimits{
CPUMax: s.Config.DefaultLimits.CpuMax,
CPUDefault: s.Config.DefaultLimits.CpuDefault,
MemoryMax: s.Config.DefaultLimits.MemMax,
MemoryDefault: s.Config.DefaultLimits.MemDefault,
StorageQuota: s.Config.DefaultLimits.StorageDefault,
},
}
err := s.etcd.PutAccount(adminUser, account, true)
if err != nil {
glog.Error(err)
return err
}
err = s.setupAccount(account)
if err != nil {
glog.Error(err)
return err
}
} else {
account, err := s.etcd.GetAccount(adminUser)
if err != nil {
glog.Error(err)
return err
}
account.Password = password
err = s.etcd.PutAccount(adminUser, account, true)
if err != nil {
glog.Error(err)
return err
}
}
err := s.createLMABasicAuthSecret()
if err != nil {
glog.Error(err)
return err
}
return nil
}
//func (s *Server) DownloadClient(w http.ResponseWriter, r *http.Request) {
func (s *Server) DownloadClient(w rest.ResponseWriter, r *rest.Request) {
ops := r.URL.Query().Get("os")
if ops != "darwin" && ops != "linux" {
html := "<html><body>" +
"<a href=\"download?os=darwin\">ndslabsctl-darwin-amd64</a><br/>" +
"<a href=\"download?os=linux\">ndslabsctl-linux-amd64</a><br/>" +
"</body></html>"
w.(http.ResponseWriter).Write([]byte(html))
} else {
w.Header().Set("Content-Disposition", "attachment; filename=ndslabsctl")
w.Header().Set("Content-Type", r.Header.Get("Content-Type"))
reader, err := os.Open("/ndslabsctl/ndslabsctl-" + ops + "-amd64")
if err != nil {
glog.Error(err)
return
}
defer reader.Close()
io.Copy(w.(http.ResponseWriter), reader)
}
}
func (s *Server) PostSupport(w rest.ResponseWriter, r *rest.Request) {
userId := s.getUser(r)
request := api.SupportRequest{}
err := r.DecodeJsonPayload(&request)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
account, err := s.etcd.GetAccount(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.email.SendSupportEmail(account.Name, account.EmailAddress, string(request.Type), request.Message, request.Anonymous)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
return
}
func (s *Server) GetContact(w rest.ResponseWriter, r *rest.Request) {
w.WriteJson(map[string]string{
"email": s.Config.Support.Email,
"forum": s.Config.Support.Forum,
"chat": s.Config.Support.Chat,
})
}
func (s *Server) getAppDataDir(stackService string) string {
return fmt.Sprintf("%s-%s", stackService, s.kube.RandomString(5))
}
func (s *Server) checkDependencies(uid string, service *api.ServiceSpec) (string, bool) {
for _, dependency := range service.Dependencies {
if !s.serviceExists(uid, dependency.DependencyKey) {
return dependency.DependencyKey, false
}
}
return "", true
}
func (s *Server) hasRequiredDependencies(service *api.ServiceSpec) bool {
for _, dep := range service.Dependencies {
if dep.Required {
return true
}
}
return false
}
// Make sure that config.useFrom and config.setTo dependencies exist
func (s *Server) checkConfigs(uid string, service *api.ServiceSpec) (string, bool) {
for _, config := range service.Config {
if len(config.UseFrom) > 0 {
useFrom := strings.Split(config.UseFrom, ".")
if !s.serviceExists(uid, useFrom[0]) {
return useFrom[0], false
}
}
if len(config.SetTo) > 0 {
setTo := strings.Split(config.SetTo, ".")
if !s.serviceExists(uid, setTo[0]) {
return setTo[0], false
}
}
}
return "", true
}
func (s *Server) GetHealthz(w rest.ResponseWriter, r *rest.Request) {
// Confirm access to Etcd
_, err := s.etcd.GetAccount(adminUser)
if err != nil {
rest.Error(w, "etcd not available", http.StatusInternalServerError)
return
}
// Confirm access to Kubernetes API
_, err = s.kube.GetNamespace(adminUser)
if err != nil {
rest.Error(w, "Kubernetes API not available", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
// Import an account. Admin only. Assumes account does not exist.
func (s *Server) ImportAccount(w rest.ResponseWriter, r *rest.Request) {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
exportPkg := api.ExportPackage{}
err := r.DecodeJsonPayload(&exportPkg)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
account := exportPkg.Account
if s.accountExists(account.Namespace) {
w.WriteHeader(http.StatusConflict)
return
}
err = s.etcd.PutAccount(account.Namespace, &account, false)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = s.setupAccount(&account)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteJson(&account)
}
// Export account. Admin only.
func (s *Server) ExportAccount(w rest.ResponseWriter, r *rest.Request) {
userId := r.PathParam("userId")
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
account, err := s.etcd.GetAccount(userId)
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
w.WriteJson(&err)
return
}
account.ResourceUsage = api.ResourceUsage{}
account.Token = ""
w.WriteJson(api.ExportPackage{
Account: *account,
})
}
// Shutdown all running stacks for all users. Admin only.
func (s *Server) StopAllStacks(w rest.ResponseWriter, r *rest.Request) {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
accounts, err := s.etcd.GetAccounts()
if err != nil {
glog.Error(err)
rest.Error(w, err.Error(), http.StatusInternalServerError)
w.WriteJson(&err)
} else {
for _, account := range *accounts {
stacks, err := s.etcd.GetStacks(account.Namespace)
for _, stack := range *stacks {
glog.V(4).Infof("Stopping stack %s for account %s\n", stack.Id, account.Namespace)
_, err = s.stopStack(account.Namespace, stack.Id)
if err == nil {
glog.V(4).Infof("Stack %s stopped \n", stack.Id)
} else {
glog.Error(err)
}
}
}
}
w.WriteHeader(http.StatusOK)
}
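// readConfig parses the server configuration from the given JSON file.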
func readConfig(path string) (*config.Config, error) {
if path[len(path)-4:len(path)] != "json" {
return nil, fmt.Errorf("Invalid config json")
}
glog.V(4).Infof("Using config %s", path)
config := config.Config{}
data, err := ioutil.ReadFile(path)
if err != nil {
fmt.Println(err)
return nil, err
}
err = json.Unmarshal(data, &config)
if err != nil {
fmt.Println(err)
return nil, err
}
return &config, nil
}
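// shutdownInactiveServices loops forever, checking each account once a minute
// and stopping any running stacks for accounts whose inactivity timeout has
// elapsed since their last login.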
func (s *Server) shutdownInactiveServices() {
for {
accounts, err := s.etcd.GetAccounts()
if err != nil {
glog.Error(err)
}
for _, account := range *accounts {
			// InactiveTimeout is in minutes
timeout := time.Duration(account.InactiveTimeout) * time.Minute
diff := time.Duration(time.Now().Unix()-account.LastLogin) * time.Second
if account.LastLogin > 0 && account.InactiveTimeout > 0 &&
diff.Seconds() > timeout.Seconds() {
stacks, err := s.etcd.GetStacks(account.Namespace)
if err != nil {
glog.Error(err)
}
for _, stack := range *stacks {
if stack.Status != stackStatus[Stopped] {
glog.Infof("Stopping stack %s for %s due to inactivity\n", stack.Id, account.Namespace)
_, err = s.stopStack(account.Namespace, stack.Id)
if err == nil {
glog.V(4).Infof("Stack %s stopped \n", stack.Id)
} else {
glog.Error(err)
}
}
}
}
}
time.Sleep(1 * time.Minute)
}
}
func (s *Server) PutLogLevel(w rest.ResponseWriter, r *rest.Request) {
if !s.IsAdmin(r) {
rest.Error(w, "", http.StatusForbidden)
return
}
level := r.PathParam("level")
_, err := strconv.Atoi(level)
if err == nil {
glog.Infof("Setting log level to %s\n", level)
flag.Lookup("v").Value.Set(level)
} else {
glog.Infof("Invalid log level %s\n", level)
}
}
func (s *Server) getAccountByEmail(email string) *api.Account {
accounts, _ := s.etcd.GetAccounts()
if accounts == nil {
return nil
}
for _, account := range *accounts {
if account.EmailAddress == strings.ToLower(email) {
return &account
}
}
return nil
}
// NDS-970
func (s *Server) getVolPath(mount *api.VolumeMount, ssid string) string {
if len(mount.DefaultPath) == 0 {
return fmt.Sprintf("AppData/%s", s.getAppDataDir(ssid))
} else {
return mount.DefaultPath
}
}
// Check if host exists in ingresses for namespace
func (s *Server) checkIngress(uid string, host string) (bool, error) {
glog.V(4).Infof("Checking ingress for %s %s", uid, host)
ingresses, err := s.kube.GetIngresses(uid)
if err != nil {
glog.Error(err)
return false, err
}
if ingresses != nil {
for _, ingress := range ingresses.Items {
if ingress.Spec.Rules[0].Host == host {
glog.V(4).Infof("Found ingress with host %s", ingress.Spec.Rules[0].Host)
return true, nil
}
}
}
return false, nil
}
// FIXME: Revisit this after adding PVC support
// See https://github.com/nds-org/ndslabs/issues/262
// Write the oauth2 payload to the users home directory for access from applications
func (s *Server) writeAuthPayload(userId string, tokens map[string]string) error {
/*path := s.getHomeVolume().Path + "/" + userId + "/.globus"
os.MkdirAll(path, 0777)
json, err := json.MarshalIndent(tokens, "", " ")
if err != nil {
return err
}
err = ioutil.WriteFile(path+"/oauth2.json", json, 0777)
if err != nil {
return err
}*/
return nil
}
// Register a user via oauth
func (s *Server) RegisterUserOauth(w http.ResponseWriter, r *http.Request) {
rd := r.FormValue("rd")
if rd == "" {
rd = "https://www." + s.domain + "/dashboard"
}
accessToken := r.Header.Get("X-Forwarded-Access-Token")
otherTokenStr := r.Header.Get("X-Forwarded-Other-Tokens")
email := r.Header.Get("X-Forwarded-Email")
user := r.Header.Get("X-Forwarded-User")
if accessToken == "" || email == "" || user == "" {
glog.Warning("No oauth header found")
w.WriteHeader(http.StatusUnauthorized)
return
}
tokens := make(map[string]string)
otherTokens := strings.Split(otherTokenStr, " ")
for _, kvpair := range otherTokens {
kv := strings.Split(kvpair, "=")
tokens[kv[0]] = kv[1]
}
err := s.writeAuthPayload(user, tokens)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
glog.Infof("Creating/updating account for %s %s %s\n", user, email, accessToken)
glog.Infof("Other tokens %s\n", otherTokens)
account := s.getAccountByEmail(email)
if account == nil {
act := api.Account{
Name: user,
Description: "Oauth shadow account",
Namespace: user,
EmailAddress: email,
Password: s.kube.RandomString(10),
Organization: "",
Created: time.Now().Unix(),
LastLogin: time.Now().Unix(),
NextURL: rd,
}
act.Status = api.AccountStatusApproved
err := s.etcd.PutAccount(act.Namespace, &act, true)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
err = s.setupAccount(&act)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
} else {
account.LastLogin = time.Now().Unix()
account.NextURL = rd
err := s.etcd.PutAccount(account.Namespace, account, true)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
}
token, err := s.getTemporaryToken(user)
if err != nil {
glog.Error(err)
w.WriteHeader(http.StatusOK)
return
}
glog.Infof("Setting Cookie\n")
//expiration := time.Now().Add(365 * 24 * time.Hour)
http.SetCookie(w, &http.Cookie{Name: "token", Value: token, Domain: s.domain, Path: "/"})
http.SetCookie(w, &http.Cookie{Name: "namespace", Value: user, Domain: s.domain, Path: "/"})
glog.Infof("Redirecting to %s\n", rd)
http.Redirect(w, r, rd, 301)
return
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
manager/manager.go
|
package manager
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"strconv"
"sync"
"time"
"github.com/LyridInc/go-sdk"
sdkModel "github.com/LyridInc/go-sdk/model"
"github.com/go-kit/kit/log/level"
"lyrid-sd/logger"
"lyrid-sd/model"
"lyrid-sd/route"
"lyrid-sd/utils"
)
type NodeManager struct {
StartPort int
NextPortAvailable int
RouteMap map[string]model.Router
Apps []*sdkModel.App
}
type customSD struct {
Targets []string `json:"targets"`
Labels map[string]string `json:"labels"`
}
var instance *NodeManager
var once sync.Once
func GetInstance() *NodeManager {
once.Do(func() {
instance = &NodeManager{}
})
return instance
}
func (manager *NodeManager) Init() {
manager.RouteMap = make(map[string]model.Router)
config := model.GetConfig()
if config.Discovery_Max_Port_Used > config.Discovery_Port_Start {
manager.StartPort = config.Discovery_Max_Port_Used
} else {
manager.StartPort = config.Discovery_Port_Start
}
manager.NextPortAvailable = manager.StartPort
manager.Apps = sdk.GetInstance().GetApps()
}
func (manager *NodeManager) ReRoute() {
// Close created route
	level.Info(logger.GetInstance().Logger).Log("Message", "Re-route")
	for _, r := range manager.RouteMap {
		r.Close()
	}
manager.RouteMap = make(map[string]model.Router)
config := model.GetConfig()
manager.StartPort = config.Discovery_Port_Start
manager.NextPortAvailable = manager.StartPort
}
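// getUsedPort returns the port previously recorded for the given exporter ID in
// the service-discovery file, or 0 if no port was recorded.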
func getUsedPort(id string, sd []customSD) int {
for _, item := range sd {
if item.Labels[route.LabelName("id")] == id && len(item.Labels[route.LabelName("port")]) > 0 {
port, _ := strconv.Atoi(item.Labels[route.LabelName("port")])
return port
}
}
return 0
}
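// isReserved reports whether the given port is already assigned to any target
// in the service-discovery file.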
func isReserved(p int, sd []customSD) bool {
for _, item := range sd {
if len(item.Labels[route.LabelName("port")]) > 0 {
port, _ := strconv.Atoi(item.Labels[route.LabelName("port")])
if port == p {
return true
}
}
}
return false
}
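// Run polls the exporter list on the configured interval, starting a router on
// a free port for every exporter that does not yet have one (reusing ports
// recorded in the discovery file when possible) and persisting the highest port
// used back to the configuration.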
func (manager *NodeManager) Run(ctx context.Context) {
config := model.GetConfig()
duration, _ := time.ParseDuration(config.Discovery_Poll_Interval)
for c := time.Tick(duration); ; {
var sd []customSD
if len(manager.RouteMap) == 0 {
// first run, check old used ports on config file
			jsonFile, err := os.Open(os.Getenv("CONFIG_DIR") + "/lyrid_sd.json")
			if err == nil {
				byteValue, _ := ioutil.ReadAll(jsonFile)
				_ = json.Unmarshal(byteValue, &sd)
				jsonFile.Close()
			}
}
list := manager.GetExporterList()
config := model.GetConfig()
maxPortused := config.Discovery_Max_Port_Used
for _, endpoint := range list {
if manager.RouteMap[endpoint.ID] == nil {
// route to this id doesn't exist
level.Info(logger.GetInstance().Logger).Log("Message", "Route to ID doesn't exist", "EndpointID", endpoint.ID)
r := route.Router{ID: endpoint.ID, URL: endpoint.URL, AdditionalLabels: endpoint.AdditionalLabels}
if sd == nil {
r.Initialize(strconv.Itoa(manager.NextPortAvailable))
manager.NextPortAvailable++
maxPortused = manager.NextPortAvailable
} else {
port := getUsedPort(endpoint.ID, sd)
if port != 0 {
r.Initialize(strconv.Itoa(port))
} else {
for ok := true; ok; ok = isReserved(port, sd) {
port = manager.NextPortAvailable
manager.NextPortAvailable++
}
r.Initialize(strconv.Itoa(port))
}
if port > maxPortused {
maxPortused = port
} else {
maxPortused = manager.NextPortAvailable
}
}
go r.Run()
manager.RouteMap[endpoint.ID] = &r
// notify Discovery Engine to create target over in the in json file
} else {
// update labels
manager.RouteMap[endpoint.ID].Update(&endpoint)
}
}
config = model.GetConfig()
if config.Discovery_Max_Port_Used < maxPortused {
config.Discovery_Max_Port_Used = maxPortused
model.WriteConfig(config)
}
select {
case <-c:
continue
case <-ctx.Done():
return
}
}
}
func (manager *NodeManager) ExecuteFunction(body string) ([]byte, error) {
response, err := sdk.GetInstance().ExecuteFunctionByName(model.GetConfig().Noc_App_Name, os.Getenv("NOC_MODULE_NAME"), os.Getenv("NOC_TAG"), os.Getenv("NOC_FUNCTION_NAME"), body)
level.Debug(logger.GetInstance().Logger).Log("Response", response)
return response, err
}
func (manager *NodeManager) Add(r model.Router) {
manager.RouteMap[r.GetPort()] = r
}
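// GetExporterList calls the "ListExporter" serverless function and returns the
// decoded list of exporter endpoints from its ReturnPayload.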
func (manager *NodeManager) GetExporterList() []model.ExporterEndpoint {
exporter_list := make([]model.ExporterEndpoint, 0)
listExporterBody := utils.JsonEncode(model.LyFnInputParams{Command: "ListExporter"})
response, err := manager.ExecuteFunction(listExporterBody)
//response, err := sdk.GetInstance().ExecuteFunction(os.Getenv("FUNCTION_ID"), "LYR", utils.JsonEncode(model.LyFnInputParams{Command: "ListExporter"}))
if err != nil {
level.Error(logger.GetInstance().Logger).Log("Error", err)
}
var jsonresp map[string][]model.ExporterEndpoint
json.Unmarshal([]byte(response), &jsonresp)
if jsonresp["ReturnPayload"] != nil {
exporter_list = jsonresp["ReturnPayload"]
}
return exporter_list
}
func (manager *NodeManager) GetMetricsFromEndpoint(id string) {
//Result []*dto.MetricFamily
}
|
[
"\"CONFIG_DIR\"",
"\"NOC_MODULE_NAME\"",
"\"NOC_TAG\"",
"\"NOC_FUNCTION_NAME\"",
"\"FUNCTION_ID\""
] |
[] |
[
"CONFIG_DIR",
"FUNCTION_ID",
"NOC_MODULE_NAME",
"NOC_FUNCTION_NAME",
"NOC_TAG"
] |
[]
|
["CONFIG_DIR", "FUNCTION_ID", "NOC_MODULE_NAME", "NOC_FUNCTION_NAME", "NOC_TAG"]
|
go
| 5 | 0 | |
hyperopt/tests/unit/test_tpe.py
|
from past.utils import old_div
from functools import partial
import os
import unittest
import nose
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
pass
from hyperopt import pyll
from hyperopt.pyll import scope
from hyperopt import Trials
from hyperopt.base import miscs_to_idxs_vals, STATUS_OK
from hyperopt import hp
from hyperopt.tpe import adaptive_parzen_normal_orig
from hyperopt.tpe import GMM1
from hyperopt.tpe import GMM1_lpdf
from hyperopt.tpe import LGMM1
from hyperopt.tpe import LGMM1_lpdf
import hyperopt.rand as rand
import hyperopt.tpe as tpe
import hyperopt.atpe as atpe
from hyperopt import fmin
from .test_domains import domain_constructor, CasePerDomain, NonCategoricalCasePerDomain
DO_SHOW = int(os.getenv("HYPEROPT_SHOW", "0"))
def passthrough(x):
return x
def test_adaptive_parzen_normal_orig():
rng = np.random.default_rng(123)
prior_mu = 7
prior_sigma = 2
mus = rng.standard_normal(10) + 5
weights2, mus2, sigmas2 = adaptive_parzen_normal_orig(
mus, 3.3, prior_mu, prior_sigma
)
print(weights2)
print(mus2)
print(sigmas2)
assert len(weights2) == len(mus2) == len(sigmas2) == 11
assert np.all(weights2[0] > weights2[1:])
assert mus2[0] == 7
assert np.all(mus2[1:] == mus)
assert sigmas2[0] == 2
class TestGMM1(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
def test_mu_is_used_correctly(self):
assert np.allclose(10, GMM1([1], [10.0], [0.0000001], rng=self.rng))
def test_sigma_is_used_correctly(self):
samples = GMM1([1], [0.0], [10.0], size=[1000], rng=self.rng)
assert 9 < np.std(samples) < 11
def test_mus_make_variance(self):
samples = GMM1(
[0.5, 0.5], [0.0, 1.0], [0.000001, 0.000001], rng=self.rng, size=[1000]
)
print(samples.shape)
# import matplotlib.pyplot as plt
# plt.hist(samples)
# plt.show()
assert 0.45 < np.mean(samples) < 0.55, np.mean(samples)
assert 0.2 < np.var(samples) < 0.3, np.var(samples)
def test_weights(self):
samples = GMM1(
[0.9999, 0.0001],
[0.0, 1.0],
[0.000001, 0.000001],
rng=self.rng,
size=[1000],
)
assert samples.shape == (1000,)
# import matplotlib.pyplot as plt
# plt.hist(samples)
# plt.show()
assert -0.001 < np.mean(samples) < 0.001, np.mean(samples)
assert np.var(samples) < 0.0001, np.var(samples)
def test_mat_output(self):
samples = GMM1(
[0.9999, 0.0001],
[0.0, 1.0],
[0.000001, 0.000001],
rng=self.rng,
size=[40, 20],
)
assert samples.shape == (40, 20)
assert -0.001 < np.mean(samples) < 0.001, np.mean(samples)
assert np.var(samples) < 0.0001, np.var(samples)
def test_lpdf_scalar_one_component(self):
# x # weights # mu # sigma
llval = GMM1_lpdf(1.0, [1.0], [1.0], [2.0])
assert llval.shape == ()
assert np.allclose(llval, np.log(old_div(1.0, np.sqrt(2 * np.pi * 2.0 ** 2))))
def test_lpdf_scalar_N_components(self):
llval = GMM1_lpdf(
1.0, # x
[0.25, 0.25, 0.5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
print(llval)
a = 0.25 / np.sqrt(2 * np.pi * 1.0 ** 2) * np.exp(-0.5 * (1.0) ** 2)
a += old_div(0.25, np.sqrt(2 * np.pi * 2.0 ** 2))
        a += (
            0.5
            / np.sqrt(2 * np.pi * 5.0 ** 2)
            * np.exp(-0.5 * (old_div(1.0, 5.0)) ** 2)
        )
        assert llval.shape == ()
        assert np.allclose(llval, np.log(a))
def test_lpdf_vector_N_components(self):
llval = GMM1_lpdf(
[1.0, 0.0], # x
[0.25, 0.25, 0.5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
# case x = 1.0
a = 0.25 / np.sqrt(2 * np.pi * 1.0 ** 2) * np.exp(-0.5 * (1.0) ** 2)
a += old_div(0.25, np.sqrt(2 * np.pi * 2.0 ** 2))
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-0.5 * (old_div(1.0, 5.0)) ** 2)
)
assert llval.shape == (2,)
assert np.allclose(llval[0], np.log(a))
# case x = 0.0
a = old_div(0.25, np.sqrt(2 * np.pi * 1.0 ** 2))
a += (
0.25
/ np.sqrt(2 * np.pi * 2.0 ** 2)
* np.exp(-0.5 * (old_div(1.0, 2.0)) ** 2)
)
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-0.5 * (old_div(2.0, 5.0)) ** 2)
)
assert np.allclose(llval[1], np.log(a))
def test_lpdf_matrix_N_components(self):
llval = GMM1_lpdf(
[[1.0, 0.0, 0.0], [0, 0, 1], [0, 0, 1000]],
[0.25, 0.25, 0.5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
print(llval)
assert llval.shape == (3, 3)
a = 0.25 / np.sqrt(2 * np.pi * 1.0 ** 2) * np.exp(-0.5 * (1.0) ** 2)
a += old_div(0.25, np.sqrt(2 * np.pi * 2.0 ** 2))
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-0.5 * (old_div(1.0, 5.0)) ** 2)
)
assert np.allclose(llval[0, 0], np.log(a))
assert np.allclose(llval[1, 2], np.log(a))
# case x = 0.0
a = old_div(0.25, np.sqrt(2 * np.pi * 1.0 ** 2))
a += (
0.25
/ np.sqrt(2 * np.pi * 2.0 ** 2)
* np.exp(-0.5 * (old_div(1.0, 2.0)) ** 2)
)
a += (
0.5
/ np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-0.5 * (old_div(2.0, 5.0)) ** 2)
)
assert np.allclose(llval[0, 1], np.log(a))
assert np.allclose(llval[0, 2], np.log(a))
assert np.allclose(llval[1, 0], np.log(a))
assert np.allclose(llval[1, 1], np.log(a))
assert np.allclose(llval[2, 0], np.log(a))
assert np.allclose(llval[2, 1], np.log(a))
assert np.isfinite(llval[2, 2])
class TestGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [1.0, 2.0, 3.0, 4.0]
self.sigmas = [0.1, 0.4, 0.8, 2.0]
self.q = None
self.low = None
self.high = None
self.n_samples = 10001
self.samples_per_bin = 500
self.show = False
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
def work(self):
self.worked = True
kwargs = dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
samples = GMM1(rng=self.rng, size=(self.n_samples,), **kwargs)
samples = np.sort(samples)
edges = samples[:: self.samples_per_bin]
# print samples
pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
dx = edges[1:] - edges[:-1]
y = 1 / dx / len(dx)
if self.show:
plt.scatter(edges[:-1], y)
plt.plot(edges[:-1], pdf)
plt.show()
err = (pdf - y) ** 2
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if not self.show:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic(self):
self.work()
def test_bounded(self):
self.low = 2.5
self.high = 3.5
self.work()
class TestQGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [1.0, 2.0, 3.0, 4.0]
self.sigmas = [0.1, 0.4, 0.8, 2.0]
self.low = None
self.high = None
self.n_samples = 1001
self.show = DO_SHOW # or put a string
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
def work(self, **kwargs):
self.__dict__.update(kwargs)
del kwargs
self.worked = True
gkwargs = dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
samples = old_div(GMM1(rng=self.rng, size=(self.n_samples,), **gkwargs), self.q)
print("drew", len(samples), "samples")
assert np.all(samples == samples.astype("int"))
min_max = int(samples.min()), int(samples.max())
counts = np.bincount(samples.astype("int") - min_max[0])
print(counts)
xcoords = np.arange(min_max[0], min_max[1] + 1) * self.q
prob = np.exp(GMM1_lpdf(xcoords, **gkwargs))
assert counts.sum() == self.n_samples
y = old_div(counts, float(self.n_samples))
if self.show:
plt.scatter(xcoords, y, c="r", label="empirical")
plt.scatter(xcoords, prob, c="b", label="predicted")
plt.legend()
plt.title(str(self.show))
plt.show()
err = (prob - y) ** 2
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if self.show:
raise nose.SkipTest()
else:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic_1(self):
self.work(q=1)
def test_basic_2(self):
self.work(q=2)
def test_basic_pt5(self):
self.work(q=0.5)
def test_bounded_1(self):
self.work(q=1, low=2, high=4)
def test_bounded_2(self):
self.work(q=2, low=2, high=4)
def test_bounded_1b(self):
self.work(q=1, low=1, high=4.1)
def test_bounded_2b(self):
self.work(q=2, low=1, high=4.1)
def test_bounded_3(self):
self.work(
weights=[0.14285714, 0.28571429, 0.28571429, 0.28571429],
mus=[5.505, 7.0, 2.0, 10.0],
sigmas=[8.99, 5.0, 8.0, 8.0],
q=1,
low=1.01,
high=10,
n_samples=10000,
# show='bounded_3',
)
def test_bounded_3b(self):
self.work(
weights=[0.33333333, 0.66666667],
mus=[5.505, 5.0],
sigmas=[8.99, 5.19],
q=1,
low=1.01,
high=10,
n_samples=10000,
# show='bounded_3b',
)
class TestLGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [-2.0, 1.0, 0.0, 3.0]
self.sigmas = [0.1, 0.4, 0.8, 2.0]
self.low = None
self.high = None
self.n_samples = 10001
self.samples_per_bin = 200
self.show = False
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
@property
def LGMM1_kwargs(self):
return dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
)
    def LGMM1_lpdf(self, samples):
        # delegate to the module-level LGMM1_lpdf with this test's parameters
        return LGMM1_lpdf(samples, **self.LGMM1_kwargs)
def work(self, **kwargs):
self.__dict__.update(kwargs)
self.worked = True
samples = LGMM1(rng=self.rng, size=(self.n_samples,), **self.LGMM1_kwargs)
samples = np.sort(samples)
edges = samples[:: self.samples_per_bin]
centers = 0.5 * edges[:-1] + 0.5 * edges[1:]
print(edges)
pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
dx = edges[1:] - edges[:-1]
y = 1 / dx / len(dx)
if self.show:
plt.scatter(centers, y)
plt.plot(centers, pdf)
plt.show()
err = (pdf - y) ** 2
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if not self.show:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic(self):
self.work()
def test_bounded(self):
self.work(low=2, high=4)
class TestQLGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.default_rng(234)
self.weights = [0.1, 0.3, 0.4, 0.2]
self.mus = [-2, 0.0, -3.0, 1.0]
self.sigmas = [2.1, 0.4, 0.8, 2.1]
self.low = None
self.high = None
self.n_samples = 1001
self.show = DO_SHOW
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
@property
def kwargs(self):
return dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
    def QLGMM1_lpdf(self, samples):
        # delegate to the module-level LGMM1_lpdf with this test's parameters
        return LGMM1_lpdf(samples, **self.kwargs)
def work(self, **kwargs):
self.__dict__.update(kwargs)
self.worked = True
samples = old_div(
LGMM1(rng=self.rng, size=(self.n_samples,), **self.kwargs), self.q
)
# -- we've divided the LGMM1 by self.q to get ints here
assert np.all(samples == samples.astype("int"))
min_max = int(samples.min()), int(samples.max())
print("SAMPLES RANGE", min_max)
counts = np.bincount(samples.astype("int") - min_max[0])
# print samples
# print counts
xcoords = np.arange(min_max[0], min_max[1] + 0.5) * self.q
prob = np.exp(LGMM1_lpdf(xcoords, **self.kwargs))
print(xcoords)
print(prob)
assert counts.sum() == self.n_samples
y = old_div(counts, float(self.n_samples))
if self.show:
plt.scatter(xcoords, y, c="r", label="empirical")
plt.scatter(xcoords, prob, c="b", label="predicted")
plt.legend()
plt.show()
# -- calculate errors on the low end, don't take a mean
# over all the range spanned by a few outliers.
err = ((prob - y) ** 2)[:20]
print(np.max(err))
print(np.mean(err))
print(np.median(err))
if self.show:
raise nose.SkipTest()
else:
assert np.max(err) < 0.1
assert np.mean(err) < 0.01
assert np.median(err) < 0.01
def test_basic_1(self):
self.work(q=1)
def test_basic_2(self):
self.work(q=2)
def test_basic_pt5(self):
self.work(q=0.5)
def test_basic_pt125(self):
self.work(q=0.125)
def test_bounded_1(self):
self.work(q=1, low=2, high=4)
def test_bounded_2(self):
self.work(q=2, low=2, high=4)
def test_bounded_1b(self):
self.work(q=1, low=1, high=4.1)
def test_bounded_2b(self):
self.work(q=2, low=1, high=4.1)
class TestSuggest(unittest.TestCase, CasePerDomain):
def work(self):
# -- smoke test that things simply run,
# for each type of several search spaces.
trials = Trials()
fmin(
passthrough,
space=self.bandit.expr,
algo=partial(tpe.suggest, n_EI_candidates=3),
trials=trials,
max_evals=10,
)
class TestSuggestAtpe(unittest.TestCase, NonCategoricalCasePerDomain):
def work(self):
trials = Trials()
fmin(
passthrough,
space=self.bandit.expr,
algo=atpe.suggest,
trials=trials,
max_evals=10,
)
class TestOpt(unittest.TestCase, CasePerDomain):
thresholds = dict(
quadratic1=1e-5,
q1_lognormal=0.01,
distractor=-1.96,
gauss_wave=-2.0,
gauss_wave2=-2.0,
n_arms=-2.5,
many_dists=0.0005,
branin=0.7,
)
LEN = dict(
# -- running a long way out tests overflow/underflow
# to some extent
quadratic1=1000,
many_dists=200,
distractor=100,
# XXX
q1_lognormal=250,
gauss_wave2=75, # -- boosted from 50 on Nov/2013 after new
# sampling order made thresh test fail.
branin=200,
)
gammas = dict(distractor=0.05)
prior_weights = dict(distractor=0.01)
n_EIs = dict(
# XXX
# -- this can be low in a few dimensions
quadratic1=5,
# -- lower number encourages exploration
# XXX: this is a damned finicky way to get TPE
# to solve the Distractor problem
distractor=15,
)
def setUp(self):
self.olderr = np.seterr("raise")
np.seterr(under="ignore")
def tearDown(self, *args):
np.seterr(**self.olderr)
def work(self):
bandit = self.bandit
assert bandit.name is not None
algo = partial(
tpe.suggest,
gamma=self.gammas.get(bandit.name, tpe._default_gamma),
prior_weight=self.prior_weights.get(bandit.name, tpe._default_prior_weight),
n_EI_candidates=self.n_EIs.get(bandit.name, tpe._default_n_EI_candidates),
)
LEN = self.LEN.get(bandit.name, 50)
trials = Trials()
fmin(
passthrough,
space=bandit.expr,
algo=algo,
trials=trials,
max_evals=LEN,
rstate=np.random.default_rng(np.random.PCG64(0)),
catch_eval_exceptions=False,
)
assert len(trials) == LEN
if 1:
rtrials = Trials()
fmin(
passthrough,
space=bandit.expr,
algo=rand.suggest,
trials=rtrials,
max_evals=LEN,
)
print("RANDOM MINS", list(sorted(rtrials.losses()))[:6])
if 0:
plt.subplot(2, 2, 1)
plt.scatter(list(range(LEN)), trials.losses())
plt.title("TPE losses")
plt.subplot(2, 2, 2)
plt.scatter(list(range(LEN)), ([s["x"] for s in trials.specs]))
plt.title("TPE x")
plt.subplot(2, 2, 3)
plt.title("RND losses")
plt.scatter(list(range(LEN)), rtrials.losses())
plt.subplot(2, 2, 4)
plt.title("RND x")
plt.scatter(list(range(LEN)), ([s["x"] for s in rtrials.specs]))
plt.show()
if 0:
plt.hist([t["x"] for t in self.experiment.trials], bins=20)
print("TPE MINS", list(sorted(trials.losses()))[:6])
thresh = self.thresholds[bandit.name]
print("Thresh", thresh)
assert min(trials.losses()) < thresh
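# opt_q_uniform builds a domain over a quantized-uniform variable whose loss is
# the squared distance to `target` plus unit Gaussian noise; TestOptQUniform
# below uses it to inspect TPE's below/above split round by round.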
@domain_constructor(loss_target=0)
def opt_q_uniform(target):
rng = np.random.default_rng(123)
x = hp.quniform("x", 1.01, 10, 1)
return {
"loss": (x - target) ** 2 + scope.normal(0, 1, rng=rng),
"status": STATUS_OK,
}
class TestOptQUniform:
show_steps = False
show_vars = DO_SHOW
LEN = 25
def work(self, **kwargs):
self.__dict__.update(kwargs)
bandit = opt_q_uniform(self.target)
prior_weight = 2.5
gamma = 0.20
algo = partial(
tpe.suggest,
prior_weight=prior_weight,
n_startup_jobs=2,
n_EI_candidates=128,
gamma=gamma,
)
trials = Trials()
fmin(
passthrough, space=bandit.expr, algo=algo, trials=trials, max_evals=self.LEN
)
if self.show_vars:
import hyperopt.plotting
hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
idxs, vals = miscs_to_idxs_vals(trials.miscs)
idxs = idxs["x"]
vals = vals["x"]
losses = trials.losses()
from hyperopt.tpe import ap_split_trials
from hyperopt.tpe import adaptive_parzen_samplers
qu = scope.quniform(1.01, 10, 1)
fn = adaptive_parzen_samplers["quniform"]
fn_kwargs = dict(size=(4,), rng=np.random)
s_below = pyll.Literal()
s_above = pyll.Literal()
b_args = [s_below, prior_weight] + qu.pos_args
b_post = fn(*b_args, **fn_kwargs)
a_args = [s_above, prior_weight] + qu.pos_args
a_post = fn(*a_args, **fn_kwargs)
# print b_post
# print a_post
fn_lpdf = getattr(scope, a_post.name + "_lpdf")
print(fn_lpdf)
# calculate the llik of b_post under both distributions
a_kwargs = {n: a for n, a in a_post.named_args if n not in ("rng", "size")}
b_kwargs = {n: a for n, a in b_post.named_args if n not in ("rng", "size")}
below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
new_node = scope.broadcast_best(b_post, below_llik, above_llik)
print("=" * 80)
do_show = self.show_steps
for ii in range(2, 9):
if ii > len(idxs):
break
print("-" * 80)
print("ROUND", ii)
print("-" * 80)
all_vals = [2, 3, 4, 5, 6, 7, 8, 9, 10]
below, above = ap_split_trials(
idxs[:ii], vals[:ii], idxs[:ii], losses[:ii], gamma
)
below = below.astype("int")
above = above.astype("int")
print("BB0", below)
print("BB1", above)
# print 'BELOW', zip(range(100), np.bincount(below, minlength=11))
# print 'ABOVE', zip(range(100), np.bincount(above, minlength=11))
memo = {b_post: all_vals, s_below: below, s_above: above}
bl, al, nv = pyll.rec_eval([below_llik, above_llik, new_node], memo=memo)
# print bl - al
print("BB2", dict(list(zip(all_vals, bl - al))))
print("BB3", dict(list(zip(all_vals, bl))))
print("BB4", dict(list(zip(all_vals, al))))
print("ORIG PICKED", vals[ii])
print("PROPER OPT PICKS:", nv)
# assert np.allclose(below, [3, 3, 9])
# assert len(below) + len(above) == len(vals)
if do_show:
plt.subplot(8, 1, ii)
# plt.scatter(all_vals,
# np.bincount(below, minlength=11)[2:], c='b')
# plt.scatter(all_vals,
# np.bincount(above, minlength=11)[2:], c='c')
plt.scatter(all_vals, bl, c="g")
plt.scatter(all_vals, al, c="r")
if do_show:
plt.show()
def test4(self):
self.work(target=4, LEN=100)
def test2(self):
self.work(target=2, LEN=100)
def test6(self):
self.work(target=6, LEN=100)
def test10(self):
self.work(target=10, LEN=100)
|
[] |
[] |
[
"HYPEROPT_SHOW"
] |
[]
|
["HYPEROPT_SHOW"]
|
python
| 1 | 0 | |
cmd/readhistory/main.go
|
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"strings"
"github.com/ecc1/medtronic"
"github.com/ecc1/nightscout"
)
var (
verbose = flag.Bool("v", false, "print record details")
model = flag.Int("m", 523, "pump model")
nsFlag = flag.Bool("t", false, "format as Nightscout treatments")
timeBlank = strings.Repeat(" ", len(medtronic.UserTimeLayout))
)
func main() {
flag.Parse()
family := medtronic.Family(*model % 100)
for _, file := range flag.Args() {
f, err := os.Open(file)
if err != nil {
log.Fatal(err)
}
data, err := readBytes(f)
_ = f.Close()
if err != nil {
log.Fatal(err)
}
readHistory(data, family)
}
}
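// readBytes reads hex-encoded byte values from r, stopping at the first token
// that cannot be parsed as a two-digit hex number, and returns the raw bytes.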
func readBytes(r io.Reader) ([]byte, error) {
var data []byte
for {
var b byte
n, err := fmt.Fscanf(r, "%02x", &b)
if n == 0 {
break
}
if err != nil {
return data, err
}
data = append(data, b)
}
return data, nil
}
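// readHistory decodes the pump history records and prints them either as raw
// JSON (verbose), as Nightscout treatments, or as one summary line per record.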
func readHistory(data []byte, family medtronic.Family) {
records, err := medtronic.DecodeHistory(data, family)
if err != nil {
fmt.Printf("Error: %v\n", err)
*verbose = true
}
if *verbose {
fmt.Println(nightscout.JSON(records))
} else if *nsFlag {
medtronic.ReverseHistory(records)
fmt.Println(nightscout.JSON(medtronic.Treatments(records)))
} else {
for _, r := range records {
printRecord(r)
}
}
}
func printRecord(r medtronic.HistoryRecord) {
t := r.Time
tStr := timeBlank
if !t.IsZero() {
tStr = t.Format(medtronic.UserTimeLayout)
}
fmt.Printf("%s %v\n", tStr, r.Type())
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
piweatherrock/weather.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Jim Kemp <[email protected]>
# Copyright (c) 2017 Gene Liverman <[email protected]>
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
# standard imports
import datetime
import os
import platform
import signal
import sys
import time
import json
import logging
import logging.handlers
# third party imports
import pygame
import requests
import locale
#now = datetime.datetime.now()
locale.setlocale(locale.LC_TIME, "")
url_holfuy = "http://api.holfuy.com/live/?s={s}&pw={pw}&&m=JSON&tu=C&su=knots&batt"
url_owm = "https://api.openweathermap.org/data/2.5/onecall?"
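# url_holfuy returns live wind data for a single Holfuy station; url_owm is the
# OpenWeatherMap "One Call" endpoint used for the general forecast.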
# globals
UNICODE_DEGREE = u'\xb0'
def exit_gracefully(signum, frame):
sys.exit(0)
signal.signal(signal.SIGTERM, exit_gracefully)
class Weather:
"""
    Fetches weather data from OpenWeatherMap (and wind data from Holfuy or
    Windguru) for displaying on a screen.
"""
def __init__(self, config_file):
with open(config_file, "r") as f:
self.config = json.load(f)
self.last_update_check = 0
self.weather = {}
self.wind = {}
self.get_forecast()
if platform.system() == 'Darwin':
pygame.display.init()
driver = pygame.display.get_driver()
print(f"Using the {driver} driver.")
else:
# Based on "Python GUI in Linux frame buffer"
# http://www.karoltomala.com/blog/?p=679
disp_no = os.getenv("DISPLAY")
if disp_no:
print(f"X Display = {disp_no}")
# Check which frame buffer drivers are available
# Start with fbcon since directfb hangs with composite output
drivers = ['x11', 'fbcon', 'directfb', 'svgalib']
found = False
for driver in drivers:
# Make sure that SDL_VIDEODRIVER is set
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
print("Driver: {driver} failed.")
continue
found = True
break
if not found:
print("No suitable video driver found!")
#size = (800,480)
size = (pygame.display.Info().current_w,
pygame.display.Info().current_h)
self.sizing(size)
# Clear the screen to start
self.screen.fill((0, 0, 0))
# Initialise font support
pygame.font.init()
# Render the screen
pygame.mouse.set_visible(0)
pygame.display.update()
        self.subwindow_text_height = 0.059  # text height, daily meteo
        self.time_date_text_height = 0.13  # text height, date
        self.time_date_small_text_height = 0.072  # text height, wind
self.time_date_y_position = 8
self.time_date_small_y_position = 18
def __del__(self):
"Destructor to make sure pygame shuts down, etc."
def sizing(self, size):
"""
        Set various aspects of the app related to the screen size of
the display and/or window.
"""
print(f"Framebuffer Size: {size[0]} x {size[1]}")
if self.config["fullscreen"]:
self.screen = pygame.display.set_mode(size, pygame.NOFRAME)
            self.xmax = pygame.display.Info().current_w - 5  # - 35 Why not use full screen in "fullscreen"?
            self.ymax = pygame.display.Info().current_h  # - 5 Why not use full screen in "fullscreen"?
else:
self.screen = pygame.display.set_mode(size, pygame.RESIZABLE)
pygame.display.set_caption('PiWeatherRock')
self.xmax = pygame.display.get_surface().get_width() - 15
self.ymax = pygame.display.get_surface().get_height() - 5
print(self.config["units"])
if self.xmax <= 1024:
self.icon_size = '128'
else:
self.icon_size = '256'
def get_forecast(self):
"""
Gets updated information if the 'update_freq' amount of time has
passed since last querying the api.
"""
if (time.time() - self.last_update_check) > self.config["update_freq"]:
self.last_update_check = time.time()
try:
querystring_owm = {
"lat": self.config["lat"],
"lon": self.config["lon"],
"units": self.config["units"],
"apikey": self.config["ds_api_key"],
"exclude": "minutely"
}
self.weather = requests.request("GET", url_owm, params=querystring_owm).json()
if self.config["holfuy_api_key"] == "null":
url = 'https://www.windguru.cz/int/iapi.php?q=station_data_current&id_station={s}&date_format=Y-m-d%20H%3Ai%3As%20T&_mha=f4d18b6c'.format(s=self.config["id_station"])
url_h = 'https://www.windguru.cz/station/{s}'.format(s=self.config["id_station"])
headers = {'Referer' : url_h}
self.wind = requests.get(url, headers = headers).json()
else:
querystring_h = {
"s": self.config["id_station"],
"pw": self.config["holfuy_api_key"]
}
self.wind = requests.request("GET", url_holfuy, params=querystring_h).json()
except requests.exceptions.RequestException as e:
print(f"Request exception: {e}")
return False
except AttributeError as e:
print(f"Attribute error: {e}")
return False
return True
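    # Illustrative config.json shape (keys referenced by this class; the values below are
    # placeholders and an assumption, not taken from the project's documentation):
    # {"fullscreen": true, "units": "metric", "update_freq": 600,
    #  "lat": 46.5, "lon": 8.0, "ds_api_key": "<openweathermap-key>",
    #  "id_station": 101, "holfuy_api_key": "null"}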
def screen_cap(self):
"""
Save a jpg image of the screen
"""
pygame.image.save(self.screen, "screenshot.jpeg")
print("Screen capture complete.")
|
[] |
[] |
[
"SDL_VIDEODRIVER",
"DISPLAY"
] |
[]
|
["SDL_VIDEODRIVER", "DISPLAY"]
|
python
| 2 | 0 | |
Assets/main.py
|
# Assets 1.0
# Called from a Pub/Sub topic
# Create a cron schedule that sends a Pub/Sub message to call this function and refresh the asset inventory
# Use the GCS function template to read from GCS into HEC
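# Illustrative environment configuration (example values, an assumption rather than the
# original deployment's settings):
#   PARENT="projects/my-project-id"         # or "organizations/<ORG_ID>" / "folders/<FOLDER_ID>"
#   GCS_FILE_PATH="gs://my-bucket/assets-"  # a timestamp and ".json" are appended below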
import os
import time
def hello_pubsub(event, context):
from google.cloud import asset_v1
parent_id = os.environ['PARENT']
dump_file_path = os.environ['GCS_FILE_PATH']
now = time.time()
client = asset_v1.AssetServiceClient()
output_config = asset_v1.OutputConfig()
output_config.gcs_destination.uri = dump_file_path+str(now)+".json"
content_type = asset_v1.ContentType.RESOURCE
response = client.export_assets(
request={
"parent": parent_id,
"content_type": content_type,
"output_config": output_config
}
)
|
[] |
[] |
[
"PARENT",
"GCS_FILE_PATH"
] |
[]
|
["PARENT", "GCS_FILE_PATH"]
|
python
| 2 | 0 | |
test/conformance/tests/init.go
|
package tests
import (
"os"
testclient "github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/client"
)
var (
clients *testclient.ClientSet
operatorNamespace string
)
func init() {
operatorNamespace = os.Getenv("OPERATOR_NAMESPACE")
if operatorNamespace == "" {
operatorNamespace = "openshift-sriov-network-operator"
}
clients = testclient.New("")
}
|
[
"\"OPERATOR_NAMESPACE\""
] |
[] |
[
"OPERATOR_NAMESPACE"
] |
[]
|
["OPERATOR_NAMESPACE"]
|
go
| 1 | 0 | |
posthog/utils.py
|
import base64
import dataclasses
import datetime
import datetime as dt
import gzip
import hashlib
import json
import os
import re
import shutil
import subprocess
import sys
import time
import uuid
from enum import Enum
from itertools import count
from typing import (
Any,
Dict,
Generator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from urllib.parse import urljoin, urlparse
import lzstring
import pytz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.cache import cache
from django.db.models.query import QuerySet
from django.db.utils import DatabaseError
from django.http import HttpRequest, HttpResponse
from django.template.loader import get_template
from django.utils import timezone
from rest_framework.request import Request
from sentry_sdk import configure_scope
from posthog.constants import AnalyticsDBMS, AvailableFeature
from posthog.exceptions import RequestParsingError
from posthog.redis import get_client
DATERANGE_MAP = {
"minute": datetime.timedelta(minutes=1),
"hour": datetime.timedelta(hours=1),
"day": datetime.timedelta(days=1),
"week": datetime.timedelta(weeks=1),
"month": datetime.timedelta(days=31),
}
ANONYMOUS_REGEX = r"^([a-z0-9]+\-){4}([a-z0-9]+)$"
# https://stackoverflow.com/questions/4060221/how-to-reliably-open-a-file-in-the-same-directory-as-a-python-script
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def format_label_date(date: datetime.datetime, interval: str) -> str:
labels_format = "%-d-%b-%Y"
if interval == "hour" or interval == "minute":
labels_format += " %H:%M"
return date.strftime(labels_format)
def absolute_uri(url: Optional[str] = None) -> str:
"""
Returns an absolutely-formatted URL based on the `SITE_URL` config.
"""
if not url:
return settings.SITE_URL
return urljoin(settings.SITE_URL.rstrip("/") + "/", url.lstrip("/"))
def get_previous_week(at: Optional[datetime.datetime] = None) -> Tuple[datetime.datetime, datetime.datetime]:
"""
    Returns a pair of datetimes, representing the start and end of the week preceding the passed date's week.
`at` is the datetime to use as a reference point.
"""
if not at:
at = timezone.now()
period_end: datetime.datetime = datetime.datetime.combine(
at - datetime.timedelta(timezone.now().weekday() + 1), datetime.time.max, tzinfo=pytz.UTC,
) # very end of the previous Sunday
period_start: datetime.datetime = datetime.datetime.combine(
period_end - datetime.timedelta(6), datetime.time.min, tzinfo=pytz.UTC,
) # very start of the previous Monday
return (period_start, period_end)
def get_previous_day(at: Optional[datetime.datetime] = None) -> Tuple[datetime.datetime, datetime.datetime]:
"""
Returns a pair of datetimes, representing the start and end of the preceding day.
`at` is the datetime to use as a reference point.
"""
if not at:
at = timezone.now()
period_end: datetime.datetime = datetime.datetime.combine(
at - datetime.timedelta(days=1), datetime.time.max, tzinfo=pytz.UTC,
) # very end of the previous day
period_start: datetime.datetime = datetime.datetime.combine(
period_end, datetime.time.min, tzinfo=pytz.UTC,
) # very start of the previous day
return (period_start, period_end)
def relative_date_parse(input: str) -> datetime.datetime:
try:
return datetime.datetime.strptime(input, "%Y-%m-%d").replace(tzinfo=pytz.UTC)
except ValueError:
pass
# when input also contains the time for intervals "hour" and "minute"
# the above try fails. Try one more time from isoformat.
try:
return parser.isoparse(input).replace(tzinfo=pytz.UTC)
except ValueError:
pass
regex = r"\-?(?P<number>[0-9]+)?(?P<type>[a-z])(?P<position>Start|End)?"
match = re.search(regex, input)
date = timezone.now()
if not match:
return date
if match.group("type") == "h":
date -= relativedelta(hours=int(match.group("number")))
return date.replace(minute=0, second=0, microsecond=0)
elif match.group("type") == "d":
if match.group("number"):
date -= relativedelta(days=int(match.group("number")))
elif match.group("type") == "w":
if match.group("number"):
date -= relativedelta(weeks=int(match.group("number")))
elif match.group("type") == "m":
if match.group("number"):
date -= relativedelta(months=int(match.group("number")))
if match.group("position") == "Start":
date -= relativedelta(day=1)
if match.group("position") == "End":
date -= relativedelta(day=31)
elif match.group("type") == "y":
if match.group("number"):
date -= relativedelta(years=int(match.group("number")))
if match.group("position") == "Start":
date -= relativedelta(month=1, day=1)
if match.group("position") == "End":
date -= relativedelta(month=12, day=31)
return date.replace(hour=0, minute=0, second=0, microsecond=0)
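# Illustrative inputs for relative_date_parse (derived from the regex above, not exhaustive):
# "2021-06-01" parses as an absolute date, "-7d" is 7 days ago at midnight,
# "-1mStart" is the first day of the previous month, and "-1yEnd" is Dec 31 of last year.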
def request_to_date_query(filters: Dict[str, Any], exact: Optional[bool]) -> Dict[str, datetime.datetime]:
if filters.get("date_from"):
date_from: Optional[datetime.datetime] = relative_date_parse(filters["date_from"])
if filters["date_from"] == "all":
date_from = None
else:
date_from = datetime.datetime.today() - relativedelta(days=7)
date_from = date_from.replace(hour=0, minute=0, second=0, microsecond=0)
date_to = None
if filters.get("date_to"):
date_to = relative_date_parse(filters["date_to"])
resp = {}
if date_from:
resp["timestamp__gte"] = date_from.replace(tzinfo=pytz.UTC)
if date_to:
days = 1 if not exact else 0
resp["timestamp__lte"] = (date_to + relativedelta(days=days)).replace(tzinfo=pytz.UTC)
return resp
def get_git_branch() -> Optional[str]:
"""
Returns the symbolic name of the current active branch. Will return None in case of failure.
Example: get_git_branch()
=> "master"
"""
try:
return (
subprocess.check_output(["git", "rev-parse", "--symbolic-full-name", "--abbrev-ref", "HEAD"])
.decode("utf-8")
.strip()
)
except Exception:
return None
def get_git_commit() -> Optional[str]:
"""
Returns the short hash of the last commit.
Example: get_git_commit()
=> "4ff54c8d"
"""
try:
return subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode("utf-8").strip()
except Exception:
return None
def render_template(template_name: str, request: HttpRequest, context: Dict = {}) -> HttpResponse:
from loginas.utils import is_impersonated_session
template = get_template(template_name)
context["opt_out_capture"] = os.getenv("OPT_OUT_CAPTURE", False) or is_impersonated_session(request)
context["self_capture"] = settings.SELF_CAPTURE
if os.environ.get("SENTRY_DSN"):
context["sentry_dsn"] = os.environ["SENTRY_DSN"]
if settings.DEBUG and not settings.TEST:
context["debug"] = True
context["git_rev"] = get_git_commit()
context["git_branch"] = get_git_branch()
if settings.E2E_TESTING:
context["e2e_testing"] = True
if settings.SELF_CAPTURE:
api_token = get_self_capture_api_token(request)
if api_token:
context["js_posthog_api_key"] = f"'{api_token}'"
context["js_posthog_host"] = "window.location.origin"
else:
context["js_posthog_api_key"] = "'sTMFPsFhdP1Ssg'"
context["js_posthog_host"] = "'https://app.posthog.com'"
context["js_capture_internal_metrics"] = settings.CAPTURE_INTERNAL_METRICS
context["js_url"] = settings.JS_URL
posthog_app_context: Dict[str, Any] = {
"persisted_feature_flags": settings.PERSISTED_FEATURE_FLAGS,
}
# Set the frontend app context
if not request.GET.get("no-preloaded-app-context"):
from posthog.api.team import TeamSerializer
from posthog.api.user import User, UserSerializer
from posthog.views import preflight_check
posthog_app_context = {
"current_user": None,
"current_team": None,
"preflight": json.loads(preflight_check(request).getvalue()),
"default_event_name": get_default_event_name(),
**posthog_app_context,
}
if request.user.pk:
user_serialized = UserSerializer(request.user, context={"request": request}, many=False)
posthog_app_context["current_user"] = user_serialized.data
team = cast(User, request.user).team
if team:
team_serialized = TeamSerializer(team, context={"request": request}, many=False)
posthog_app_context["current_team"] = team_serialized.data
context["posthog_app_context"] = json.dumps(posthog_app_context, default=json_uuid_convert)
html = template.render(context, request=request)
return HttpResponse(html)
def get_self_capture_api_token(request: HttpRequest) -> Optional[str]:
from posthog.models import Team
# Get the current user's team (or first team in the instance) to set self capture configs
team: Optional[Team] = None
try:
team = request.user.team # type: ignore
except (Team.DoesNotExist, AttributeError):
team = Team.objects.only("api_token").first()
if team:
return team.api_token
return None
def get_default_event_name():
from posthog.models import EventDefinition
if EventDefinition.objects.filter(name="$pageview").exists():
return "$pageview"
elif EventDefinition.objects.filter(name="$screen").exists():
return "$screen"
return "$pageview"
def json_uuid_convert(o):
if isinstance(o, uuid.UUID):
return str(o)
def friendly_time(seconds: float):
minutes, seconds = divmod(seconds, 60.0)
hours, minutes = divmod(minutes, 60.0)
return "{hours}{minutes}{seconds}".format(
hours=f"{int(hours)} hours " if hours > 0 else "",
minutes=f"{int(minutes)} minutes " if minutes > 0 else "",
seconds=f"{int(seconds)} seconds" if seconds > 0 or (minutes == 0 and hours == 0) else "",
).strip()
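# Illustrative: friendly_time(3725) -> "1 hours 2 minutes 5 seconds"; friendly_time(45) -> "45 seconds".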
def append_data(dates_filled: List, interval=None, math="sum") -> Dict[str, Any]:
append: Dict[str, Any] = {}
append["data"] = []
append["labels"] = []
append["days"] = []
days_format = "%Y-%m-%d"
if interval == "hour" or interval == "minute":
days_format += " %H:%M:%S"
for item in dates_filled:
date = item[0]
value = item[1]
append["days"].append(date.strftime(days_format))
append["labels"].append(format_label_date(date, interval))
append["data"].append(value)
if math == "sum":
append["count"] = sum(append["data"])
return append
def get_ip_address(request: HttpRequest) -> str:
"""use requestobject to fetch client machine's IP Address"""
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR") # Real IP address of client Machine
return ip
def dict_from_cursor_fetchall(cursor):
columns = [col[0] for col in cursor.description]
return [dict(zip(columns, row)) for row in cursor.fetchall()]
def convert_property_value(input: Union[str, bool, dict, list, int]) -> str:
if isinstance(input, bool):
if input is True:
return "true"
return "false"
if isinstance(input, dict) or isinstance(input, list):
return json.dumps(input, sort_keys=True)
return str(input)
def get_compare_period_dates(
date_from: datetime.datetime, date_to: datetime.datetime,
) -> Tuple[datetime.datetime, datetime.datetime]:
new_date_to = date_from
diff = date_to - date_from
new_date_from = date_from - diff
return new_date_from, new_date_to
def cors_response(request, response):
if not request.META.get("HTTP_ORIGIN"):
return response
url = urlparse(request.META["HTTP_ORIGIN"])
response["Access-Control-Allow-Origin"] = f"{url.scheme}://{url.netloc}"
response["Access-Control-Allow-Credentials"] = "true"
response["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
response["Access-Control-Allow-Headers"] = "X-Requested-With"
return response
def generate_cache_key(stringified: str) -> str:
return "cache_" + hashlib.md5(stringified.encode("utf-8")).hexdigest()
def get_celery_heartbeat() -> Union[str, int]:
last_heartbeat = get_client().get("POSTHOG_HEARTBEAT")
worker_heartbeat = int(time.time()) - int(last_heartbeat) if last_heartbeat else -1
if 0 <= worker_heartbeat < 300:
return worker_heartbeat
return "offline"
def base64_decode(data):
"""
    Decodes base64 bytes into a string, taking into account the transformations needed to match client libraries.
"""
if not isinstance(data, str):
data = data.decode()
data = base64.b64decode(data.replace(" ", "+") + "===")
return data.decode("utf8", "surrogatepass").encode("utf-16", "surrogatepass")
# Used by non-DRF endpoints from capture.py and decide.py (/decide, /batch, /capture, etc)
def load_data_from_request(request):
data = None
if request.method == "POST":
if request.content_type in ["", "text/plain", "application/json"]:
data = request.body
else:
data = request.POST.get("data")
else:
data = request.GET.get("data")
if not data:
return None
# add the data in sentry's scope in case there's an exception
with configure_scope() as scope:
scope.set_context("data", data)
scope.set_tag("origin", request.META.get("REMOTE_HOST", "unknown"))
scope.set_tag("referer", request.META.get("HTTP_REFERER", "unknown"))
compression = (
request.GET.get("compression") or request.POST.get("compression") or request.headers.get("content-encoding", "")
)
compression = compression.lower()
if compression == "gzip" or compression == "gzip-js":
if data == b"undefined":
raise RequestParsingError(
"data being loaded from the request body for decompression is the literal string 'undefined'"
)
try:
data = gzip.decompress(data)
except (EOFError, OSError) as error:
raise RequestParsingError("Failed to decompress data. %s" % (str(error)))
if compression == "lz64":
if not isinstance(data, str):
data = data.decode()
data = data.replace(" ", "+")
data = lzstring.LZString().decompressFromBase64(data)
if not data:
raise RequestParsingError("Failed to decompress data.")
data = data.encode("utf-16", "surrogatepass").decode("utf-16")
base64_decoded = None
try:
base64_decoded = base64_decode(data)
except Exception:
pass
if base64_decoded:
data = base64_decoded
try:
# parse_constant gets called in case of NaN, Infinity etc
# default behaviour is to put those into the DB directly
# but we just want it to return None
data = json.loads(data, parse_constant=lambda x: None)
except (json.JSONDecodeError, UnicodeDecodeError) as error_main:
raise RequestParsingError("Invalid JSON: %s" % (str(error_main)))
# TODO: data can also be an array, function assumes it's either None or a dictionary.
return data
class SingletonDecorator:
def __init__(self, klass):
self.klass = klass
self.instance = None
def __call__(self, *args, **kwds):
if self.instance is None:
self.instance = self.klass(*args, **kwds)
return self.instance
def get_machine_id() -> str:
"""A MAC address-dependent ID. Useful for PostHog instance analytics."""
    # MAC addresses are 6 bytes long, so overflow shouldn't happen
# hashing here as we don't care about the actual address, just it being rather consistent
return hashlib.md5(uuid.getnode().to_bytes(6, "little")).hexdigest()
def get_table_size(table_name) -> str:
from django.db import connection
query = (
f'SELECT pg_size_pretty(pg_total_relation_size(relid)) AS "size" '
f"FROM pg_catalog.pg_statio_user_tables "
f"WHERE relname = '{table_name}'"
)
cursor = connection.cursor()
cursor.execute(query)
return dict_from_cursor_fetchall(cursor)[0]["size"]
def get_table_approx_count(table_name) -> str:
from django.db import connection
query = f"SELECT reltuples::BIGINT as \"approx_count\" FROM pg_class WHERE relname = '{table_name}'"
cursor = connection.cursor()
cursor.execute(query)
return compact_number(dict_from_cursor_fetchall(cursor)[0]["approx_count"])
def compact_number(value: Union[int, float]) -> str:
"""Return a number in a compact format, with a SI suffix if applicable.
Client-side equivalent: utils.tsx#compactNumber.
"""
value = float(f"{value:.3g}")
magnitude = 0
while abs(value) >= 1000:
magnitude += 1
value /= 1000.0
return f"{value:f}".rstrip("0").rstrip(".") + ["", "K", "M", "B", "T", "P", "E", "Z", "Y"][magnitude]
def is_postgres_alive() -> bool:
from posthog.models import User
try:
User.objects.count()
return True
except DatabaseError:
return False
def is_redis_alive() -> bool:
try:
get_redis_info()
return True
except BaseException:
return False
def is_celery_alive() -> bool:
try:
return get_celery_heartbeat() != "offline"
except BaseException:
return False
def is_plugin_server_alive() -> bool:
try:
ping = get_client().get("@posthog-plugin-server/ping")
return bool(ping and parser.isoparse(ping) > timezone.now() - relativedelta(seconds=30))
except BaseException:
return False
def get_plugin_server_version() -> Optional[str]:
cache_key_value = get_client().get("@posthog-plugin-server/version")
if cache_key_value:
return cache_key_value.decode("utf-8")
return None
def get_plugin_server_job_queues() -> Optional[List[str]]:
cache_key_value = get_client().get("@posthog-plugin-server/enabled-job-queues")
if cache_key_value:
qs = cache_key_value.decode("utf-8").replace('"', "")
return qs.split(",")
return None
def get_redis_info() -> Mapping[str, Any]:
return get_client().info()
def get_redis_queue_depth() -> int:
return get_client().llen("celery")
def queryset_to_named_query(qs: QuerySet, prepend: str = "") -> Tuple[str, dict]:
raw, params = qs.query.sql_with_params()
arg_count = 0
counter = count(arg_count)
new_string = re.sub(r"%s", lambda _: f"%({prepend}_arg_{str(next(counter))})s", raw)
named_params = {}
for idx, param in enumerate(params):
named_params.update({f"{prepend}_arg_{idx}": param})
return new_string, named_params
def is_clickhouse_enabled() -> bool:
return settings.EE_AVAILABLE and settings.PRIMARY_DB == AnalyticsDBMS.CLICKHOUSE
def get_instance_realm() -> str:
"""
Returns the realm for the current instance. `cloud` or `hosted` or `hosted-clickhouse`.
"""
if settings.MULTI_TENANCY:
return "cloud"
elif is_clickhouse_enabled():
return "hosted-clickhouse"
else:
return "hosted"
def get_can_create_org() -> bool:
"""Returns whether a new organization can be created in the current instance.
Organizations can be created only in the following cases:
- if on PostHog Cloud
- if running end-to-end tests
- if there's no organization yet
- if an appropriate license is active and MULTI_ORG_ENABLED is True
"""
from posthog.models.organization import Organization
if (
settings.MULTI_TENANCY
or settings.E2E_TESTING
or not Organization.objects.filter(for_internal_metrics=False).exists()
):
return True
if settings.MULTI_ORG_ENABLED:
try:
from ee.models.license import License
except ImportError:
pass
else:
license = License.objects.first_valid()
if license is not None and AvailableFeature.ZAPIER in license.available_features:
return True
else:
print_warning(["You have configured MULTI_ORG_ENABLED, but not the required premium PostHog plan!"])
return False
def get_available_social_auth_providers() -> Dict[str, bool]:
output: Dict[str, bool] = {
"github": bool(settings.SOCIAL_AUTH_GITHUB_KEY and settings.SOCIAL_AUTH_GITHUB_SECRET),
"gitlab": bool(settings.SOCIAL_AUTH_GITLAB_KEY and settings.SOCIAL_AUTH_GITLAB_SECRET),
"google-oauth2": False,
"saml": False,
}
# Get license information
bypass_license: bool = settings.MULTI_TENANCY
license = None
try:
from ee.models.license import License
except ImportError:
pass
else:
license = License.objects.first_valid()
if getattr(settings, "SOCIAL_AUTH_GOOGLE_OAUTH2_KEY", None) and getattr(
settings, "SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET", None,
):
if bypass_license or (license is not None and AvailableFeature.GOOGLE_LOGIN in license.available_features):
output["google-oauth2"] = True
else:
print_warning(["You have Google login set up, but not the required license!"])
if getattr(settings, "SAML_CONFIGURED", None):
if bypass_license or (license is not None and AvailableFeature.SAML in license.available_features):
output["saml"] = True
else:
print_warning(["You have SAML set up, but not the required license!"])
return output
def flatten(l: Union[List, Tuple]) -> Generator:
for el in l:
if isinstance(el, list):
yield from flatten(el)
else:
yield el
def get_daterange(
start_date: Optional[datetime.datetime], end_date: Optional[datetime.datetime], frequency: str
) -> List[Any]:
"""
    Returns a list of fixed-frequency datetime objects between the given bounds.
Parameters:
start_date: Left bound for generating dates.
end_date: Right bound for generating dates.
frequency: Possible options => minute, hour, day, week, month
"""
delta = DATERANGE_MAP[frequency]
if not start_date or not end_date:
return []
time_range = []
if frequency != "minute" and frequency != "hour":
start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)
if frequency == "week":
start_date -= datetime.timedelta(days=(start_date.weekday() + 1) % 7)
if frequency != "month":
while start_date <= end_date:
time_range.append(start_date)
start_date += delta
else:
if start_date.day != 1:
            start_date = start_date.replace(day=1)
while start_date <= end_date:
time_range.append(start_date)
start_date = (start_date.replace(day=1) + delta).replace(day=1)
return time_range
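# Illustrative: get_daterange(datetime.datetime(2021, 1, 5), datetime.datetime(2021, 1, 20), "week")
# snaps the start back to the preceding Sunday and yields [2021-01-03, 2021-01-10, 2021-01-17], each at midnight.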
def get_safe_cache(cache_key: str):
try:
cached_result = cache.get(cache_key) # cache.get is safe in most cases
return cached_result
except Exception: # if it errors out, the cache is probably corrupted
try:
cache.delete(cache_key) # in that case, try to delete the cache
except Exception:
pass
return None
def is_anonymous_id(distinct_id: str) -> bool:
# Our anonymous ids are _not_ uuids, but a random collection of strings
return bool(re.match(ANONYMOUS_REGEX, distinct_id))
def mask_email_address(email_address: str) -> str:
"""
Grabs an email address and returns it masked in a human-friendly way to protect PII.
Example: [email protected] -> t********[email protected]
"""
index = email_address.find("@")
if index == -1:
raise ValueError("Please provide a valid email address.")
if index == 1:
# Username is one letter, mask it differently
return f"*{email_address[index:]}"
return f"{email_address[0]}{'*' * (index - 2)}{email_address[index-1:]}"
def is_valid_regex(value: Any) -> bool:
try:
re.compile(value)
return True
except re.error:
return False
def get_absolute_path(to: str) -> str:
"""
Returns an absolute path in the FS based on posthog/posthog (back-end root folder)
"""
return os.path.join(__location__, to)
class GenericEmails:
"""
List of generic emails that we don't want to use to filter out test accounts.
"""
def __init__(self):
with open(get_absolute_path("helpers/generic_emails.txt"), "r") as f:
self.emails = {x.rstrip(): True for x in f}
def is_generic(self, email: str) -> bool:
at_location = email.find("@")
if at_location == -1:
return False
return self.emails.get(email[(at_location + 1) :], False)
def get_available_timezones_with_offsets() -> Dict[str, float]:
now = dt.datetime.now()
result = {}
for tz in pytz.common_timezones:
try:
offset = pytz.timezone(tz).utcoffset(now)
except Exception:
offset = pytz.timezone(tz).utcoffset(now + dt.timedelta(hours=2))
if offset is None:
continue
offset_hours = int(offset.total_seconds()) / 3600
result[tz] = offset_hours
return result
def should_refresh(request: Request) -> bool:
key = "refresh"
return (request.query_params.get(key, "") or request.GET.get(key, "")).lower() == "true" or request.data.get(
key, False
) == True
def str_to_bool(value: Any) -> bool:
"""Return whether the provided string (or any value really) represents true. Otherwise false.
Just like plugin server stringToBoolean.
"""
if not value:
return False
return str(value).lower() in ("y", "yes", "t", "true", "on", "1")
def print_warning(warning_lines: Sequence[str]):
highlight_length = min(max(map(len, warning_lines)) // 2, shutil.get_terminal_size().columns)
print(
"\n".join(("", "🔻" * highlight_length, *warning_lines, "🔺" * highlight_length, "",)), file=sys.stderr,
)
def get_helm_info_env() -> dict:
try:
return json.loads(os.getenv("HELM_INSTALL_INFO", "{}"))
except Exception:
return {}
def format_query_params_absolute_url(
request: Request,
offset: Optional[int] = None,
limit: Optional[int] = None,
offset_alias: Optional[str] = "offset",
limit_alias: Optional[str] = "limit",
) -> Optional[str]:
OFFSET_REGEX = re.compile(fr"([&?]{offset_alias}=)(\d+)")
LIMIT_REGEX = re.compile(fr"([&?]{limit_alias}=)(\d+)")
url_to_format = request.get_raw_uri()
if not url_to_format:
return None
if offset:
if OFFSET_REGEX.search(url_to_format):
url_to_format = OFFSET_REGEX.sub(fr"\g<1>{offset}", url_to_format)
else:
url_to_format = url_to_format + ("&" if "?" in url_to_format else "?") + f"{offset_alias}={offset}"
if limit:
if LIMIT_REGEX.search(url_to_format):
url_to_format = LIMIT_REGEX.sub(fr"\g<1>{limit}", url_to_format)
else:
url_to_format = url_to_format + ("&" if "?" in url_to_format else "?") + f"{limit_alias}={limit}"
return url_to_format
def get_milliseconds_between_dates(d1: dt.datetime, d2: dt.datetime) -> int:
return abs(int((d1 - d2).total_seconds() * 1000))
def encode_get_request_params(data: Dict[str, Any]) -> Dict[str, str]:
return {
key: encode_value_as_param(value=value)
for key, value in data.items()
# NOTE: we cannot encode `None` as a GET parameter, so we simply omit it
if value is not None
}
class DataclassJSONEncoder(json.JSONEncoder):
def default(self, o):
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
return super().default(o)
def encode_value_as_param(value: Union[str, list, dict]) -> str:
if isinstance(value, (list, dict)):
return json.dumps(value, cls=DataclassJSONEncoder)
elif isinstance(value, Enum):
return value.value
else:
return value
|
[] |
[] |
[
"HELM_INSTALL_INFO",
"SENTRY_DSN",
"OPT_OUT_CAPTURE"
] |
[]
|
["HELM_INSTALL_INFO", "SENTRY_DSN", "OPT_OUT_CAPTURE"]
|
python
| 3 | 0 | |
column/string_string_lc_test.go
|
package column_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vahid-sohrabloo/chconn"
"github.com/vahid-sohrabloo/chconn/column"
)
func TestSringStringLC(t *testing.T) {
t.Parallel()
connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
conn, err := chconn.Connect(context.Background(), connString)
require.NoError(t, err)
res, err := conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_lc_string`)
require.NoError(t, err)
require.Nil(t, res)
res, err = conn.Exec(context.Background(), `CREATE TABLE test_lc_string (
string_lc LowCardinality(String),
string_lc_nullable LowCardinality(Nullable(String)),
string_lc_array Array(LowCardinality(String)),
string_lc_array_nullable Array(LowCardinality(Nullable(String)))
) Engine=Memory`)
require.NoError(t, err)
require.Nil(t, res)
col := column.NewString(false)
colLC := column.NewLC(col)
colNil := column.NewString(true)
colNilLC := column.NewLC(colNil)
colArrayValues := column.NewString(false)
collArrayLC := column.NewLC(colArrayValues)
colArray := column.NewArray(collArrayLC)
colArrayValuesNil := column.NewString(true)
collArrayLCNil := column.NewLC(colArrayValuesNil)
colArrayNil := column.NewArray(collArrayLCNil)
var colInsert []string
var colInsertArray [][]string
var colInsertArrayNil [][]*string
var colNilInsert []*string
rows := 10
for i := 1; i <= rows; i++ {
val := fmt.Sprintf("%d", i)
valArray := []string{val, fmt.Sprintf("%d", i+1)}
valArrayNil := []*string{&val, nil}
col.AppendStringDict(val)
colInsert = append(colInsert, val)
		// example insert array
colInsertArray = append(colInsertArray, valArray)
colArray.AppendLen(len(valArray))
for _, v := range valArray {
colArrayValues.AppendStringDict(v)
}
// example insert nullable array
colInsertArrayNil = append(colInsertArrayNil, valArrayNil)
colArrayNil.AppendLen(len(valArrayNil))
for _, v := range valArrayNil {
colArrayValuesNil.AppendStringDictP(v)
}
// example add nullable
if i%2 == 0 {
colNilInsert = append(colNilInsert, &val)
if i <= rows/2 {
// example to add by pointer
colNil.AppendStringDictP(&val)
} else {
				// example to add without pointer
colNil.AppendStringDict(val)
}
} else {
colNilInsert = append(colNilInsert, nil)
if i <= rows/2 {
// example to add by pointer
colNil.AppendDictP(nil)
} else {
// example to add without pointer
colNil.AppendDictNil()
}
}
}
err = conn.Insert(context.Background(), `INSERT INTO
test_lc_string(string_lc,string_lc_nullable,string_lc_array,string_lc_array_nullable)
VALUES`,
colLC,
colNilLC,
colArray,
colArrayNil,
)
require.NoError(t, err)
// example read all
selectStmt, err := conn.Select(context.Background(), `SELECT
string_lc,string_lc_nullable,string_lc_array,string_lc_array_nullable
FROM test_lc_string`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead := column.NewString(false)
colLCRead := column.NewLC(colRead)
colNilRead := column.NewString(true)
colNilLCRead := column.NewLC(colNilRead)
colArrayReadData := column.NewString(false)
colArrayLCRead := column.NewLC(colArrayReadData)
colArrayRead := column.NewArray(colArrayLCRead)
colArrayReadDataNil := column.NewString(true)
colArrayLCReadNil := column.NewLC(colArrayReadDataNil)
colArrayReadNil := column.NewArray(colArrayLCReadNil)
var colDataDict []string
var colDataKeys []int
var colData []string
var colNilDataDict []string
var colNilDataKeys []int
var colNilData []*string
var colArrayDataDict []string
var colArrayData [][]string
var colArrayDataDictNil []string
var colArrayDataNil [][]*string
var colArrayLens []int
for selectStmt.Next() {
err = selectStmt.ReadColumns(colLCRead, colNilLCRead, colArrayRead, colArrayReadNil)
require.NoError(t, err)
colRead.ReadAllString(&colDataDict)
colLCRead.ReadAll(&colDataKeys)
for _, k := range colDataKeys {
colData = append(colData, colDataDict[k])
}
colNilRead.ReadAllString(&colNilDataDict)
colNilLCRead.ReadAll(&colNilDataKeys)
for _, k := range colNilDataKeys {
// 0 means nil
if k == 0 {
colNilData = append(colNilData, nil)
} else {
colNilData = append(colNilData, &colNilDataDict[k])
}
}
// read array
colArrayLens = colArrayLens[:0]
colArrayRead.ReadAll(&colArrayLens)
colArrayReadData.ReadAllString(&colArrayDataDict)
for _, l := range colArrayLens {
arr := make([]int, l)
arrData := make([]string, l)
colArrayLCRead.Fill(arr)
for i, k := range arr {
arrData[i] = colArrayDataDict[k]
}
colArrayData = append(colArrayData, arrData)
}
// read array nil
colArrayLens = colArrayLens[:0]
colArrayReadNil.ReadAll(&colArrayLens)
colArrayReadDataNil.ReadAllString(&colArrayDataDictNil)
for _, l := range colArrayLens {
arr := make([]int, l)
arrData := make([]*string, l)
colArrayLCReadNil.Fill(arr)
for i, k := range arr {
// 0 means nil
if k == 0 {
arrData[i] = nil
} else {
arrData[i] = &colArrayDataDictNil[k]
}
}
colArrayDataNil = append(colArrayDataNil, arrData)
}
}
require.NoError(t, selectStmt.Err())
assert.Equal(t, colInsert, colData)
assert.Equal(t, colNilInsert, colNilData)
assert.Equal(t, colInsertArray, colArrayData)
assert.Equal(t, colInsertArrayNil, colArrayDataNil)
selectStmt.Close()
// example one by one
selectStmt, err = conn.Select(context.Background(), `SELECT
string_lc,string_lc_nullable
FROM test_lc_string`)
require.NoError(t, err)
require.True(t, conn.IsBusy())
colRead = column.NewString(false)
colLCRead = column.NewLC(colRead)
colNilRead = column.NewString(true)
colNilLCRead = column.NewLC(colNilRead)
colDataDict = colDataDict[:0]
colData = colData[:0]
colNilDataDict = colNilDataDict[:0]
colNilData = colNilData[:0]
for selectStmt.Next() {
err = selectStmt.ReadColumns(colLCRead, colNilLCRead)
require.NoError(t, err)
colRead.ReadAllString(&colDataDict)
for colLCRead.Next() {
colData = append(colData, colDataDict[colLCRead.Value()])
}
colNilRead.ReadAllString(&colNilDataDict)
for colNilLCRead.Next() {
k := colNilLCRead.Value()
// 0 means nil
if k == 0 {
colNilData = append(colNilData, nil)
} else {
colNilData = append(colNilData, &colNilDataDict[k])
}
}
}
require.NoError(t, selectStmt.Err())
selectStmt.Close()
assert.Equal(t, colInsert, colData)
assert.Equal(t, colNilInsert, colNilData)
conn.RawConn().Close()
}
|
[
"\"CHX_TEST_TCP_CONN_STRING\""
] |
[] |
[
"CHX_TEST_TCP_CONN_STRING"
] |
[]
|
["CHX_TEST_TCP_CONN_STRING"]
|
go
| 1 | 0 | |
utils/file_backend_test.go
|
// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package utils
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/mattermost/mattermost-server/mlog"
"github.com/mattermost/mattermost-server/model"
)
type FileBackendTestSuite struct {
suite.Suite
settings model.FileSettings
backend FileBackend
}
func TestLocalFileBackendTestSuite(t *testing.T) {
// Setup a global logger to catch tests logging outside of app context
	// The global logger will be stomped by apps initializing but that's fine for testing. Ideally this won't happen.
mlog.InitGlobalLogger(mlog.NewLogger(&mlog.LoggerConfiguration{
EnableConsole: true,
ConsoleJson: true,
ConsoleLevel: "error",
EnableFile: false,
}))
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_LOCAL),
Directory: dir,
},
})
}
func TestS3FileBackendTestSuite(t *testing.T) {
runBackendTest(t, false)
}
func TestS3FileBackendTestSuiteWithEncryption(t *testing.T) {
runBackendTest(t, true)
}
func runBackendTest(t *testing.T, encrypt bool) {
s3Host := os.Getenv("CI_HOST")
if s3Host == "" {
s3Host = "dockerhost"
}
s3Port := os.Getenv("CI_MINIO_PORT")
if s3Port == "" {
s3Port = "9001"
}
s3Endpoint := fmt.Sprintf("%s:%s", s3Host, s3Port)
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_S3),
AmazonS3AccessKeyId: model.MINIO_ACCESS_KEY,
AmazonS3SecretAccessKey: model.MINIO_SECRET_KEY,
AmazonS3Bucket: model.MINIO_BUCKET,
AmazonS3Endpoint: s3Endpoint,
AmazonS3SSL: model.NewBool(false),
AmazonS3SSE: model.NewBool(encrypt),
},
})
}
func (s *FileBackendTestSuite) SetupTest() {
TranslationsPreInit()
backend, err := NewFileBackend(&s.settings, true)
require.Nil(s.T(), err)
s.backend = backend
}
func (s *FileBackendTestSuite) TestConnection() {
s.Nil(s.backend.TestConnection())
}
func (s *FileBackendTestSuite) TestReadWriteFile() {
b := []byte("test")
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path)
read, err := s.backend.ReadFile(path)
s.Nil(err)
readString := string(read)
s.EqualValues(readString, "test")
}
func (s *FileBackendTestSuite) TestReadWriteFileImage() {
b := []byte("testimage")
path := "tests/" + model.NewId() + ".png"
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path)
read, err := s.backend.ReadFile(path)
s.Nil(err)
readString := string(read)
s.EqualValues(readString, "testimage")
}
func (s *FileBackendTestSuite) TestFileExists() {
b := []byte("testimage")
path := "tests/" + model.NewId() + ".png"
_, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
defer s.backend.RemoveFile(path)
res, err := s.backend.FileExists(path)
s.Nil(err)
s.True(res)
res, err = s.backend.FileExists("tests/idontexist.png")
s.Nil(err)
s.False(res)
}
func (s *FileBackendTestSuite) TestCopyFile() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
err = s.backend.CopyFile(path1, path2)
s.Nil(err)
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Nil(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestCopyFileToDirectoryThatDoesntExist() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/newdirectory/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
err = s.backend.CopyFile(path1, path2)
s.Nil(err)
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Nil(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestMoveFile() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
s.Nil(s.backend.MoveFile(path1, path2))
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Error(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestRemoveFile() {
b := []byte("test")
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveFile(path))
_, err = s.backend.ReadFile(path)
s.Error(err)
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/foo")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/bar")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/asdf")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveDirectory("tests2"))
}
func (s *FileBackendTestSuite) TestListDirectory() {
b := []byte("test")
path1 := "19700101/" + model.NewId()
path2 := "19800101/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
written, err = s.backend.WriteFile(bytes.NewReader(b), path2)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path2)
paths, err := s.backend.ListDirectory("")
s.Nil(err)
found1 := false
found2 := false
for _, path := range *paths {
if path == "19700101" {
found1 = true
} else if path == "19800101" {
found2 = true
}
}
s.True(found1)
s.True(found2)
}
func (s *FileBackendTestSuite) TestRemoveDirectory() {
b := []byte("test")
written, err := s.backend.WriteFile(bytes.NewReader(b), "tests2/foo")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/bar")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/aaa")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveDirectory("tests2"))
_, err = s.backend.ReadFile("tests2/foo")
s.Error(err)
_, err = s.backend.ReadFile("tests2/bar")
s.Error(err)
_, err = s.backend.ReadFile("tests2/asdf")
s.Error(err)
}
|
[
"\"CI_HOST\"",
"\"CI_MINIO_PORT\""
] |
[] |
[
"CI_HOST",
"CI_MINIO_PORT"
] |
[]
|
["CI_HOST", "CI_MINIO_PORT"]
|
go
| 2 | 0 | |
data_steward/common.py
|
# Python imports
import os
# Project imports
from constants.bq_utils import VALIDATION_DATASET_REGEX
from constants.validation.participants.identity_match import REPORT_DIRECTORY_REGEX
import jinja2
# AOU required PII tables
PII_WILDCARD = 'pii*'
PII_NAME = 'pii_name'
PII_EMAIL = 'pii_email'
PII_PHONE_NUMBER = 'pii_phone_number'
PII_ADDRESS = 'pii_address'
PII_MRN = 'pii_mrn'
PARTICIPANT_MATCH = 'participant_match'
PII_TABLES = [
PII_NAME, PII_EMAIL, PII_PHONE_NUMBER, PII_ADDRESS, PII_MRN,
PARTICIPANT_MATCH
]
# AOU required CDM tables
CARE_SITE = 'care_site'
CONDITION_OCCURRENCE = 'condition_occurrence'
DEATH = 'death'
DEVICE_EXPOSURE = 'device_exposure'
DRUG_EXPOSURE = 'drug_exposure'
FACT_RELATIONSHIP = 'fact_relationship'
LOCATION = 'location'
MEASUREMENT = 'measurement'
NOTE = 'note'
OBSERVATION = 'observation'
PERSON = 'person'
PROCEDURE_OCCURRENCE = 'procedure_occurrence'
PROVIDER = 'provider'
SPECIMEN = 'specimen'
VISIT_OCCURRENCE = 'visit_occurrence'
AOU_REQUIRED = [
CARE_SITE, CONDITION_OCCURRENCE, DEATH, DEVICE_EXPOSURE, DRUG_EXPOSURE,
FACT_RELATIONSHIP, LOCATION, MEASUREMENT, NOTE, OBSERVATION, PERSON,
PROCEDURE_OCCURRENCE, PROVIDER, SPECIMEN, VISIT_OCCURRENCE
]
# Standardized clinical data tables in OMOP. All should contain a person_id column. See
# https://github.com/OHDSI/CommonDataModel/wiki/Standardized-Clinical-Data-Tables
# Clinical tables which have a corresponding mapping table.
MAPPED_CLINICAL_DATA_TABLES = [
VISIT_OCCURRENCE, CONDITION_OCCURRENCE, DRUG_EXPOSURE, MEASUREMENT,
PROCEDURE_OCCURRENCE, OBSERVATION, DEVICE_EXPOSURE, SPECIMEN
]
# Clinical tables which do not have a corresponding mapping table.
UNMAPPED_CLINICAL_DATA_TABLES = [DEATH]
# All clinical tables.
CLINICAL_DATA_TABLES = MAPPED_CLINICAL_DATA_TABLES + UNMAPPED_CLINICAL_DATA_TABLES
# other CDM tables
ATTRIBUTE_DEFINITION = 'attribute_definition'
COHORT_DEFINITION = 'cohort_definition'
CONDITION_ERA = 'condition_era'
DRUG_ERA = 'drug_era'
DOSE_ERA = 'dose_era'
DRUG_COST = 'drug_cost'
VISIT_COST = 'visit_cost'
DEVICE_COST = 'device_cost'
PROCEDURE_COST = 'procedure_cost'
OBSERVATION_PERIOD = 'observation_period'
PAYER_PLAN_PERIOD = 'payer_plan_period'
OTHER_CDM_TABLES = [
ATTRIBUTE_DEFINITION, COHORT_DEFINITION, CONDITION_ERA, DRUG_ERA, DOSE_ERA,
DRUG_COST, VISIT_COST, DEVICE_COST, PROCEDURE_COST, OBSERVATION_PERIOD,
PAYER_PLAN_PERIOD
]
CDM_TABLES = AOU_REQUIRED + OTHER_CDM_TABLES
AOU_REQUIRED_FILES = [table + '.csv' for table in AOU_REQUIRED]
PII_FILES = [table + '.csv' for table in PII_TABLES]
SUBMISSION_FILES = AOU_REQUIRED_FILES + PII_FILES
RESULTS_HTML = 'results.html'
PROCESSED_TXT = 'processed.txt'
LOG_JSON = 'log.json'
ACHILLES_HEEL_REPORT = 'achillesheel'
PERSON_REPORT = 'person'
DATA_DENSITY_REPORT = 'datadensity'
ALL_REPORTS = [ACHILLES_HEEL_REPORT, PERSON_REPORT, DATA_DENSITY_REPORT]
ALL_REPORT_FILES = [report + '.json' for report in ALL_REPORTS]
# Wearables
ACTIVITY_SUMMARY = 'activity_summary'
HEART_RATE_MINUTE_LEVEL = 'heart_rate_minute_level'
HEART_RATE_SUMMARY = 'heart_rate_summary'
STEPS_INTRADAY = 'steps_intraday'
FITBIT_TABLES = [
ACTIVITY_SUMMARY, HEART_RATE_MINUTE_LEVEL, HEART_RATE_SUMMARY,
STEPS_INTRADAY
]
# Vocabulary
CONCEPT = 'concept'
CONCEPT_ANCESTOR = 'concept_ancestor'
CONCEPT_CLASS = 'concept_class'
CONCEPT_RELATIONSHIP = 'concept_relationship'
CONCEPT_SYNONYM = 'concept_synonym'
DOMAIN = 'domain'
DRUG_STRENGTH = 'drug_strength'
RELATIONSHIP = 'relationship'
SOURCE_TO_CONCEPT_MAP = 'source_to_concept_map'
VOCABULARY = 'vocabulary'
VOCABULARY_TABLES = [
CONCEPT, CONCEPT_ANCESTOR, CONCEPT_CLASS, CONCEPT_RELATIONSHIP,
CONCEPT_SYNONYM, DOMAIN, DRUG_STRENGTH, RELATIONSHIP, VOCABULARY
]
# Achilles
ACHILLES_ANALYSIS = 'achilles_analysis'
ACHILLES_RESULTS = 'achilles_results'
ACHILLES_RESULTS_DIST = 'achilles_results_dist'
ACHILLES_TABLES = [ACHILLES_ANALYSIS, ACHILLES_RESULTS, ACHILLES_RESULTS_DIST]
ACHILLES_HEEL_RESULTS = 'achilles_heel_results'
ACHILLES_RESULTS_DERIVED = 'achilles_results_derived'
ACHILLES_HEEL_TABLES = [ACHILLES_HEEL_RESULTS, ACHILLES_RESULTS_DERIVED]
REQUIRED_TABLES = ['person']
REQUIRED_FILES = [table + '.csv' for table in REQUIRED_TABLES]
ACHILLES_EXPORT_PREFIX_STRING = "curation_report/data/"
IGNORE_STRING_LIST = [ACHILLES_EXPORT_PREFIX_STRING]
ACHILLES_EXPORT_DATASOURCES_JSON = ACHILLES_EXPORT_PREFIX_STRING + 'datasources.json'
# latest vocabulary dataset name in test and prod
VOCABULARY_DATASET = os.environ.get('VOCABULARY_DATASET')
CLINICAL = 'clinical'
ACHILLES = 'achilles'
CDM_COMPONENTS = [CLINICAL, VOCABULARY, ACHILLES]
UNKNOWN_FILE = 'Unknown file'
# fact relationship id constants
MEASUREMENT_DOMAIN_CONCEPT_ID = 21
OBSERVATION_DOMAIN_CONCEPT_ID = 27
PERSON_DOMAIN_CONCEPT_ID = 56
# ID Spaces
#
# The following constants are added to values in all ID (or "primary key") fields to prevent
# collisions during union/combine phases
# Values for ID fields for each HPO are summed with a factor of ID_CONSTANT_FACTOR
ID_CONSTANT_FACTOR = 1000000000000000
# Added to value in all ID fields in records coming from the RDR
RDR_ID_CONSTANT = ID_CONSTANT_FACTOR
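# Illustrative (an assumption about downstream usage, not stated in this file): a record id of
# 123 coming from the RDR would be stored as RDR_ID_CONSTANT + 123, keeping it outside the EHR id space.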
PARTICIPANT_DIR = 'participant/'
IGNORE_DIRECTORIES = [
PARTICIPANT_DIR,
REPORT_DIRECTORY_REGEX,
VALIDATION_DATASET_REGEX,
]
OBSERVATION_TO_MEASUREMENT_CONCEPT_ID = 581410
MEASUREMENT_TO_OBSERVATION_CONCEPT_ID = 581411
PARENT_TO_CHILD_MEASUREMENT_CONCEPT_ID = 581436
CHILD_TO_PARENT_MEASUREMENT_CONCEPT_ID = 581437
DIASTOLIC_TO_SYSTOLIC_CONCEPT_ID = 46233682
SYSTOLIC_TO_DIASTOLIC_CONCEPT_ID = 46233683
LATEST_REPORTS_JSON = 'latest_reports.json'
LATEST_RESULTS_JSON = 'latest_results.json'
REPORT_FOR_ACHILLES = 'achilles'
REPORT_FOR_RESULTS = 'results'
LOG_YEAR = '2019'
DELIMITER = '\t'
LINE_TERMINATOR = '\n'
TRANSFORM_FILES = 'transform_files'
APPEND_VOCABULARY = 'append_vocabulary'
APPEND_CONCEPTS = 'append_concepts'
ADD_AOU_VOCABS = 'add_aou_vocabs'
ERRORS = 'errors'
AOU_GEN_ID = 'AoU_General'
AOU_GEN_NAME = 'AoU_General'
AOU_GEN_VOCABULARY_CONCEPT_ID = '2000000000'
AOU_GEN_VOCABULARY_REFERENCE = 'https://docs.google.com/document/d/10Gji9VW5-RTysM-yAbRa77rXqVfDfO2li2U4LxUQH9g'
AOU_CUSTOM_ID = 'AoU_Custom'
AOU_CUSTOM_NAME = 'AoU_Custom'
AOU_CUSTOM_VOCABULARY_CONCEPT_ID = '2100000000'
AOU_CUSTOM_VOCABULARY_REFERENCE = 'https://precisionmedicineinitiative.atlassian.net/browse/DC-618'
OMOP_VOCABULARY_CONCEPT_ID = '44819096'
ERROR_APPENDING = 'Appending to {in_path} which already contains rows for {vocab_id}'
VERSION_TEMPLATE = 'insert version info here'
VOCABULARY_UPDATES = {
AOU_GEN_ID: [
AOU_GEN_ID, AOU_GEN_NAME, AOU_GEN_VOCABULARY_REFERENCE,
VERSION_TEMPLATE, AOU_GEN_VOCABULARY_CONCEPT_ID
],
AOU_CUSTOM_ID: [
AOU_CUSTOM_ID, AOU_CUSTOM_NAME, AOU_CUSTOM_VOCABULARY_REFERENCE,
VERSION_TEMPLATE, AOU_CUSTOM_VOCABULARY_CONCEPT_ID
]
}
COMBINED = 'combined'
UNIONED_EHR = 'unioned_ehr'
DEID = 'deid'
EHR = 'ehr'
RDR = 'rdr'
RELEASE = 'release'
OTHER = 'other'
MAPPING = 'mapping'
MAPPING_PREFIX = '_mapping_'
EXT = 'ext'
EXT_SUFFIX = '_ext'
DEID_MAP = '_deid_map'
MAX_DEID_DATE_SHIFT = 364
PID_RID_MAPPING = 'pid_rid_mapping'
PRIMARY_PID_RID_MAPPING = 'primary_pid_rid_mapping'
SITE_MASKING_TABLE_ID = 'site_maskings'
PIPELINE_TABLES = 'pipeline_tables'
COPE_SURVEY_MAP = 'cope_survey_semantic_version_map'
ZIP3_LOOKUP = 'zip3_lookup'
# Participant Summary
DRC_OPS = 'drc_ops'
PS_API_VALUES = 'ps_api_values'
# JINJA
JINJA_ENV = jinja2.Environment(
# block tags on their own lines
# will not cause extra white space
trim_blocks=True,
lstrip_blocks=True,
# syntax highlighting should be better
# with these comment delimiters
comment_start_string='--',
comment_end_string=' --',
# in jinja2 autoescape is for html; jinjasql supports autoescape for sql
# TODO Look into jinjasql for sql templating
autoescape=False)
|
[] |
[] |
[
"VOCABULARY_DATASET"
] |
[]
|
["VOCABULARY_DATASET"]
|
python
| 1 | 0 | |
api/cmd/portainer/main.go
|
package main // import "github.com/portainer/portainer"
import (
"encoding/json"
"os"
"strings"
"time"
"github.com/portainer/portainer"
"github.com/portainer/portainer/bolt"
"github.com/portainer/portainer/cli"
"github.com/portainer/portainer/cron"
"github.com/portainer/portainer/crypto"
"github.com/portainer/portainer/docker"
"github.com/portainer/portainer/exec"
"github.com/portainer/portainer/filesystem"
"github.com/portainer/portainer/git"
"github.com/portainer/portainer/http"
"github.com/portainer/portainer/http/client"
"github.com/portainer/portainer/jwt"
"github.com/portainer/portainer/ldap"
"github.com/portainer/portainer/libcompose"
"log"
)
func initCLI() *portainer.CLIFlags {
var cli portainer.CLIService = &cli.Service{}
flags, err := cli.ParseFlags(portainer.APIVersion)
if err != nil {
log.Fatal(err)
}
err = cli.ValidateFlags(flags)
if err != nil {
log.Fatal(err)
}
return flags
}
func initFileService(dataStorePath string) portainer.FileService {
fileService, err := filesystem.NewService(dataStorePath, "")
if err != nil {
log.Fatal(err)
}
return fileService
}
func initStore(dataStorePath string, fileService portainer.FileService) *bolt.Store {
store, err := bolt.NewStore(dataStorePath, fileService)
if err != nil {
log.Fatal(err)
}
err = store.Open()
if err != nil {
log.Fatal(err)
}
err = store.Init()
if err != nil {
log.Fatal(err)
}
err = store.MigrateData()
if err != nil {
log.Fatal(err)
}
return store
}
func initComposeStackManager(dataStorePath string) portainer.ComposeStackManager {
return libcompose.NewComposeStackManager(dataStorePath)
}
func initSwarmStackManager(assetsPath string, dataStorePath string, signatureService portainer.DigitalSignatureService, fileService portainer.FileService) (portainer.SwarmStackManager, error) {
return exec.NewSwarmStackManager(assetsPath, dataStorePath, signatureService, fileService)
}
func initJWTService(authenticationEnabled bool) portainer.JWTService {
if authenticationEnabled {
jwtService, err := jwt.NewService()
if err != nil {
log.Fatal(err)
}
return jwtService
}
return nil
}
func initDigitalSignatureService() portainer.DigitalSignatureService {
return crypto.NewECDSAService(os.Getenv("AGENT_SECRET"))
}
func initCryptoService() portainer.CryptoService {
return &crypto.Service{}
}
func initLDAPService() portainer.LDAPService {
return &ldap.Service{}
}
func initGitService() portainer.GitService {
return &git.Service{}
}
func initClientFactory(signatureService portainer.DigitalSignatureService) *docker.ClientFactory {
return docker.NewClientFactory(signatureService)
}
func initSnapshotter(clientFactory *docker.ClientFactory) portainer.Snapshotter {
return docker.NewSnapshotter(clientFactory)
}
func initJobScheduler() portainer.JobScheduler {
return cron.NewJobScheduler()
}
func loadSnapshotSystemSchedule(jobScheduler portainer.JobScheduler, snapshotter portainer.Snapshotter, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, settingsService portainer.SettingsService) error {
settings, err := settingsService.Settings()
if err != nil {
return err
}
schedules, err := scheduleService.SchedulesByJobType(portainer.SnapshotJobType)
if err != nil {
return err
}
var snapshotSchedule *portainer.Schedule
if len(schedules) == 0 {
snapshotJob := &portainer.SnapshotJob{}
snapshotSchedule = &portainer.Schedule{
ID: portainer.ScheduleID(scheduleService.GetNextIdentifier()),
Name: "system_snapshot",
CronExpression: "@every " + settings.SnapshotInterval,
Recurring: true,
JobType: portainer.SnapshotJobType,
SnapshotJob: snapshotJob,
Created: time.Now().Unix(),
}
} else {
snapshotSchedule = &schedules[0]
}
snapshotJobContext := cron.NewSnapshotJobContext(endpointService, snapshotter)
snapshotJobRunner := cron.NewSnapshotJobRunner(snapshotSchedule, snapshotJobContext)
err = jobScheduler.ScheduleJob(snapshotJobRunner)
if err != nil {
return err
}
if len(schedules) == 0 {
return scheduleService.CreateSchedule(snapshotSchedule)
}
return nil
}
func loadEndpointSyncSystemSchedule(jobScheduler portainer.JobScheduler, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, flags *portainer.CLIFlags) error {
if *flags.ExternalEndpoints == "" {
return nil
}
log.Println("Using external endpoint definition. Endpoint management via the API will be disabled.")
schedules, err := scheduleService.SchedulesByJobType(portainer.EndpointSyncJobType)
if err != nil {
return err
}
if len(schedules) != 0 {
return nil
}
endpointSyncJob := &portainer.EndpointSyncJob{}
endpointSyncSchedule := &portainer.Schedule{
ID: portainer.ScheduleID(scheduleService.GetNextIdentifier()),
Name: "system_endpointsync",
CronExpression: "@every " + *flags.SyncInterval,
Recurring: true,
JobType: portainer.EndpointSyncJobType,
EndpointSyncJob: endpointSyncJob,
Created: time.Now().Unix(),
}
endpointSyncJobContext := cron.NewEndpointSyncJobContext(endpointService, *flags.ExternalEndpoints)
endpointSyncJobRunner := cron.NewEndpointSyncJobRunner(endpointSyncSchedule, endpointSyncJobContext)
err = jobScheduler.ScheduleJob(endpointSyncJobRunner)
if err != nil {
return err
}
return scheduleService.CreateSchedule(endpointSyncSchedule)
}
func loadSchedulesFromDatabase(jobScheduler portainer.JobScheduler, jobService portainer.JobService, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, fileService portainer.FileService) error {
schedules, err := scheduleService.Schedules()
if err != nil {
return err
}
for _, schedule := range schedules {
if schedule.JobType == portainer.ScriptExecutionJobType {
jobContext := cron.NewScriptExecutionJobContext(jobService, endpointService, fileService)
jobRunner := cron.NewScriptExecutionJobRunner(&schedule, jobContext)
err = jobScheduler.ScheduleJob(jobRunner)
if err != nil {
return err
}
}
}
return nil
}
func initStatus(endpointManagement, snapshot bool, flags *portainer.CLIFlags) *portainer.Status {
return &portainer.Status{
Analytics: !*flags.NoAnalytics,
Authentication: !*flags.NoAuth,
EndpointManagement: endpointManagement,
Snapshot: snapshot,
Version: portainer.APIVersion,
}
}
func initDockerHub(dockerHubService portainer.DockerHubService) error {
_, err := dockerHubService.DockerHub()
if err == portainer.ErrObjectNotFound {
dockerhub := &portainer.DockerHub{
Authentication: false,
Username: "",
Password: "",
}
return dockerHubService.UpdateDockerHub(dockerhub)
} else if err != nil {
return err
}
return nil
}
func initSettings(settingsService portainer.SettingsService, flags *portainer.CLIFlags) error {
_, err := settingsService.Settings()
if err == portainer.ErrObjectNotFound {
settings := &portainer.Settings{
LogoURL: *flags.Logo,
AuthenticationMethod: portainer.AuthenticationInternal,
LDAPSettings: portainer.LDAPSettings{
AutoCreateUsers: true,
TLSConfig: portainer.TLSConfiguration{},
SearchSettings: []portainer.LDAPSearchSettings{
portainer.LDAPSearchSettings{},
},
GroupSearchSettings: []portainer.LDAPGroupSearchSettings{
portainer.LDAPGroupSearchSettings{},
},
},
OAuthSettings: portainer.OAuthSettings{},
AllowBindMountsForRegularUsers: true,
AllowPrivilegedModeForRegularUsers: true,
EnableHostManagementFeatures: false,
SnapshotInterval: *flags.SnapshotInterval,
}
if *flags.Templates != "" {
settings.TemplatesURL = *flags.Templates
}
if *flags.Labels != nil {
settings.BlackListedLabels = *flags.Labels
} else {
settings.BlackListedLabels = make([]portainer.Pair, 0)
}
return settingsService.UpdateSettings(settings)
} else if err != nil {
return err
}
return nil
}
func initTemplates(templateService portainer.TemplateService, fileService portainer.FileService, templateURL, templateFile string) error {
if templateURL != "" {
log.Printf("Portainer started with the --templates flag. Using external templates, template management will be disabled.")
return nil
}
existingTemplates, err := templateService.Templates()
if err != nil {
return err
}
if len(existingTemplates) != 0 {
log.Printf("Templates already registered inside the database. Skipping template import.")
return nil
}
templatesJSON, err := fileService.GetFileContent(templateFile)
if err != nil {
log.Println("Unable to retrieve template definitions via filesystem")
return err
}
var templates []portainer.Template
err = json.Unmarshal(templatesJSON, &templates)
if err != nil {
log.Println("Unable to parse templates file. Please review your template definition file.")
return err
}
for _, template := range templates {
err := templateService.CreateTemplate(&template)
if err != nil {
return err
}
}
return nil
}
func retrieveFirstEndpointFromDatabase(endpointService portainer.EndpointService) *portainer.Endpoint {
endpoints, err := endpointService.Endpoints()
if err != nil {
log.Fatal(err)
}
return &endpoints[0]
}
func loadAndParseKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
private, public, err := fileService.LoadKeyPair()
if err != nil {
return err
}
return signatureService.ParseKeyPair(private, public)
}
func generateAndStoreKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
private, public, err := signatureService.GenerateKeyPair()
if err != nil {
return err
}
privateHeader, publicHeader := signatureService.PEMHeaders()
return fileService.StoreKeyPair(private, public, privateHeader, publicHeader)
}
func initKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
existingKeyPair, err := fileService.KeyPairFilesExist()
if err != nil {
log.Fatal(err)
}
if existingKeyPair {
return loadAndParseKeyPair(fileService, signatureService)
}
return generateAndStoreKeyPair(fileService, signatureService)
}
func createTLSSecuredEndpoint(flags *portainer.CLIFlags, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
tlsConfiguration := portainer.TLSConfiguration{
TLS: *flags.TLS,
TLSSkipVerify: *flags.TLSSkipVerify,
}
if *flags.TLS {
tlsConfiguration.TLSCACertPath = *flags.TLSCacert
tlsConfiguration.TLSCertPath = *flags.TLSCert
tlsConfiguration.TLSKeyPath = *flags.TLSKey
} else if !*flags.TLS && *flags.TLSSkipVerify {
tlsConfiguration.TLS = true
}
endpointID := endpointService.GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: "primary",
URL: *flags.EndpointURL,
GroupID: portainer.EndpointGroupID(1),
Type: portainer.DockerEnvironment,
TLSConfig: tlsConfiguration,
AuthorizedUsers: []portainer.UserID{},
AuthorizedTeams: []portainer.TeamID{},
Extensions: []portainer.EndpointExtension{},
Tags: []string{},
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.Snapshot{},
}
if strings.HasPrefix(endpoint.URL, "tcp://") {
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration.TLSCACertPath, tlsConfiguration.TLSCertPath, tlsConfiguration.TLSKeyPath, tlsConfiguration.TLSSkipVerify)
if err != nil {
return err
}
agentOnDockerEnvironment, err := client.ExecutePingOperation(endpoint.URL, tlsConfig)
if err != nil {
return err
}
if agentOnDockerEnvironment {
endpoint.Type = portainer.AgentOnDockerEnvironment
}
}
return snapshotAndPersistEndpoint(endpoint, endpointService, snapshotter)
}
func createUnsecuredEndpoint(endpointURL string, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
if strings.HasPrefix(endpointURL, "tcp://") {
_, err := client.ExecutePingOperation(endpointURL, nil)
if err != nil {
return err
}
}
endpointID := endpointService.GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: "primary",
URL: endpointURL,
GroupID: portainer.EndpointGroupID(1),
Type: portainer.DockerEnvironment,
TLSConfig: portainer.TLSConfiguration{},
AuthorizedUsers: []portainer.UserID{},
AuthorizedTeams: []portainer.TeamID{},
Extensions: []portainer.EndpointExtension{},
Tags: []string{},
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.Snapshot{},
}
return snapshotAndPersistEndpoint(endpoint, endpointService, snapshotter)
}
func snapshotAndPersistEndpoint(endpoint *portainer.Endpoint, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
snapshot, err := snapshotter.CreateSnapshot(endpoint)
endpoint.Status = portainer.EndpointStatusUp
if err != nil {
log.Printf("http error: endpoint snapshot error (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err)
}
if snapshot != nil {
endpoint.Snapshots = []portainer.Snapshot{*snapshot}
}
return endpointService.CreateEndpoint(endpoint)
}
func initEndpoint(flags *portainer.CLIFlags, endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) error {
if *flags.EndpointURL == "" {
return nil
}
endpoints, err := endpointService.Endpoints()
if err != nil {
return err
}
if len(endpoints) > 0 {
log.Println("Instance already has defined endpoints. Skipping the endpoint defined via CLI.")
return nil
}
if *flags.TLS || *flags.TLSSkipVerify {
return createTLSSecuredEndpoint(flags, endpointService, snapshotter)
}
return createUnsecuredEndpoint(*flags.EndpointURL, endpointService, snapshotter)
}
func initJobService(dockerClientFactory *docker.ClientFactory) portainer.JobService {
return docker.NewJobService(dockerClientFactory)
}
func initExtensionManager(fileService portainer.FileService, extensionService portainer.ExtensionService) (portainer.ExtensionManager, error) {
extensionManager := exec.NewExtensionManager(fileService, extensionService)
extensions, err := extensionService.Extensions()
if err != nil {
return nil, err
}
for _, extension := range extensions {
err := extensionManager.EnableExtension(&extension, extension.License.LicenseKey)
if err != nil {
log.Printf("Unable to enable extension: %s [extension: %s]", err.Error(), extension.Name)
extension.Enabled = false
extension.License.Valid = false
extensionService.Persist(&extension)
}
}
return extensionManager, nil
}
func terminateIfNoAdminCreated(userService portainer.UserService) {
timer1 := time.NewTimer(5 * time.Minute)
<-timer1.C
users, err := userService.UsersByRole(portainer.AdministratorRole)
if err != nil {
log.Fatal(err)
}
if len(users) == 0 {
log.Fatal("No administrator account was created after 5 min. Shutting down the Portainer instance for security reasons.")
return
}
}
func main() {
flags := initCLI()
fileService := initFileService(*flags.Data)
store := initStore(*flags.Data, fileService)
defer store.Close()
jwtService := initJWTService(!*flags.NoAuth)
ldapService := initLDAPService()
gitService := initGitService()
cryptoService := initCryptoService()
digitalSignatureService := initDigitalSignatureService()
err := initKeyPair(fileService, digitalSignatureService)
if err != nil {
log.Fatal(err)
}
extensionManager, err := initExtensionManager(fileService, store.ExtensionService)
if err != nil {
log.Fatal(err)
}
clientFactory := initClientFactory(digitalSignatureService)
jobService := initJobService(clientFactory)
snapshotter := initSnapshotter(clientFactory)
endpointManagement := true
if *flags.ExternalEndpoints != "" {
endpointManagement = false
}
swarmStackManager, err := initSwarmStackManager(*flags.Assets, *flags.Data, digitalSignatureService, fileService)
if err != nil {
log.Fatal(err)
}
composeStackManager := initComposeStackManager(*flags.Data)
err = initTemplates(store.TemplateService, fileService, *flags.Templates, *flags.TemplateFile)
if err != nil {
log.Fatal(err)
}
err = initSettings(store.SettingsService, flags)
if err != nil {
log.Fatal(err)
}
jobScheduler := initJobScheduler()
err = loadSchedulesFromDatabase(jobScheduler, jobService, store.ScheduleService, store.EndpointService, fileService)
if err != nil {
log.Fatal(err)
}
err = loadEndpointSyncSystemSchedule(jobScheduler, store.ScheduleService, store.EndpointService, flags)
if err != nil {
log.Fatal(err)
}
if *flags.Snapshot {
err = loadSnapshotSystemSchedule(jobScheduler, snapshotter, store.ScheduleService, store.EndpointService, store.SettingsService)
if err != nil {
log.Fatal(err)
}
}
jobScheduler.Start()
err = initDockerHub(store.DockerHubService)
if err != nil {
log.Fatal(err)
}
applicationStatus := initStatus(endpointManagement, *flags.Snapshot, flags)
err = initEndpoint(flags, store.EndpointService, snapshotter)
if err != nil {
log.Fatal(err)
}
adminPasswordHash := ""
if *flags.AdminPasswordFile != "" {
content, err := fileService.GetFileContent(*flags.AdminPasswordFile)
if err != nil {
log.Fatal(err)
}
adminPasswordHash, err = cryptoService.Hash(string(content))
if err != nil {
log.Fatal(err)
}
} else if *flags.AdminPassword != "" {
adminPasswordHash = *flags.AdminPassword
}
if adminPasswordHash != "" {
users, err := store.UserService.UsersByRole(portainer.AdministratorRole)
if err != nil {
log.Fatal(err)
}
if len(users) == 0 {
log.Printf("Creating admin user with password hash %s", adminPasswordHash)
user := &portainer.User{
Username: "admin",
Role: portainer.AdministratorRole,
Password: adminPasswordHash,
}
err := store.UserService.CreateUser(user)
if err != nil {
log.Fatal(err)
}
} else {
log.Println("Instance already has an administrator user defined. Skipping admin password related flags.")
}
}
if !*flags.NoAuth {
go terminateIfNoAdminCreated(store.UserService)
}
var server portainer.Server = &http.Server{
Status: applicationStatus,
BindAddress: *flags.Addr,
AssetsPath: *flags.Assets,
AuthDisabled: *flags.NoAuth,
EndpointManagement: endpointManagement,
UserService: store.UserService,
TeamService: store.TeamService,
TeamMembershipService: store.TeamMembershipService,
EndpointService: store.EndpointService,
EndpointGroupService: store.EndpointGroupService,
ExtensionService: store.ExtensionService,
ResourceControlService: store.ResourceControlService,
SettingsService: store.SettingsService,
RegistryService: store.RegistryService,
DockerHubService: store.DockerHubService,
StackService: store.StackService,
ScheduleService: store.ScheduleService,
TagService: store.TagService,
TemplateService: store.TemplateService,
WebhookService: store.WebhookService,
SwarmStackManager: swarmStackManager,
ComposeStackManager: composeStackManager,
ExtensionManager: extensionManager,
CryptoService: cryptoService,
JWTService: jwtService,
FileService: fileService,
LDAPService: ldapService,
GitService: gitService,
SignatureService: digitalSignatureService,
JobScheduler: jobScheduler,
Snapshotter: snapshotter,
SSL: *flags.SSL,
SSLCert: *flags.SSLCert,
SSLKey: *flags.SSLKey,
DockerClientFactory: clientFactory,
JobService: jobService,
}
log.Printf("Starting Portainer %s on %s", portainer.APIVersion, *flags.Addr)
err = server.Start()
if err != nil {
log.Fatal(err)
}
}
|
environment: ["\"AGENT_SECRET\""] | variablearg: [] | constarg: ["AGENT_SECRET"] | variableargjson: [] | constargjson: ["AGENT_SECRET"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence:
src/dcm/pkg/module/userpermissions_test.go
|
package module_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
dcm "github.com/open-ness/EMCO/src/dcm/pkg/module"
"github.com/open-ness/EMCO/src/orchestrator/pkg/infra/db"
orch "github.com/open-ness/EMCO/src/orchestrator/pkg/module"
)
var _ = Describe("Userpermissions", func() {
var (
mdb *db.MockDB
client *dcm.UserPermissionClient
)
BeforeEach(func() {
client = dcm.NewUserPermissionClient()
mdb = new(db.MockDB)
mdb.Err = nil
mdb.Items = []map[string]map[string][]byte{}
db.DBconn = mdb
})
Describe("User permission operations", func() {
Context("from an empty database", func() {
BeforeEach(func() {
// create project in mocked db
okey := orch.ProjectKey{
ProjectName: "project",
}
p := orch.Project{}
p.MetaData = orch.ProjectMetaData{
Name: "project",
Description: "",
UserData1: "",
UserData2: "",
}
mdb.Insert("orchestrator", okey, nil, "projectmetadata", p)
// create logical cloud in mocked db
lkey := dcm.LogicalCloudKey{
Project: "project",
LogicalCloudName: "logicalcloud",
}
lc := dcm.LogicalCloud{}
lc.MetaData = dcm.MetaDataList{
LogicalCloudName: "logicalcloud",
Description: "",
UserData1: "",
UserData2: "",
}
lc.Specification = dcm.Spec{
NameSpace: "testns",
Level: "1",
}
mdb.Insert("orchestrator", lkey, nil, "logicalcloud", lc)
})
It("creation should succeed and return the resource created", func() {
up := _createTestUserPermission("testup", "testns")
userPermission, err := client.CreateUserPerm("project", "logicalcloud", up)
Expect(err).ShouldNot(HaveOccurred())
Expect(userPermission.MetaData.UserPermissionName).To(Equal("testup"))
Expect(userPermission.Specification.Namespace).To(Equal("testns"))
Expect(userPermission.Specification.APIGroups).To(Equal([]string{"", "apps"}))
Expect(userPermission.Specification.Resources).To(Equal([]string{"deployments", "pods"}))
Expect(userPermission.Specification.Verbs).To(Equal([]string{"get", "list"}))
})
It("creation should succeed and return the resource created (cluster-wide)", func() {
up := _createTestUserPermission("testup", "")
userPermission, err := client.CreateUserPerm("project", "logicalcloud", up)
Expect(err).ShouldNot(HaveOccurred())
Expect(userPermission.MetaData.UserPermissionName).To(Equal("testup"))
Expect(userPermission.Specification.Namespace).To(Equal(""))
Expect(userPermission.Specification.APIGroups).To(Equal([]string{"", "apps"}))
Expect(userPermission.Specification.Resources).To(Equal([]string{"deployments", "pods"}))
Expect(userPermission.Specification.Verbs).To(Equal([]string{"get", "list"}))
})
It("get should fail and not return anything", func() {
userPermission, err := client.GetUserPerm("project", "logicalcloud", "testup")
Expect(err).Should(HaveOccurred())
Expect(userPermission).To(Equal(dcm.UserPermission{}))
})
It("create followed by get should return what was created", func() {
up := _createTestUserPermission("testup", "testns")
_, _ = client.CreateUserPerm("project", "logicalcloud", up)
userPermission, err := client.GetUserPerm("project", "logicalcloud", "testup")
Expect(err).ShouldNot(HaveOccurred())
Expect(userPermission).To(Equal(up))
})
It("create followed by get-all should return only what was created", func() {
up := _createTestUserPermission("testup", "testns")
_, _ = client.CreateUserPerm("project", "logicalcloud", up)
userPermissions, err := client.GetAllUserPerms("project", "logicalcloud")
Expect(err).ShouldNot(HaveOccurred())
Expect(len(userPermissions)).To(Equal(1))
Expect(userPermissions[0]).To(Equal(up))
})
It("three creates followed by get-all should return all that was created", func() {
up1 := _createTestUserPermission("testup1", "testns")
up2 := _createTestUserPermission("testup2", "testns")
up3 := _createTestUserPermission("testup3", "testns")
_, _ = client.CreateUserPerm("project", "logicalcloud", up1)
_, _ = client.CreateUserPerm("project", "logicalcloud", up2)
_, _ = client.CreateUserPerm("project", "logicalcloud", up3)
userPermissions, err := client.GetAllUserPerms("project", "logicalcloud")
Expect(err).ShouldNot(HaveOccurred())
Expect(len(userPermissions)).To(Equal(3))
Expect(userPermissions[0]).To(Equal(up1))
Expect(userPermissions[1]).To(Equal(up2))
Expect(userPermissions[2]).To(Equal(up3))
})
It("delete after creation should succeed and database remain empty", func() {
up := _createTestUserPermission("testup", "testns")
_, _ = client.CreateUserPerm("project", "logicalcloud", up)
err := client.DeleteUserPerm("project", "logicalcloud", "testup")
Expect(err).ShouldNot(HaveOccurred())
userPermissions, err := client.GetAllUserPerms("project", "logicalcloud")
Expect(len(userPermissions)).To(Equal(0))
})
// will uncomment after general mockdb issues resolved
// It("delete when nothing exists should fail", func() {
// err := client.DeleteUserPerm("project", "logicalcloud", "testup")
// Expect(err).Should(HaveOccurred())
// })
It("update after creation should succeed and return updated resource", func() {
up := _createTestUserPermission("testup", "testns")
_, _ = client.CreateUserPerm("project", "logicalcloud", up)
up.Specification.APIGroups = []string{"", "apps", "k8splugin.io"}
userPermission, err := client.UpdateUserPerm("project", "logicalcloud", "testup", up)
Expect(err).ShouldNot(HaveOccurred())
Expect(userPermission.MetaData.UserPermissionName).To(Equal("testup"))
Expect(userPermission.Specification.APIGroups).To(Equal([]string{"", "apps", "k8splugin.io"}))
Expect(userPermission.Specification.Resources).To(Equal([]string{"deployments", "pods"}))
Expect(userPermission.Specification.Verbs).To(Equal([]string{"get", "list"}))
})
It("create followed by updating the name is disallowed and should fail", func() {
up := _createTestUserPermission("testup", "testns")
_, _ = client.CreateUserPerm("project", "logicalcloud", up)
up.MetaData.UserPermissionName = "updated"
userPermission, err := client.UpdateUserPerm("project", "logicalcloud", "testup", up)
Expect(err).Should(HaveOccurred())
Expect(userPermission).To(Equal(dcm.UserPermission{}))
})
})
})
})
// _createTestUserPermission is a helper function to reduce code duplication
func _createTestUserPermission(name string, namespace string) dcm.UserPermission {
up := dcm.UserPermission{}
up.MetaData = dcm.UPMetaDataList{
UserPermissionName: name,
Description: "",
UserData1: "",
UserData2: "",
}
up.Specification = dcm.UPSpec{
Namespace: namespace,
APIGroups: []string{"", "apps"},
Resources: []string{"deployments", "pods"},
Verbs: []string{"get", "list"},
}
return up
}
|
environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: go | constargcount: null | variableargcount: null | sentence: null
lantz/ino/common.py
|
# -*- coding: utf-8 -*-
"""
lantz.ino.common
~~~~~~~~~~~~~~~~
Common functions and definitions.
The lantz.ino package provides helper classes and methods to work with Arduino.
:copyright: 2018 by The Lantz Authors
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
import os
import pickle
import yaml
def write_user_timestamp(folder):
hfile = os.path.join(folder, 'inodriver_user.h')
cppfile = os.path.join(folder, 'inodriver_user.cpp')
hfile_ts = os.path.getmtime(hfile)
cppfile_ts = os.path.getmtime(cppfile)
with open(os.path.join('.ts.pickle'), 'wb') as fo:
pickle.dump((hfile_ts, cppfile_ts), fo)
def read_user_timestamp(folder):
hfile = os.path.join(folder, 'inodriver_user.h')
cppfile = os.path.join(folder, 'inodriver_user.cpp')
hfile_ts = os.path.getmtime(hfile)
cppfile_ts = os.path.getmtime(cppfile)
try:
with open(os.path.join('.ts.pickle'), 'rb') as fi:
return pickle.load(fi), (hfile_ts, cppfile_ts)
except FileNotFoundError:
return (None, None), (hfile_ts, cppfile_ts)
def user_local_matches_remote(sketch_folder):
last, current = read_user_timestamp(sketch_folder)
return last == current
class Packfile(namedtuple('Packfile', 'sketch_folder class_spec fqbn port usbID')):
@classmethod
def from_defaults(cls, sketch_folder, class_spec):
return cls(sketch_folder, class_spec, '', '', '')
@classmethod
def from_file(cls, filename):
with open(filename, 'r', encoding='utf-8') as fi:
data = yaml.safe_load(fi)
return cls(*map(data.get, cls._fields))
def to_file(self, filename):
with open(filename, mode='w', encoding='utf-8') as fo:
yaml.dump(dict(self._asdict()), fo, default_flow_style=False)
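# A minimal usage sketch (editor's illustrative addition, not part of the
# original lantz module). The sketch folder, class spec, and file name below
# are hypothetical; the point is the to_file/from_file round trip of Packfile.
if __name__ == '__main__':
    # Build a Packfile with default (empty) board settings.
    pack = Packfile.from_defaults('my_sketch', 'mydriver.ino:MyDriver')
    # Persist it to YAML and read it back; both objects should compare equal.
    pack.to_file('packfile.yaml')
    assert Packfile.from_file('packfile.yaml') == pack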
|
environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: python | constargcount: null | variableargcount: null | sentence: null
bootstrap.py
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = """\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
"""
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option(
"-t",
"--accept-buildout-test-releases",
dest="accept_buildout_test_releases",
action="store_true",
default=False,
help=(
"Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."
),
)
parser.add_option(
"-c",
"--config-file",
help=("Specify the path to the buildout configuration " "file to be used."),
)
parser.add_option(
"-f", "--find-links", help=("Specify a URL to search for buildout releases")
)
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(
urlopen("https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py").read(),
ez,
)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez["use_setuptools"](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [
sys.executable,
"-c",
"from setuptools.command.easy_install import main; main()",
"-mZqNxd",
tmpeggs,
]
find_links = os.environ.get(
"bootstrap-testing-find-links",
options.find_links
or (
"http://downloads.buildout.org/"
if options.accept_buildout_test_releases
else None
),
)
if find_links:
cmd.extend(["-f", find_links])
setuptools_path = ws.find(pkg_resources.Requirement.parse("setuptools")).location
requirement = "zc.buildout"
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = "*final-", "*final"
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == "*") and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = "==".join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception("Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if "=" not in a]:
args.append("bootstrap")
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ["-c", options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
environment: [] | variablearg: [] | constarg: ["bootstrap-testing-find-links"] | variableargjson: [] | constargjson: ["bootstrap-testing-find-links"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence:
salt/grains/core.py
|
# -*- coding: utf-8 -*-
"""
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
"""
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import locale
import logging
import os
import platform
import re
import socket
import sys
import time
import uuid
from errno import EACCES, EPERM
import salt.exceptions
import salt.log
# Solve the chicken-and-egg problem where grains need to run before any
# of the modules are loaded and generally available for any usage.
import salt.modules.cmdmod
import salt.modules.smbios
import salt.utils.args
import salt.utils.dns
import salt.utils.files
import salt.utils.network
import salt.utils.path
import salt.utils.pkg.rpm
import salt.utils.platform
import salt.utils.stringutils
from distro import linux_distribution
from salt.ext import six
from salt.ext.six.moves import range
try:
import dateutil.tz # pylint: disable=import-error
_DATEUTIL_TZ = True
except ImportError:
_DATEUTIL_TZ = False
log = logging.getLogger(__name__)
HAS_WMI = False
if salt.utils.platform.is_windows():
import salt.utils.win_osinfo
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi # pylint: disable=import-error
import salt.utils.winapi
import win32api
import salt.utils.win_reg
HAS_WMI = True
except ImportError:
log.exception(
"Unable to import Python wmi module, some core grains " "will be missing"
)
__proxyenabled__ = ["*"]
__FQDN__ = None
__salt__ = {
"cmd.run": salt.modules.cmdmod._run_quiet,
"cmd.retcode": salt.modules.cmdmod._retcode_quiet,
"cmd.run_all": salt.modules.cmdmod._run_all_quiet,
"smbios.records": salt.modules.smbios.records,
"smbios.get": salt.modules.smbios.get,
}
HAS_UNAME = hasattr(os, "uname")
_INTERFACES = {}
# Possible value for h_errno defined in netdb.h
HOST_NOT_FOUND = 1
NO_DATA = 4
def _windows_cpudata():
"""
Return some CPU information on Windows minions
"""
# Provides:
# num_cpus
# cpu_model
grains = {}
if "NUMBER_OF_PROCESSORS" in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains["num_cpus"] = int(os.environ["NUMBER_OF_PROCESSORS"])
except ValueError:
grains["num_cpus"] = 1
grains["cpu_model"] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
vname="ProcessorNameString",
).get("vdata")
return grains
def _linux_cpudata():
"""
Return some CPU information for Linux minions
"""
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = "/proc/cpuinfo"
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with salt.utils.files.fopen(cpuinfo, "r") as _fp:
for line in _fp:
comps = line.split(":")
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == "processor":
grains["num_cpus"] = int(val) + 1
# head -2 /proc/cpuinfo
# vendor_id : IBM/S390
# # processors : 2
elif key == "# processors":
grains["num_cpus"] = int(val)
elif key == "vendor_id":
grains["cpu_model"] = val
elif key == "model name":
grains["cpu_model"] = val
elif key == "flags":
grains["cpu_flags"] = val.split()
elif key == "Features":
grains["cpu_flags"] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
# BogoMIPS : 697.95
# Features : swp half thumb fastmult vfp edsp java tls
# CPU implementer : 0x41
# CPU architecture: 7
# CPU variant : 0x0
# CPU part : 0xb76
# CPU revision : 7
#
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000
elif key == "Processor":
grains["cpu_model"] = val.split("-")[0]
grains["num_cpus"] = 1
if "num_cpus" not in grains:
grains["num_cpus"] = 0
if "cpu_model" not in grains:
grains["cpu_model"] = "Unknown"
if "cpu_flags" not in grains:
grains["cpu_flags"] = []
return grains
def _linux_gpu_data():
"""
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
"""
if __opts__.get("enable_lspci", True) is False:
return {}
if __opts__.get("enable_gpu_grains", True) is False:
return {}
lspci = salt.utils.path.which("lspci")
if not lspci:
log.debug(
"The `lspci` binary is not available on the system. GPU grains "
"will not be available."
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
known_vendors = [
"nvidia",
"amd",
"ati",
"intel",
"cirrus logic",
"vmware",
"matrox",
"aspeed",
]
gpu_classes = ("vga compatible controller", "3d controller")
devs = []
try:
lspci_out = __salt__["cmd.run"]("{0} -vmm".format(lspci))
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
# otherwise the last device is not evaluated as a cur_dev and ignored.
lspci_list = lspci_out.splitlines()
lspci_list.append("")
for line in lspci_list:
# check for record-separating empty lines
if line == "":
if cur_dev.get("Class", "").lower() in gpu_classes:
devs.append(cur_dev)
cur_dev = {}
continue
if re.match(r"^\w+:\s+.*", line):
key, val = line.split(":", 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
log.debug("Unexpected lspci output: '%s'", line)
if error:
log.warning(
"Error loading grains, unexpected linux_gpu_data output, "
"check that you have a valid shell configured and "
"permissions to run lspci command"
)
except OSError:
pass
gpus = []
for gpu in devs:
vendor_strings = re.split("[^A-Za-z0-9]", gpu["Vendor"].lower())
# default vendor to 'unknown', overwrite if we match a known one
vendor = "unknown"
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
gpus.append({"vendor": vendor, "model": gpu["Device"]})
grains = {}
grains["num_gpus"] = len(gpus)
grains["gpus"] = gpus
return grains
def _netbsd_gpu_data():
"""
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
"""
known_vendors = [
"nvidia",
"amd",
"ati",
"intel",
"cirrus logic",
"vmware",
"matrox",
"aspeed",
]
gpus = []
try:
pcictl_out = __salt__["cmd.run"]("pcictl pci0 list")
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r"[0-9:]+ ({0}) (.+) \(VGA .+\)".format(vendor), line, re.IGNORECASE
)
if vendor_match:
gpus.append(
{
"vendor": vendor_match.group(1),
"model": vendor_match.group(2),
}
)
except OSError:
pass
grains = {}
grains["num_gpus"] = len(gpus)
grains["gpus"] = gpus
return grains
def _osx_gpudata():
"""
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
"""
gpus = []
try:
pcictl_out = __salt__["cmd.run"]("system_profiler SPDisplaysDataType")
for line in pcictl_out.splitlines():
fieldname, _, fieldval = line.partition(": ")
if fieldname.strip() == "Chipset Model":
vendor, _, model = fieldval.partition(" ")
vendor = vendor.lower()
gpus.append({"vendor": vendor, "model": model})
except OSError:
pass
grains = {}
grains["num_gpus"] = len(gpus)
grains["gpus"] = gpus
return grains
def _bsd_cpudata(osdata):
"""
Return CPU information for BSD-like systems
"""
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.path.which("sysctl")
arch = salt.utils.path.which("arch")
cmds = {}
if sysctl:
cmds.update(
{
"num_cpus": "{0} -n hw.ncpu".format(sysctl),
"cpuarch": "{0} -n hw.machine".format(sysctl),
"cpu_model": "{0} -n hw.model".format(sysctl),
}
)
if arch and osdata["kernel"] == "OpenBSD":
cmds["cpuarch"] = "{0} -s".format(arch)
if osdata["kernel"] == "Darwin":
cmds["cpu_model"] = "{0} -n machdep.cpu.brand_string".format(sysctl)
cmds["cpu_flags"] = "{0} -n machdep.cpu.features".format(sysctl)
grains = dict([(k, __salt__["cmd.run"](v)) for k, v in six.iteritems(cmds)])
if "cpu_flags" in grains and isinstance(grains["cpu_flags"], six.string_types):
grains["cpu_flags"] = grains["cpu_flags"].split(" ")
if osdata["kernel"] == "NetBSD":
grains["cpu_flags"] = []
for line in __salt__["cmd.run"]("cpuctl identify 0").splitlines():
cpu_match = re.match(r"cpu[0-9]:\ features[0-9]?\ .+<(.+)>", line)
if cpu_match:
flag = cpu_match.group(1).split(",")
grains["cpu_flags"].extend(flag)
if osdata["kernel"] == "FreeBSD" and os.path.isfile("/var/run/dmesg.boot"):
grains["cpu_flags"] = []
# TODO: at least it needs to be tested for BSD other then FreeBSD
with salt.utils.files.fopen("/var/run/dmesg.boot", "r") as _fp:
cpu_here = False
for line in _fp:
if line.startswith("CPU: "):
cpu_here = True # starts CPU descr
continue
if cpu_here:
if not line.startswith(" "):
break # game over
if "Features" in line:
start = line.find("<")
end = line.find(">")
if start > 0 and end > 0:
flag = line[start + 1 : end].split(",")
grains["cpu_flags"].extend(flag)
try:
grains["num_cpus"] = int(grains["num_cpus"])
except ValueError:
grains["num_cpus"] = 1
return grains
def _sunos_cpudata():
"""
Return the CPU information for Solaris-like systems
"""
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
grains["cpu_flags"] = []
grains["cpuarch"] = __salt__["cmd.run"]("isainfo -k")
psrinfo = "/usr/sbin/psrinfo 2>/dev/null"
grains["num_cpus"] = len(
__salt__["cmd.run"](psrinfo, python_shell=True).splitlines()
)
kstat_info = "kstat -p cpu_info:*:*:brand"
for line in __salt__["cmd.run"](kstat_info).splitlines():
match = re.match(r"(\w+:\d+:\w+\d+:\w+)\s+(.+)", line)
if match:
grains["cpu_model"] = match.group(2)
isainfo = "isainfo -n -v"
for line in __salt__["cmd.run"](isainfo).splitlines():
match = re.match(r"^\s+(.+)", line)
if match:
cpu_flags = match.group(1).split()
grains["cpu_flags"].extend(cpu_flags)
return grains
def _aix_cpudata():
"""
Return CPU information for AIX systems
"""
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which("prtconf")
if cmd:
data = __salt__["cmd.run"]("{0}".format(cmd)) + os.linesep
for dest, regstring in (
("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"),
("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"),
("cpu_model", r"(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)"),
("num_cpus", r"(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)"),
):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", "")
else:
log.error("The 'prtconf' binary was not found in $PATH.")
return grains
def _linux_memdata():
"""
Return the memory information for Linux-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
meminfo = "/proc/meminfo"
if os.path.isfile(meminfo):
with salt.utils.files.fopen(meminfo, "r") as ifile:
for line in ifile:
comps = line.rstrip("\n").split(":")
if not len(comps) > 1:
continue
if comps[0].strip() == "MemTotal":
# Use floor division to force output to be an integer
grains["mem_total"] = int(comps[1].split()[0]) // 1024
if comps[0].strip() == "SwapTotal":
# Use floor division to force output to be an integer
grains["swap_total"] = int(comps[1].split()[0]) // 1024
return grains
def _osx_memdata():
"""
Return the memory information for BSD-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
sysctl = salt.utils.path.which("sysctl")
if sysctl:
mem = __salt__["cmd.run"]("{0} -n hw.memsize".format(sysctl))
swap_total = (
__salt__["cmd.run"]("{0} -n vm.swapusage".format(sysctl))
.split()[2]
.replace(",", ".")
)
if swap_total.endswith("K"):
_power = 2 ** 10
elif swap_total.endswith("M"):
_power = 2 ** 20
elif swap_total.endswith("G"):
_power = 2 ** 30
swap_total = float(swap_total[:-1]) * _power
grains["mem_total"] = int(mem) // 1024 // 1024
grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains
def _bsd_memdata(osdata):
"""
Return the memory information for BSD-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
sysctl = salt.utils.path.which("sysctl")
if sysctl:
mem = __salt__["cmd.run"]("{0} -n hw.physmem".format(sysctl))
if osdata["kernel"] == "NetBSD" and mem.startswith("-"):
mem = __salt__["cmd.run"]("{0} -n hw.physmem64".format(sysctl))
grains["mem_total"] = int(mem) // 1024 // 1024
if osdata["kernel"] in ["OpenBSD", "NetBSD"]:
swapctl = salt.utils.path.which("swapctl")
swap_data = __salt__["cmd.run"]("{0} -sk".format(swapctl))
if swap_data == "no swap devices configured":
swap_total = 0
else:
swap_total = swap_data.split(" ")[1]
else:
swap_total = __salt__["cmd.run"]("{0} -n vm.swap_total".format(sysctl))
grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains
def _sunos_memdata():
"""
Return the memory information for SunOS-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
prtconf = "/usr/sbin/prtconf 2>/dev/null"
for line in __salt__["cmd.run"](prtconf, python_shell=True).splitlines():
comps = line.split(" ")
if comps[0].strip() == "Memory" and comps[1].strip() == "size:":
grains["mem_total"] = int(comps[2].strip())
swap_cmd = salt.utils.path.which("swap")
swap_data = __salt__["cmd.run"]("{0} -s".format(swap_cmd)).split()
try:
swap_avail = int(swap_data[-2][:-1])
swap_used = int(swap_data[-4][:-1])
swap_total = (swap_avail + swap_used) // 1024
except ValueError:
swap_total = None
grains["swap_total"] = swap_total
return grains
def _aix_memdata():
"""
Return the memory information for AIX systems
"""
grains = {"mem_total": 0, "swap_total": 0}
prtconf = salt.utils.path.which("prtconf")
if prtconf:
for line in __salt__["cmd.run"](prtconf, python_shell=True).splitlines():
comps = [x for x in line.strip().split(" ") if x]
if len(comps) > 2 and "Memory" in comps[0] and "Size" in comps[1]:
grains["mem_total"] = int(comps[2])
break
else:
log.error("The 'prtconf' binary was not found in $PATH.")
swap_cmd = salt.utils.path.which("swap")
if swap_cmd:
swap_data = __salt__["cmd.run"]("{0} -s".format(swap_cmd)).split()
try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError:
swap_total = None
grains["swap_total"] = swap_total
else:
log.error("The 'swap' binary was not found in $PATH.")
return grains
def _windows_memdata():
"""
Return the memory information for Windows systems
"""
grains = {"mem_total": 0}
# get the Total Physical memory as reported by msinfo32
tot_bytes = win32api.GlobalMemoryStatusEx()["TotalPhys"]
# return memory info in gigabytes
grains["mem_total"] = int(tot_bytes / (1024 ** 2))
return grains
def _memdata(osdata):
"""
Gather information about the system memory
"""
# Provides:
# mem_total
# swap_total, for supported systems.
grains = {"mem_total": 0}
if osdata["kernel"] == "Linux":
grains.update(_linux_memdata())
elif osdata["kernel"] in ("FreeBSD", "OpenBSD", "NetBSD"):
grains.update(_bsd_memdata(osdata))
elif osdata["kernel"] == "Darwin":
grains.update(_osx_memdata())
elif osdata["kernel"] == "SunOS":
grains.update(_sunos_memdata())
elif osdata["kernel"] == "AIX":
grains.update(_aix_memdata())
elif osdata["kernel"] == "Windows" and HAS_WMI:
grains.update(_windows_memdata())
return grains
def _aix_get_machine_id():
"""
Parse the output of lsattr -El sys0 for os_uuid
"""
grains = {}
cmd = salt.utils.path.which("lsattr")
if cmd:
data = __salt__["cmd.run"]("{0} -El sys0".format(cmd)) + os.linesep
uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["machine_id"] = res.group(1).strip()
break
else:
log.error("The 'lsattr' binary was not found in $PATH.")
return grains
def _windows_virtual(osdata):
"""
Returns what type of virtual hardware is under the hood, kvm or physical
"""
# Provides:
# virtual
# virtual_subtype
grains = dict()
if osdata["kernel"] != "Windows":
return grains
grains["virtual"] = osdata.get("virtual", "physical")
# It is possible that the 'manufacturer' and/or 'productname' grains
# exist but have a value of None.
manufacturer = osdata.get("manufacturer", "")
if manufacturer is None:
manufacturer = ""
productname = osdata.get("productname", "")
if productname is None:
productname = ""
if "QEMU" in manufacturer:
# FIXME: Make this detect between kvm or qemu
grains["virtual"] = "kvm"
if "Bochs" in manufacturer:
grains["virtual"] = "kvm"
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif "oVirt" in productname:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "oVirt"
# Red Hat Enterprise Virtualization
elif "RHEV Hypervisor" in productname:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "rhev"
# Product Name: VirtualBox
elif "VirtualBox" in productname:
grains["virtual"] = "VirtualBox"
# Product Name: VMware Virtual Platform
elif "VMware Virtual Platform" in productname:
grains["virtual"] = "VMware"
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif "Microsoft" in manufacturer and "Virtual Machine" in productname:
grains["virtual"] = "VirtualPC"
# Manufacturer: Parallels Software International Inc.
elif "Parallels Software" in manufacturer:
grains["virtual"] = "Parallels"
# Apache CloudStack
elif "CloudStack KVM Hypervisor" in productname:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "cloudstack"
return grains
def _virtual(osdata):
"""
Returns what type of virtual hardware is under the hood, kvm or physical
"""
# This is going to be a monster; if you are running a VM, please test this
# grain and submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {"virtual": osdata.get("virtual", "physical")}
# Skip the below loop on platforms which have none of the desired cmds
# This is a temporary measure until we can write proper virtual hardware
# detection.
skip_cmds = ("AIX",)
# list of commands to be executed to determine the 'virtual' grain
_cmds = ["systemd-detect-virt", "virt-what", "dmidecode"]
# test first for virt-what, which covers most of the desired functionality
# on most platforms
if not salt.utils.platform.is_windows() and osdata["kernel"] not in skip_cmds:
if salt.utils.path.which("virt-what"):
_cmds = ["virt-what"]
# Check if enable_lspci is True or False
if __opts__.get("enable_lspci", True) is True:
# /proc/bus/pci does not exists, lspci will fail
if os.path.exists("/proc/bus/pci"):
_cmds += ["lspci"]
# Add additional last resort commands
if osdata["kernel"] in skip_cmds:
_cmds = ()
# Quick backout for BrandZ (Solaris LX Branded zones)
# Don't waste time trying other commands to detect the virtual grain
if (
HAS_UNAME
and osdata["kernel"] == "Linux"
and "BrandZ virtual linux" in os.uname()
):
grains["virtual"] = "zone"
return grains
failed_commands = set()
for command in _cmds:
args = []
if osdata["kernel"] == "Darwin":
command = "system_profiler"
args = ["SPDisplaysDataType"]
elif osdata["kernel"] == "SunOS":
virtinfo = salt.utils.path.which("virtinfo")
if virtinfo:
try:
ret = __salt__["cmd.run_all"]("{0} -a".format(virtinfo))
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
failed_commands.add(virtinfo)
else:
if ret["stdout"].endswith("not supported"):
command = "prtdiag"
else:
command = "virtinfo"
else:
command = "prtdiag"
cmd = salt.utils.path.which(command)
if not cmd:
continue
cmd = "{0} {1}".format(cmd, " ".join(args))
try:
ret = __salt__["cmd.run_all"](cmd)
if ret["retcode"] > 0:
if salt.log.is_logging_configured():
# systemd-detect-virt always returns > 0 on non-virtualized
# systems
# prtdiag only works in the global zone, skip if it fails
if (
salt.utils.platform.is_windows()
or "systemd-detect-virt" in cmd
or "prtdiag" in cmd
):
continue
failed_commands.add(command)
continue
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
if salt.utils.platform.is_windows():
continue
failed_commands.add(command)
continue
output = ret["stdout"]
if command == "system_profiler":
macoutput = output.lower()
if "0x1ab8" in macoutput:
grains["virtual"] = "Parallels"
if "parallels" in macoutput:
grains["virtual"] = "Parallels"
if "vmware" in macoutput:
grains["virtual"] = "VMware"
if "0x15ad" in macoutput:
grains["virtual"] = "VMware"
if "virtualbox" in macoutput:
grains["virtual"] = "VirtualBox"
# Break out of the loop so the next log message is not issued
break
elif command == "systemd-detect-virt":
if output in (
"qemu",
"kvm",
"oracle",
"xen",
"bochs",
"chroot",
"uml",
"systemd-nspawn",
):
grains["virtual"] = output
break
elif "vmware" in output:
grains["virtual"] = "VMware"
break
elif "microsoft" in output:
grains["virtual"] = "VirtualPC"
break
elif "lxc" in output:
grains["virtual"] = "LXC"
break
elif "systemd-nspawn" in output:
grains["virtual"] = "LXC"
break
elif command == "virt-what":
try:
output = output.splitlines()[-1]
except IndexError:
pass
if output in ("kvm", "qemu", "uml", "xen", "lxc"):
grains["virtual"] = output
break
elif "vmware" in output:
grains["virtual"] = "VMware"
break
elif "parallels" in output:
grains["virtual"] = "Parallels"
break
elif "hyperv" in output:
grains["virtual"] = "HyperV"
break
elif command == "dmidecode":
# Product Name: VirtualBox
if "Vendor: QEMU" in output:
# FIXME: Make this detect between kvm or qemu
grains["virtual"] = "kvm"
if "Manufacturer: QEMU" in output:
grains["virtual"] = "kvm"
if "Vendor: Bochs" in output:
grains["virtual"] = "kvm"
if "Manufacturer: Bochs" in output:
grains["virtual"] = "kvm"
if "BHYVE" in output:
grains["virtual"] = "bhyve"
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif "Manufacturer: oVirt" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "ovirt"
# Red Hat Enterprise Virtualization
elif "Product Name: RHEV Hypervisor" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "rhev"
elif "VirtualBox" in output:
grains["virtual"] = "VirtualBox"
# Product Name: VMware Virtual Platform
elif "VMware" in output:
grains["virtual"] = "VMware"
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif ": Microsoft" in output and "Virtual Machine" in output:
grains["virtual"] = "VirtualPC"
# Manufacturer: Parallels Software International Inc.
elif "Parallels Software" in output:
grains["virtual"] = "Parallels"
elif "Manufacturer: Google" in output:
grains["virtual"] = "kvm"
# Proxmox KVM
elif "Vendor: SeaBIOS" in output:
grains["virtual"] = "kvm"
# Break out of the loop, lspci parsing is not necessary
break
elif command == "lspci":
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if "vmware" in model:
grains["virtual"] = "VMware"
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH
# VirtualBox Guest Service
elif "virtualbox" in model:
grains["virtual"] = "VirtualBox"
elif "qemu" in model:
grains["virtual"] = "kvm"
elif "virtio" in model:
grains["virtual"] = "kvm"
# Break out of the loop so the next log message is not issued
break
elif command == "prtdiag":
model = output.lower().split("\n")[0]
if "vmware" in model:
grains["virtual"] = "VMware"
elif "virtualbox" in model:
grains["virtual"] = "VirtualBox"
elif "qemu" in model:
grains["virtual"] = "kvm"
elif "joyent smartdc hvm" in model:
grains["virtual"] = "kvm"
break
elif command == "virtinfo":
grains["virtual"] = "LDOM"
break
choices = ("Linux", "HP-UX")
isdir = os.path.isdir
sysctl = salt.utils.path.which("sysctl")
if osdata["kernel"] in choices:
if os.path.isdir("/proc"):
try:
self_root = os.stat("/")
init_root = os.stat("/proc/1/root/.")
if self_root != init_root:
grains["virtual_subtype"] = "chroot"
except (IOError, OSError):
pass
if isdir("/proc/vz"):
if os.path.isfile("/proc/vz/version"):
grains["virtual"] = "openvzhn"
elif os.path.isfile("/proc/vz/veinfo"):
grains["virtual"] = "openvzve"
# a posteriori, it's expected for these to have failed:
failed_commands.discard("lspci")
failed_commands.discard("dmidecode")
# Provide additional detection for OpenVZ
if os.path.isfile("/proc/self/status"):
with salt.utils.files.fopen("/proc/self/status") as status_file:
vz_re = re.compile(r"^envID:\s+(\d+)$")
for line in status_file:
vz_match = vz_re.match(line.rstrip("\n"))
if vz_match and int(vz_match.groups()[0]) != 0:
grains["virtual"] = "openvzve"
elif vz_match and int(vz_match.groups()[0]) == 0:
grains["virtual"] = "openvzhn"
if isdir("/proc/sys/xen") or isdir("/sys/bus/xen") or isdir("/proc/xen"):
if os.path.isfile("/proc/xen/xsd_kva"):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains["virtual_subtype"] = "Xen Dom0"
else:
if osdata.get("productname", "") == "HVM domU":
# Requires dmidecode!
grains["virtual_subtype"] = "Xen HVM DomU"
elif os.path.isfile("/proc/xen/capabilities") and os.access(
"/proc/xen/capabilities", os.R_OK
):
with salt.utils.files.fopen("/proc/xen/capabilities") as fhr:
if "control_d" not in fhr.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains["virtual_subtype"] = "Xen PV DomU"
else:
# Shouldn't get to this, but just in case
grains["virtual_subtype"] = "Xen Dom0"
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir("/sys/bus/xen"):
if "xen:" in __salt__["cmd.run"]("dmesg").lower():
grains["virtual_subtype"] = "Xen PV DomU"
elif os.path.isfile("/sys/bus/xen/drivers/xenconsole"):
# An actual DomU will have the xenconsole driver
grains["virtual_subtype"] = "Xen PV DomU"
# If a Dom0 or DomU was detected, obviously this is xen
if "dom" in grains.get("virtual_subtype", "").lower():
grains["virtual"] = "xen"
# Check container type after hypervisors, to avoid variable overwrite on containers running in virtual environment.
if os.path.isfile("/proc/1/cgroup"):
try:
with salt.utils.files.fopen("/proc/1/cgroup", "r") as fhr:
fhr_contents = fhr.read()
if ":/lxc/" in fhr_contents:
grains["virtual"] = "container"
grains["virtual_subtype"] = "LXC"
elif ":/kubepods/" in fhr_contents:
grains["virtual_subtype"] = "kubernetes"
elif ":/libpod_parent/" in fhr_contents:
grains["virtual_subtype"] = "libpod"
else:
if any(
x in fhr_contents
for x in (":/system.slice/docker", ":/docker/", ":/docker-ce/")
):
grains["virtual"] = "container"
grains["virtual_subtype"] = "Docker"
except IOError:
pass
if os.path.isfile("/proc/cpuinfo"):
with salt.utils.files.fopen("/proc/cpuinfo", "r") as fhr:
if "QEMU Virtual CPU" in fhr.read():
grains["virtual"] = "kvm"
if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"):
try:
with salt.utils.files.fopen(
"/sys/devices/virtual/dmi/id/product_name", "r"
) as fhr:
output = salt.utils.stringutils.to_unicode(
fhr.read(), errors="replace"
)
if "VirtualBox" in output:
grains["virtual"] = "VirtualBox"
elif "RHEV Hypervisor" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "rhev"
elif "oVirt Node" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "ovirt"
elif "Google" in output:
grains["virtual"] = "gce"
elif "BHYVE" in output:
grains["virtual"] = "bhyve"
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
log.debug(
"The content in /sys/devices/virtual/dmi/id/product_name is not valid"
)
except IOError:
pass
elif osdata["kernel"] == "FreeBSD":
kenv = salt.utils.path.which("kenv")
if kenv:
product = __salt__["cmd.run"]("{0} smbios.system.product".format(kenv))
maker = __salt__["cmd.run"]("{0} smbios.system.maker".format(kenv))
if product.startswith("VMware"):
grains["virtual"] = "VMware"
if product.startswith("VirtualBox"):
grains["virtual"] = "VirtualBox"
if maker.startswith("Xen"):
grains["virtual_subtype"] = "{0} {1}".format(maker, product)
grains["virtual"] = "xen"
if maker.startswith("Microsoft") and product.startswith("Virtual"):
grains["virtual"] = "VirtualPC"
if maker.startswith("OpenStack"):
grains["virtual"] = "OpenStack"
if maker.startswith("Bochs"):
grains["virtual"] = "kvm"
if sysctl:
hv_vendor = __salt__["cmd.run"]("{0} -n hw.hv_vendor".format(sysctl))
model = __salt__["cmd.run"]("{0} -n hw.model".format(sysctl))
jail = __salt__["cmd.run"]("{0} -n security.jail.jailed".format(sysctl))
if "bhyve" in hv_vendor:
grains["virtual"] = "bhyve"
elif "QEMU Virtual CPU" in model:
grains["virtual"] = "kvm"
if jail == "1":
grains["virtual_subtype"] = "jail"
elif osdata["kernel"] == "OpenBSD":
if "manufacturer" in osdata:
if osdata["manufacturer"] in ["QEMU", "Red Hat", "Joyent"]:
grains["virtual"] = "kvm"
if osdata["manufacturer"] == "OpenBSD":
grains["virtual"] = "vmm"
elif osdata["kernel"] == "SunOS":
if grains["virtual"] == "LDOM":
roles = []
for role in ("control", "io", "root", "service"):
subtype_cmd = "{0} -c current get -H -o value {1}-role".format(
cmd, role
)
ret = __salt__["cmd.run_all"]("{0}".format(subtype_cmd))
if ret["stdout"] == "true":
roles.append(role)
if roles:
grains["virtual_subtype"] = roles
else:
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.path.which("zonename")
if zonename:
zone = __salt__["cmd.run"]("{0}".format(zonename))
if zone != "global":
grains["virtual"] = "zone"
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir("/.SUNWnative"):
grains["virtual"] = "zone"
elif osdata["kernel"] == "NetBSD":
if sysctl:
if "QEMU Virtual CPU" in __salt__["cmd.run"](
"{0} -n machdep.cpu_brand".format(sysctl)
):
grains["virtual"] = "kvm"
elif "invalid" not in __salt__["cmd.run"](
"{0} -n machdep.xen.suspend".format(sysctl)
):
grains["virtual"] = "Xen PV DomU"
elif "VMware" in __salt__["cmd.run"](
"{0} -n machdep.dmi.system-vendor".format(sysctl)
):
grains["virtual"] = "VMware"
# NetBSD has Xen dom0 support
elif (
__salt__["cmd.run"]("{0} -n machdep.idle-mechanism".format(sysctl))
== "xen"
):
if os.path.isfile("/var/run/xenconsoled.pid"):
grains["virtual_subtype"] = "Xen Dom0"
# If we have a virtual_subtype, we're virtual, but maybe we couldn't
# figure out what specific virtual type we were?
if grains.get("virtual_subtype") and grains["virtual"] == "physical":
grains["virtual"] = "virtual"
for command in failed_commands:
log.info(
"Although '%s' was found in path, the current user "
"cannot execute it. Grains output might not be "
"accurate.",
command,
)
return grains
def _virtual_hv(osdata):
"""
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
"""
grains = {}
# Bail early if we're not running on Xen
try:
if "xen" not in osdata["virtual"]:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ("major", "minor", "extra"):
with salt.utils.files.fopen(
"/sys/hypervisor/version/{}".format(fn), "r"
) as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains["virtual_hv_version"] = "{}.{}{}".format(
version["major"], version["minor"], version["extra"]
)
grains["virtual_hv_version_info"] = [
version["major"],
version["minor"],
version["extra"],
]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {
0: "writable_page_tables",
1: "writable_descriptor_tables",
2: "auto_translated_physmap",
3: "supervisor_mode_kernel",
4: "pae_pgdir_above_4gb",
5: "mmu_pt_update_preserve_ad",
7: "gnttab_map_avail_bits",
8: "hvm_callback_vector",
9: "hvm_safe_pvclock",
10: "hvm_pirqs",
11: "dom0",
12: "grant_map_identity",
13: "memory_op_vnode_supported",
14: "ARM_SMCCC_supported",
}
try:
with salt.utils.files.fopen("/sys/hypervisor/properties/features", "r") as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains["virtual_hv_features"] = features
grains["virtual_hv_features_list"] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains
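# Editor's note (hedged illustration, not part of upstream Salt): the feature
# mask read above is a plain hex string whose set bits index xen_feature_table.
# For a hypothetical sysfs value of "00000705" (bits 0, 2, 8, 9 and 10 set),
# the resulting grains would come out roughly as:
#
#     virtual_hv_features:      "00000705"
#     virtual_hv_features_list: ['writable_page_tables',
#                                'auto_translated_physmap',
#                                'hvm_callback_vector',
#                                'hvm_safe_pvclock',
#                                'hvm_pirqs']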
def _ps(osdata):
"""
Return the ps grain
"""
grains = {}
bsd_choices = ("FreeBSD", "NetBSD", "OpenBSD", "MacOS")
if osdata["os"] in bsd_choices:
grains["ps"] = "ps auxwww"
elif osdata["os_family"] == "Solaris":
grains["ps"] = "/usr/ucb/ps auxwww"
elif osdata["os"] == "Windows":
grains["ps"] = "tasklist.exe"
elif osdata.get("virtual", "") == "openvzhn":
grains["ps"] = (
'ps -fH -p $(grep -l "^envID:[[:space:]]*0\\$" '
'/proc/[0-9]*/status | sed -e "s=/proc/\\([0-9]*\\)/.*=\\1=") '
"| awk '{ $7=\"\"; print }'"
)
elif osdata["os_family"] == "AIX":
grains["ps"] = "/usr/bin/ps auxww"
elif osdata["os_family"] == "NILinuxRT":
grains["ps"] = "ps -o user,pid,ppid,tty,time,comm"
else:
grains["ps"] = "ps -efHww"
return grains
def _clean_value(key, val):
"""
Clean out well-known bogus values.
If it isn't clean (for example has value 'None'), return None.
Otherwise, return the original value.
NOTE: This logic also exists in the smbios module. This function is
for use when not using smbios to retrieve the value.
"""
if val is None or not val or re.match("none", val, flags=re.IGNORECASE):
return None
elif "uuid" in key:
# Try each RFC4122 UUID version (1-4) to check if it's actually a UUID
for uuidver in range(1, 5):
try:
uuid.UUID(val, version=uuidver)
return val
except ValueError:
continue
log.trace("HW %s value %s is an invalid UUID", key, val.replace("\n", " "))
return None
elif re.search("serial|part|version", key):
# 'To be filled by O.E.M.'
# 'Not applicable' etc.
# 'Not specified' etc.
# 0000000, 1234567 etc.
# begone!
if (
re.match(r"^[0]+$", val)
or re.match(r"[0]?1234567[8]?[9]?[0]?", val)
or re.search(
r"sernum|part[_-]?number|specified|filled|applicable",
val,
flags=re.IGNORECASE,
)
):
return None
elif re.search("asset|manufacturer", key):
# AssetTag0. Manufacturer04. Begone.
if re.search(
r"manufacturer|to be filled|available|asset|^no(ne|t)",
val,
flags=re.IGNORECASE,
):
return None
else:
# map unspecified, undefined, unknown & whatever to None
if re.search(r"to be filled", val, flags=re.IGNORECASE) or re.search(
r"un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)",
val,
flags=re.IGNORECASE,
):
return None
return val
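# Editor's note (hedged examples, not part of upstream Salt): rough behaviour
# of _clean_value on a few typical inputs, following the rules above:
#
#     >>> _clean_value("serialnumber", "0000000")        # all zeroes
#     None
#     >>> _clean_value("uuid", "Not Settable")           # not a valid RFC4122 UUID
#     None
#     >>> _clean_value("productname", "PowerEdge R740")  # looks legitimate
#     'PowerEdge R740'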
def _windows_os_release_grain(caption, product_type):
"""
helper function for getting the osrelease grain
:return:
"""
# This creates the osrelease grain based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
version = "Unknown"
release = ""
if "Server" in caption:
# Edge case here to handle MS Product that doesn't contain a year
if re.match(r"^Microsoft Hyper-V Server$", caption):
version = "2019"
else:
for item in caption.split(" "):
# If it's all digits, then it's version
if re.match(r"\d+", item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r"^R\d+$", item):
release = item
os_release = "{0}Server{1}".format(version, release)
else:
for item in caption.split(" "):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r"^(\d+(\.\d+)?)|Thin|Vista|XP$", item):
version = item
os_release = version
# If the version is still Unknown, revert back to the old way of getting
# the os_release
# https://github.com/saltstack/salt/issues/52339
if os_release in ["Unknown"]:
os_release = platform.release()
server = {
"Vista": "2008Server",
"7": "2008ServerR2",
"8": "2012Server",
"8.1": "2012ServerR2",
"10": "2016Server",
}
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()`
# function started reporting the Desktop version instead of the
# Server version on Server versions of Windows, so we need to look
# those up. So, if you find a Server platform that's a key in the
# server dictionary, then look up the actual Server release.
# (Product Type 1 is Desktop, Everything else is Server)
if product_type > 1 and os_release in server:
os_release = server[os_release]
return os_release
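# Editor's note (hedged examples, not part of upstream Salt): expected results
# of the caption parsing above for two representative captions:
#
#     >>> _windows_os_release_grain("Microsoft Windows Server 2012 R2 Datacenter", 3)
#     '2012ServerR2'
#     >>> _windows_os_release_grain("Microsoft Windows 10 Pro", 1)
#     '10'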
def _windows_platform_data():
"""
Use the platform module for as much as we can.
"""
# Provides:
# kernelrelease
# kernelversion
# osversion
# osrelease
# osservicepack
# osmanufacturer
# manufacturer
# productname
# biosversion
# serialnumber
# osfullname
# timezone
# windowsdomain
# windowsdomaintype
# motherboard.productname
# motherboard.serialnumber
# virtual
if not HAS_WMI:
return {}
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# https://msdn.microsoft.com/en-us/library/aa394239(v=vs.85).aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394072(v=vs.85).aspx
motherboard = {"product": None, "serial": None}
try:
motherboardinfo = wmi_c.Win32_BaseBoard()[0]
motherboard["product"] = motherboardinfo.Product
motherboard["serial"] = motherboardinfo.SerialNumber
except IndexError:
log.debug("Motherboard info not available on this system")
kernel_version = platform.version()
info = salt.utils.win_osinfo.get_os_version_info()
net_info = salt.utils.win_osinfo.get_join_info()
service_pack = None
if info["ServicePackMajor"] > 0:
service_pack = "".join(["SP", six.text_type(info["ServicePackMajor"])])
os_release = _windows_os_release_grain(
caption=osinfo.Caption, product_type=osinfo.ProductType
)
grains = {
"kernelrelease": _clean_value("kernelrelease", osinfo.Version),
"kernelversion": _clean_value("kernelversion", kernel_version),
"osversion": _clean_value("osversion", osinfo.Version),
"osrelease": _clean_value("osrelease", os_release),
"osservicepack": _clean_value("osservicepack", service_pack),
"osmanufacturer": _clean_value("osmanufacturer", osinfo.Manufacturer),
"manufacturer": _clean_value("manufacturer", systeminfo.Manufacturer),
"productname": _clean_value("productname", systeminfo.Model),
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
"biosversion": _clean_value("biosversion", biosinfo.Name.strip()),
"serialnumber": _clean_value("serialnumber", biosinfo.SerialNumber),
"osfullname": _clean_value("osfullname", osinfo.Caption),
"timezone": _clean_value("timezone", timeinfo.Description),
"windowsdomain": _clean_value("windowsdomain", net_info["Domain"]),
"windowsdomaintype": _clean_value(
"windowsdomaintype", net_info["DomainType"]
),
"motherboard": {
"productname": _clean_value(
"motherboard.productname", motherboard["product"]
),
"serialnumber": _clean_value(
"motherboard.serialnumber", motherboard["serial"]
),
},
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
if "VRTUAL" in biosinfo.Version: # (not a typo)
grains["virtual"] = "HyperV"
elif "A M I" in biosinfo.Version:
grains["virtual"] = "VirtualPC"
elif "VMware" in systeminfo.Model:
grains["virtual"] = "VMware"
elif "VirtualBox" in systeminfo.Model:
grains["virtual"] = "VirtualBox"
elif "Xen" in biosinfo.Version:
grains["virtual"] = "Xen"
if "HVM domU" in systeminfo.Model:
grains["virtual_subtype"] = "HVM domU"
elif "OpenStack" in systeminfo.Model:
grains["virtual"] = "OpenStack"
elif "AMAZON" in biosinfo.Version:
grains["virtual"] = "EC2"
return grains
def _osx_platform_data():
"""
Additional data for macOS systems
Returns: A dictionary containing values for the following:
- model_name
- boot_rom_version
- smc_version
- system_serialnumber
"""
cmd = "system_profiler SPHardwareDataType"
hardware = __salt__["cmd.run"](cmd)
grains = {}
for line in hardware.splitlines():
field_name, _, field_val = line.partition(": ")
if field_name.strip() == "Model Name":
key = "model_name"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Boot ROM Version":
key = "boot_rom_version"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "SMC Version (system)":
key = "smc_version"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Serial Number (system)":
key = "system_serialnumber"
grains[key] = _clean_value(key, field_val)
return grains
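# Editor's note (hedged illustration, not part of upstream Salt): a
# `system_profiler SPHardwareDataType` line such as
#     "      Model Name: MacBook Pro"
# is split on ": " above, so the resulting grain would be
#     grains["model_name"] == "MacBook Pro"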
def id_():
"""
Return the id
"""
return {"id": __opts__.get("id", "")}
_REPLACE_LINUX_RE = re.compile(r"\W(?:gnu/)?linux", re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
"redhatente": "RedHat",
"gentoobase": "Gentoo",
"archarm": "Arch ARM",
"arch": "Arch",
"debian": "Debian",
"raspbian": "Raspbian",
"fedoraremi": "Fedora",
"chapeau": "Chapeau",
"korora": "Korora",
"amazonami": "Amazon",
"alt": "ALT",
"enterprise": "OEL",
"oracleserv": "OEL",
"cloudserve": "CloudLinux",
"cloudlinux": "CloudLinux",
"pidora": "Fedora",
"scientific": "ScientificLinux",
"synology": "Synology",
"nilrt": "NILinuxRT",
"poky": "Poky",
"manjaro": "Manjaro",
"manjarolin": "Manjaro",
"univention": "Univention",
"antergos": "Antergos",
"sles": "SUSE",
"void": "Void",
"slesexpand": "RES",
"linuxmint": "Mint",
"neon": "KDE neon",
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
"Ubuntu": "Debian",
"Fedora": "RedHat",
"Chapeau": "RedHat",
"Korora": "RedHat",
"FedBerry": "RedHat",
"CentOS": "RedHat",
"GoOSe": "RedHat",
"Scientific": "RedHat",
"Amazon": "RedHat",
"CloudLinux": "RedHat",
"OVS": "RedHat",
"OEL": "RedHat",
"XCP": "RedHat",
"XCP-ng": "RedHat",
"XenServer": "RedHat",
"RES": "RedHat",
"Sangoma": "RedHat",
"Mandrake": "Mandriva",
"ESXi": "VMware",
"Mint": "Debian",
"VMwareESX": "VMware",
"Bluewhite64": "Bluewhite",
"Slamd64": "Slackware",
"SLES": "Suse",
"SUSE Enterprise Server": "Suse",
"SUSE Enterprise Server": "Suse",
"SLED": "Suse",
"openSUSE": "Suse",
"SUSE": "Suse",
"openSUSE Leap": "Suse",
"openSUSE Tumbleweed": "Suse",
"SLES_SAP": "Suse",
"Solaris": "Solaris",
"SmartOS": "Solaris",
"OmniOS": "Solaris",
"OpenIndiana Development": "Solaris",
"OpenIndiana": "Solaris",
"OpenSolaris Development": "Solaris",
"OpenSolaris": "Solaris",
"Oracle Solaris": "Solaris",
"Arch ARM": "Arch",
"Manjaro": "Arch",
"Antergos": "Arch",
"ALT": "RedHat",
"Trisquel": "Debian",
"GCEL": "Debian",
"Linaro": "Debian",
"elementary OS": "Debian",
"elementary": "Debian",
"Univention": "Debian",
"ScientificLinux": "RedHat",
"Raspbian": "Debian",
"Devuan": "Debian",
"antiX": "Debian",
"Kali": "Debian",
"neon": "Debian",
"Cumulus": "Debian",
"Deepin": "Debian",
"NILinuxRT": "NILinuxRT",
"KDE neon": "Debian",
"Void": "Void",
"IDMS": "Debian",
"Funtoo": "Gentoo",
"AIX": "AIX",
"TurnKey": "Debian",
}
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
_LSB_REGEX = re.compile(
(
"^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:'|\")?"
"([\\w\\s\\.\\-_]+)(?:'|\")?"
)
)
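# Editor's note (hedged example, not part of upstream Salt): for a line such as
#     DISTRIB_ID="Ubuntu"
# _LSB_REGEX captures ("DISTRIB_ID", "Ubuntu"), which _parse_lsb_release below
# stores as the grain 'lsb_distrib_id' -> 'Ubuntu'.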
def _linux_bin_exists(binary):
"""
Does a binary exist in linux (depends on which, type, or whereis)
"""
for search_cmd in ("which", "type -ap"):
try:
return __salt__["cmd.retcode"]("{0} {1}".format(search_cmd, binary)) == 0
except salt.exceptions.CommandExecutionError:
pass
try:
return (
len(
__salt__["cmd.run_all"]("whereis -b {0}".format(binary))[
"stdout"
].split()
)
> 1
)
except salt.exceptions.CommandExecutionError:
return False
def _get_interfaces():
"""
Provide a dict of the connected interfaces and their ip addresses
"""
global _INTERFACES
if not _INTERFACES:
_INTERFACES = salt.utils.network.interfaces()
return _INTERFACES
def _parse_lsb_release():
ret = {}
try:
log.trace("Attempting to parse /etc/lsb-release")
with salt.utils.files.fopen("/etc/lsb-release") as ifile:
for line in ifile:
try:
key, value = _LSB_REGEX.match(line.rstrip("\n")).groups()[:2]
except AttributeError:
pass
else:
# Adds lsb_distrib_{id,release,codename,description}
ret["lsb_{0}".format(key.lower())] = value.rstrip()
except (IOError, OSError) as exc:
log.trace("Failed to parse /etc/lsb-release: %s", exc)
return ret
def _parse_os_release(*os_release_files):
"""
Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
"""
ret = {}
for filename in os_release_files:
try:
with salt.utils.files.fopen(filename) as ifile:
regex = re.compile("^([\\w]+)=(?:'|\")?(.*?)(?:'|\")?$")
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
r'\\([$"\'\\`])', r"\1", match.group(2)
)
break
except (IOError, OSError):
pass
return ret
def _parse_cpe_name(cpe):
"""
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
Note: cpe:2.3:part:vendor:product:version:update:edition:lang:sw_edition:target_sw:target_hw:other
however some OS's do not have the full 13 elements, for example:
CPE_NAME="cpe:2.3:o:amazon:amazon_linux:2"
:param cpe:
:return:
"""
part = {
"o": "operating system",
"h": "hardware",
"a": "application",
}
ret = {}
cpe = (cpe or "").split(":")
if len(cpe) > 4 and cpe[0] == "cpe":
if cpe[1].startswith("/"): # WFN to URI
ret["vendor"], ret["product"], ret["version"] = cpe[2:5]
ret["phase"] = cpe[5] if len(cpe) > 5 else None
ret["part"] = part.get(cpe[1][1:])
elif len(cpe) == 6 and cpe[1] == "2.3": # WFN to a string
ret["vendor"], ret["product"], ret["version"] = [
x if x != "*" else None for x in cpe[3:6]
]
ret["phase"] = None
ret["part"] = part.get(cpe[2])
elif len(cpe) > 7 and len(cpe) <= 13 and cpe[1] == "2.3": # WFN to a string
ret["vendor"], ret["product"], ret["version"], ret["phase"] = [
x if x != "*" else None for x in cpe[3:7]
]
ret["part"] = part.get(cpe[2])
return ret
def os_data():
"""
Return grains pertaining to the operating system
"""
grains = {
"num_gpus": 0,
"gpus": [],
}
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64',
# 'Intel64 Fam ily 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server',
# '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
# pylint: disable=unpacking-non-sequence
(
grains["kernel"],
grains["nodename"],
grains["kernelrelease"],
grains["kernelversion"],
grains["cpuarch"],
_,
) = platform.uname()
# pylint: enable=unpacking-non-sequence
if salt.utils.platform.is_proxy():
grains["kernel"] = "proxy"
grains["kernelrelease"] = "proxy"
grains["kernelversion"] = "proxy"
grains["osrelease"] = "proxy"
grains["os"] = "proxy"
grains["os_family"] = "proxy"
grains["osfullname"] = "proxy"
elif salt.utils.platform.is_windows():
grains["os"] = "Windows"
grains["os_family"] = "Windows"
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_windows_virtual(grains))
grains.update(_ps(grains))
if "Server" in grains["osrelease"]:
osrelease_info = grains["osrelease"].split("Server", 1)
osrelease_info[1] = osrelease_info[1].lstrip("R")
else:
osrelease_info = grains["osrelease"].split(".")
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains["osrelease_info"] = tuple(osrelease_info)
grains["osfinger"] = "{os}-{ver}".format(
os=grains["os"], ver=grains["osrelease"]
)
grains["init"] = "Windows"
return grains
elif salt.utils.platform.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists("selinuxenabled"):
log.trace("Adding selinux grains")
grains["selinux"] = {}
grains["selinux"]["enabled"] = (
__salt__["cmd.retcode"]("selinuxenabled") == 0
)
if _linux_bin_exists("getenforce"):
grains["selinux"]["enforced"] = __salt__["cmd.run"](
"getenforce"
).strip()
# Add systemd grain, if you have it
if _linux_bin_exists("systemctl") and _linux_bin_exists("localectl"):
log.trace("Adding systemd grains")
grains["systemd"] = {}
systemd_info = __salt__["cmd.run"]("systemctl --version").splitlines()
grains["systemd"]["version"] = systemd_info[0].split()[1]
grains["systemd"]["features"] = systemd_info[1]
# Add init grain
grains["init"] = "unknown"
log.trace("Adding init grain")
try:
os.stat("/run/systemd/system")
grains["init"] = "systemd"
except (OSError, IOError):
try:
with salt.utils.files.fopen("/proc/1/cmdline") as fhr:
init_cmdline = fhr.read().replace("\x00", " ").split()
except (IOError, OSError):
pass
else:
try:
init_bin = salt.utils.path.which(init_cmdline[0])
except IndexError:
# Empty init_cmdline
init_bin = None
log.warning("Unable to fetch data from /proc/1/cmdline")
if init_bin is not None and init_bin.endswith("bin/init"):
supported_inits = (b"upstart", b"sysvinit", b"systemd")
edge_len = max(len(x) for x in supported_inits) - 1
try:
buf_size = __opts__["file_buffer_size"]
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
with salt.utils.files.fopen(init_bin, "rb") as fp_:
edge = b""
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
item = item.decode("utf-8")
grains["init"] = item
buf = b""
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
except (IOError, OSError) as exc:
log.error(
"Unable to read from init_bin (%s): %s", init_bin, exc
)
elif salt.utils.path.which("supervisord") in init_cmdline:
grains["init"] = "supervisord"
elif salt.utils.path.which("dumb-init") in init_cmdline:
# https://github.com/Yelp/dumb-init
grains["init"] = "dumb-init"
elif salt.utils.path.which("tini") in init_cmdline:
# https://github.com/krallin/tini
grains["init"] = "tini"
elif init_cmdline == ["runit"]:
grains["init"] = "runit"
elif "/sbin/my_init" in init_cmdline:
# Phusion Base docker container use runit for srv mgmt, but
# my_init as pid1
grains["init"] = "runit"
else:
log.debug(
"Could not determine init system from command line: (%s)",
" ".join(init_cmdline),
)
# Add lsb grains on any distro with lsb-release. Note that this import
# can fail on systems with lsb-release installed if the system package
# does not install the python package for the python interpreter used by
# Salt (i.e. python2 or python3)
try:
log.trace("Getting lsb_release distro information")
import lsb_release # pylint: disable=import-error
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
key = key.lower()
lsb_param = "lsb_{0}{1}".format(
"" if key.startswith("distrib_") else "distrib_", key
)
grains[lsb_param] = value
# Catch a NameError to workaround possible breakage in lsb_release
# See https://github.com/saltstack/salt/issues/37867
except (ImportError, NameError):
# if the python library isn't available, try to parse
# /etc/lsb-release using regex
log.trace("lsb_release python bindings not available")
grains.update(_parse_lsb_release())
if grains.get("lsb_distrib_description", "").lower().startswith("antergos"):
# Antergos incorrectly configures their /etc/lsb-release,
# setting the DISTRIB_ID to "Arch". This causes the "os" grain
# to be incorrectly set to "Arch".
grains["osfullname"] = "Antergos Linux"
elif "lsb_distrib_id" not in grains:
log.trace("Failed to get lsb_distrib_id, trying to parse os-release")
os_release = _parse_os_release("/etc/os-release", "/usr/lib/os-release")
if os_release:
if "NAME" in os_release:
grains["lsb_distrib_id"] = os_release["NAME"].strip()
if "VERSION_ID" in os_release:
grains["lsb_distrib_release"] = os_release["VERSION_ID"]
if "VERSION_CODENAME" in os_release:
grains["lsb_distrib_codename"] = os_release["VERSION_CODENAME"]
elif "PRETTY_NAME" in os_release:
codename = os_release["PRETTY_NAME"]
# https://github.com/saltstack/salt/issues/44108
if os_release["ID"] == "debian":
codename_match = re.search(r"\((\w+)\)$", codename)
if codename_match:
codename = codename_match.group(1)
grains["lsb_distrib_codename"] = codename
if "CPE_NAME" in os_release:
cpe = _parse_cpe_name(os_release["CPE_NAME"])
if not cpe:
log.error("Broken CPE_NAME format in /etc/os-release!")
elif cpe.get("vendor", "").lower() in ["suse", "opensuse"]:
grains["os"] = "SUSE"
# openSUSE `osfullname` grain normalization
if os_release.get("NAME") == "openSUSE Leap":
grains["osfullname"] = "Leap"
elif os_release.get("VERSION") == "Tumbleweed":
grains["osfullname"] = os_release["VERSION"]
# Override VERSION_ID, if CPE_NAME around
if (
cpe.get("version") and cpe.get("vendor") == "opensuse"
): # Keep VERSION_ID for SLES
grains["lsb_distrib_release"] = cpe["version"]
elif os.path.isfile("/etc/SuSE-release"):
log.trace("Parsing distrib info from /etc/SuSE-release")
grains["lsb_distrib_id"] = "SUSE"
version = ""
patch = ""
with salt.utils.files.fopen("/etc/SuSE-release") as fhr:
for line in fhr:
if "enterprise" in line.lower():
grains["lsb_distrib_id"] = "SLES"
grains["lsb_distrib_codename"] = re.sub(
r"\(.+\)", "", line
).strip()
elif "version" in line.lower():
version = re.sub(r"[^0-9]", "", line)
elif "patchlevel" in line.lower():
patch = re.sub(r"[^0-9]", "", line)
grains["lsb_distrib_release"] = version
if patch:
grains["lsb_distrib_release"] += "." + patch
patchstr = "SP" + patch
if (
grains["lsb_distrib_codename"]
and patchstr not in grains["lsb_distrib_codename"]
):
grains["lsb_distrib_codename"] += " " + patchstr
if not grains.get("lsb_distrib_codename"):
grains["lsb_distrib_codename"] = "n.a"
elif os.path.isfile("/etc/altlinux-release"):
log.trace("Parsing distrib info from /etc/altlinux-release")
# ALT Linux
grains["lsb_distrib_id"] = "altlinux"
with salt.utils.files.fopen("/etc/altlinux-release") as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
# /etc/system-release
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
if comps[0] == "ALT":
grains["lsb_distrib_release"] = comps[2]
grains["lsb_distrib_codename"] = (
comps[3].replace("(", "").replace(")", "")
)
elif os.path.isfile("/etc/centos-release"):
log.trace("Parsing distrib info from /etc/centos-release")
# CentOS Linux
grains["lsb_distrib_id"] = "CentOS"
with salt.utils.files.fopen("/etc/centos-release") as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
find_release = re.compile(r"\d+\.\d+")
find_codename = re.compile(r"(?<=\()(.*?)(?=\))")
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
grains["lsb_distrib_release"] = release.group()
if codename is not None:
grains["lsb_distrib_codename"] = codename.group()
elif os.path.isfile("/etc.defaults/VERSION") and os.path.isfile(
"/etc.defaults/synoinfo.conf"
):
grains["osfullname"] = "Synology"
log.trace(
"Parsing Synology distrib info from /etc/.defaults/VERSION"
)
with salt.utils.files.fopen("/etc.defaults/VERSION", "r") as fp_:
synoinfo = {}
for line in fp_:
try:
key, val = line.rstrip("\n").split("=")
except ValueError:
continue
if key in ("majorversion", "minorversion", "buildnumber"):
synoinfo[key] = val.strip('"')
if len(synoinfo) != 3:
log.warning(
"Unable to determine Synology version info. "
"Please report this, as it is likely a bug."
)
else:
grains[
"osrelease"
] = "{majorversion}.{minorversion}-{buildnumber}".format(
**synoinfo
)
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
log.trace(
"Getting OS name, release, and codename from "
"platform.linux_distribution()"
)
(osname, osrelease, oscodename) = [
x.strip('"').strip("'") for x in linux_distribution()
]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that linux_distribution() does the /etc/lsb-release parsing, but
# we do it anyway here for the sake of full portability.
if "osfullname" not in grains:
# If NI Linux RT distribution, set the grains['osfullname'] to 'nilrt'
if grains.get("lsb_distrib_id", "").lower().startswith("nilrt"):
grains["osfullname"] = "nilrt"
else:
grains["osfullname"] = grains.get("lsb_distrib_id", osname).strip()
if "osrelease" not in grains:
# NOTE: This is a workaround for CentOS 7 os-release bug
# https://bugs.centos.org/view.php?id=8359
# /etc/os-release contains no minor distro release number so we fall back to parse
# /etc/centos-release file instead.
# The commit introducing this comment should be reverted once the upstream fix is released.
if "CentOS Linux 7" in grains.get("lsb_distrib_codename", ""):
grains.pop("lsb_distrib_release", None)
grains["osrelease"] = grains.get("lsb_distrib_release", osrelease).strip()
grains["oscodename"] = (
grains.get("lsb_distrib_codename", "").strip() or oscodename
)
if "Red Hat" in grains["oscodename"]:
grains["oscodename"] = oscodename
distroname = _REPLACE_LINUX_RE.sub("", grains["osfullname"]).strip()
# return the first ten characters with no spaces, lowercased
shortname = distroname.replace(" ", "").lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
if "os" not in grains:
grains["os"] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
elif grains["kernel"] == "SunOS":
if salt.utils.platform.is_smartos():
# See https://github.com/joyent/smartos-live/issues/224
if HAS_UNAME:
uname_v = os.uname()[3] # format: joyent_20161101T004406Z
else:
uname_v = os.name
uname_v = uname_v[uname_v.index("_") + 1 :]
grains["os"] = grains["osfullname"] = "SmartOS"
# store a parsed version of YYYY.MM.DD as osrelease
grains["osrelease"] = ".".join(
[
uname_v.split("T")[0][0:4],
uname_v.split("T")[0][4:6],
uname_v.split("T")[0][6:8],
]
)
# store a untouched copy of the timestamp in osrelease_stamp
grains["osrelease_stamp"] = uname_v
elif os.path.isfile("/etc/release"):
with salt.utils.files.fopen("/etc/release", "r") as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
r"((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?"
r"\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?"
)
(
osname,
development,
osmajorrelease,
osminorrelease,
) = release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
grains["os"] = grains["osfullname"] = "Solaris"
grains["osrelease"] = ""
else:
if development is not None:
osname = " ".join((osname, development))
if HAS_UNAME:
uname_v = os.uname()[3]
else:
uname_v = os.name
grains["os"] = grains["osfullname"] = osname
if osname in ["Oracle Solaris"] and uname_v.startswith(
osmajorrelease
):
# Oracle Solaris 11 and up include the minor version in uname
grains["osrelease"] = uname_v
elif osname in ["OmniOS"]:
# OmniOS
osrelease = []
osrelease.append(osmajorrelease[1:])
osrelease.append(osminorrelease[1:])
grains["osrelease"] = ".".join(osrelease)
grains["osrelease_stamp"] = uname_v
else:
# Sun Solaris 10 and earlier/comparable
osrelease = []
osrelease.append(osmajorrelease)
if osminorrelease:
osrelease.append(osminorrelease)
grains["osrelease"] = ".".join(osrelease)
grains["osrelease_stamp"] = uname_v
grains.update(_sunos_cpudata())
elif grains["kernel"] == "VMkernel":
grains["os"] = "ESXi"
elif grains["kernel"] == "Darwin":
osrelease = __salt__["cmd.run"]("sw_vers -productVersion")
osname = __salt__["cmd.run"]("sw_vers -productName")
osbuild = __salt__["cmd.run"]("sw_vers -buildVersion")
grains["os"] = "MacOS"
grains["os_family"] = "MacOS"
grains["osfullname"] = "{0} {1}".format(osname, osrelease)
grains["osrelease"] = osrelease
grains["osbuild"] = osbuild
grains["init"] = "launchd"
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
grains.update(_osx_platform_data())
elif grains["kernel"] == "AIX":
osrelease = __salt__["cmd.run"]("oslevel")
osrelease_techlevel = __salt__["cmd.run"]("oslevel -r")
osname = __salt__["cmd.run"]("uname")
grains["os"] = "AIX"
grains["osfullname"] = osname
grains["osrelease"] = osrelease
grains["osrelease_techlevel"] = osrelease_techlevel
grains.update(_aix_cpudata())
else:
grains["os"] = grains["kernel"]
if grains["kernel"] == "FreeBSD":
grains["osfullname"] = grains["os"]
try:
grains["osrelease"] = __salt__["cmd.run"]("freebsd-version -u").split("-")[
0
]
except salt.exceptions.CommandExecutionError:
# freebsd-version was introduced in 10.0.
# Derive osrelease from kernelrelease on older versions.
grains["osrelease"] = grains["kernelrelease"].split("-")[0]
grains.update(_bsd_cpudata(grains))
if grains["kernel"] in ("OpenBSD", "NetBSD"):
grains.update(_bsd_cpudata(grains))
grains["osrelease"] = grains["kernelrelease"].split("-")[0]
if grains["kernel"] == "NetBSD":
grains.update(_netbsd_gpu_data())
if not grains["os"]:
grains["os"] = "Unknown {0}".format(grains["kernel"])
grains["os_family"] = "Unknown"
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains["os_family"] = _OS_FAMILY_MAP.get(grains["os"], grains["os"])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
if grains.get("os_family") == "Debian":
osarch = __salt__["cmd.run"]("dpkg --print-architecture").strip()
elif grains.get("os_family") in ["RedHat", "Suse"]:
osarch = salt.utils.pkg.rpm.get_osarch()
elif grains.get("os_family") in ("NILinuxRT", "Poky"):
archinfo = {}
for line in __salt__["cmd.run"]("opkg print-architecture").splitlines():
if line.startswith("arch"):
_, arch, priority = line.split()
archinfo[arch.strip()] = int(priority.strip())
# Return osarch in priority order (higher to lower)
osarch = sorted(archinfo, key=archinfo.get, reverse=True)
else:
osarch = grains["cpuarch"]
grains["osarch"] = osarch
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_virtual_hv(grains))
grains.update(_ps(grains))
if grains.get("osrelease", ""):
osrelease_info = grains["osrelease"].split(".")
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains["osrelease_info"] = tuple(osrelease_info)
try:
grains["osmajorrelease"] = int(grains["osrelease_info"][0])
except (IndexError, TypeError, ValueError):
log.debug(
"Unable to derive osmajorrelease from osrelease_info '%s'. "
"The osmajorrelease grain will not be set.",
grains["osrelease_info"],
)
os_name = grains[
"os"
if grains.get("os")
in ("Debian", "FreeBSD", "OpenBSD", "NetBSD", "Mac", "Raspbian")
else "osfullname"
]
grains["osfinger"] = "{0}-{1}".format(
os_name,
grains["osrelease"]
if os_name in ("Ubuntu",)
else grains["osrelease_info"][0],
)
return grains
def locale_info():
"""
Provides
defaultlanguage
defaultencoding
"""
grains = {}
grains["locale_info"] = {}
if salt.utils.platform.is_proxy():
return grains
try:
(
grains["locale_info"]["defaultlanguage"],
grains["locale_info"]["defaultencoding"],
) = locale.getdefaultlocale()
except Exception: # pylint: disable=broad-except
# locale.getdefaultlocale can ValueError!! Catch anything else it
# might do, per #2205
grains["locale_info"]["defaultlanguage"] = "unknown"
grains["locale_info"]["defaultencoding"] = "unknown"
grains["locale_info"]["detectedencoding"] = __salt_system_encoding__
grains["locale_info"]["timezone"] = "unknown"
if _DATEUTIL_TZ:
try:
grains["locale_info"]["timezone"] = datetime.datetime.now(
dateutil.tz.tzlocal()
).tzname()
except UnicodeDecodeError:
# Because the 'tzname' method is not part of Salt, the decoding error
# can't be fixed here; it originates in datetime in the Python 2 stdlib.
if salt.utils.platform.is_windows():
grains["locale_info"]["timezone"] = time.tzname[0].decode("mbcs")
return grains
def hostname():
"""
Return fqdn, hostname, domainname
.. note::
On Windows the ``domain`` grain may refer to the dns entry for the host
instead of the Windows domain to which the host is joined. It may also
be empty if not a part of any domain. Refer to the ``windowsdomain``
grain instead
"""
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
global __FQDN__
grains = {}
if salt.utils.platform.is_proxy():
return grains
grains["localhost"] = socket.gethostname()
if __FQDN__ is None:
__FQDN__ = salt.utils.network.get_fqhostname()
# On some distros (notably FreeBSD) if there is no hostname set
# salt.utils.network.get_fqhostname() will return None.
# In this case we punt and log a message at error level, but force the
# hostname and domain to be localhost.localdomain
# Otherwise we would stacktrace below
if __FQDN__ is None: # still!
log.error(
"Having trouble getting a hostname. Does this machine have its hostname and domain set properly?"
)
__FQDN__ = "localhost.localdomain"
grains["fqdn"] = __FQDN__
(grains["host"], grains["domain"]) = grains["fqdn"].partition(".")[::2]
return grains
def append_domain():
"""
Return append_domain if set
"""
grain = {}
if salt.utils.platform.is_proxy():
return grain
if "append_domain" in __opts__:
grain["append_domain"] = __opts__["append_domain"]
return grain
def fqdns():
"""
Return all known FQDNs for the system by enumerating all interfaces and
then trying to reverse resolve them (excluding 'lo' interface).
"""
# Provides:
# fqdns
grains = {}
fqdns = set()
addresses = salt.utils.network.ip_addrs(
include_loopback=False, interface_data=_INTERFACES
)
addresses.extend(
salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_INTERFACES)
)
err_message = "An exception occurred resolving address '%s': %s"
for ip in addresses:
try:
fqdns.add(socket.getfqdn(socket.gethostbyaddr(ip)[0]))
except socket.herror as err:
if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
# No FQDN record for this IP address; this is common, so only log at debug level.
log.debug("Unable to resolve address %s: %s", ip, err)
else:
log.error(err_message, ip, err)
except (socket.error, socket.gaierror, socket.timeout) as err:
log.error(err_message, ip, err)
grains["fqdns"] = sorted(list(fqdns))
return grains
def ip_fqdn():
"""
Return ip address and FQDN grains
"""
if salt.utils.platform.is_proxy():
return {}
ret = {}
ret["ipv4"] = salt.utils.network.ip_addrs(include_loopback=True)
ret["ipv6"] = salt.utils.network.ip_addrs6(include_loopback=True)
_fqdn = hostname()["fqdn"]
for socket_type, ipv_num in ((socket.AF_INET, "4"), (socket.AF_INET6, "6")):
key = "fqdn_ip" + ipv_num
if not ret["ipv" + ipv_num]:
ret[key] = []
else:
try:
start_time = datetime.datetime.utcnow()
info = socket.getaddrinfo(_fqdn, None, socket_type)
ret[key] = list(set(item[4][0] for item in info))
except (socket.error, UnicodeError):
timediff = datetime.datetime.utcnow() - start_time
if timediff.seconds > 5 and __opts__["__role"] == "master":
log.warning(
'Unable to find IPv%s record for "%s" causing a %s '
"second timeout when rendering grains. Set the dns or "
"/etc/hosts for IPv%s to clear this.",
ipv_num,
_fqdn,
timediff,
ipv_num,
)
ret[key] = []
return ret
def ip_interfaces():
"""
Provide a dict of the connected interfaces and their ip addresses
The addresses will be passed as a list for each interface
"""
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get("inet", []):
if "address" in inet:
iface_ips.append(inet["address"])
for inet in ifaces[face].get("inet6", []):
if "address" in inet:
iface_ips.append(inet["address"])
for secondary in ifaces[face].get("secondary", []):
if "address" in secondary:
iface_ips.append(secondary["address"])
ret[face] = iface_ips
return {"ip_interfaces": ret}
def ip4_interfaces():
"""
Provide a dict of the connected interfaces and their ip4 addresses
The addresses will be passed as a list for each interface
"""
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get("inet", []):
if "address" in inet:
iface_ips.append(inet["address"])
for secondary in ifaces[face].get("secondary", []):
if "address" in secondary:
iface_ips.append(secondary["address"])
ret[face] = iface_ips
return {"ip4_interfaces": ret}
def ip6_interfaces():
"""
Provide a dict of the connected interfaces and their ip6 addresses
The addresses will be passed as a list for each interface
"""
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get("inet6", []):
if "address" in inet:
iface_ips.append(inet["address"])
for secondary in ifaces[face].get("secondary", []):
if "address" in secondary:
iface_ips.append(secondary["address"])
ret[face] = iface_ips
return {"ip6_interfaces": ret}
def hwaddr_interfaces():
"""
Provide a dict of the connected interfaces and their
hw addresses (Mac Address)
"""
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
if "hwaddr" in ifaces[face]:
ret[face] = ifaces[face]["hwaddr"]
return {"hwaddr_interfaces": ret}
def dns():
"""
Parse the resolver configuration file
.. versionadded:: 2016.3.0
"""
# Provides:
# dns
if salt.utils.platform.is_windows() or "proxyminion" in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
for key in ("nameservers", "ip4_nameservers", "ip6_nameservers", "sortlist"):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
return {"dns": resolv} if resolv else {}
def get_machine_id():
"""
Provide the machine-id for machine/virtualization combination
"""
# Provides:
# machine-id
if platform.system() == "AIX":
return _aix_get_machine_id()
locations = ["/etc/machine-id", "/var/lib/dbus/machine-id"]
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
return {}
else:
with salt.utils.files.fopen(existing_locations[0]) as machineid:
return {"machine_id": machineid.read().strip()}
def cwd():
"""
Current working directory
"""
return {"cwd": os.getcwd()}
def path():
"""
Return the path
"""
# Provides:
# path
# systempath
_path = salt.utils.stringutils.to_unicode(os.environ.get("PATH", "").strip())
return {
"path": _path,
"systempath": _path.split(os.path.pathsep),
}
def pythonversion():
"""
Return the Python version
"""
# Provides:
# pythonversion
return {"pythonversion": list(sys.version_info)}
def pythonpath():
"""
Return the Python path
"""
# Provides:
# pythonpath
return {"pythonpath": sys.path}
def pythonexecutable():
"""
Return the python executable in use
"""
# Provides:
# pythonexecutable
return {"pythonexecutable": sys.executable}
def saltpath():
"""
Return the path of the salt module
"""
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {"saltpath": os.path.dirname(salt_path)}
def saltversion():
"""
Return the version of salt
"""
# Provides:
# saltversion
from salt.version import __version__
return {"saltversion": __version__}
def zmqversion():
"""
Return the zeromq version
"""
# Provides:
# zmqversion
try:
import zmq
return {"zmqversion": zmq.zmq_version()} # pylint: disable=no-member
except ImportError:
return {}
def saltversioninfo():
"""
Return the version_info of salt
.. versionadded:: 0.17.0
"""
# Provides:
# saltversioninfo
from salt.version import __version_info__
return {"saltversioninfo": list(__version_info__)}
def _hw_data(osdata):
"""
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
uuid
.. versionadded:: 0.9.5
"""
if salt.utils.platform.is_proxy():
return {}
grains = {}
if osdata["kernel"] == "Linux" and os.path.exists("/sys/class/dmi/id"):
# On many Linux distributions basic firmware information is available via sysfs
# requires CONFIG_DMIID to be enabled in the Linux kernel configuration
sysfs_firmware_info = {
"biosversion": "bios_version",
"productname": "product_name",
"manufacturer": "sys_vendor",
"biosreleasedate": "bios_date",
"uuid": "product_uuid",
"serialnumber": "product_serial",
}
for key, fw_file in sysfs_firmware_info.items():
contents_file = os.path.join("/sys/class/dmi/id", fw_file)
if os.path.exists(contents_file):
try:
with salt.utils.files.fopen(contents_file, "r") as ifile:
grains[key] = salt.utils.stringutils.to_unicode(
ifile.read().strip(), errors="replace"
)
if key == "uuid":
grains["uuid"] = grains["uuid"].lower()
except UnicodeDecodeError:
# Some firmware implementations provide invalid 'product_name'
# files; ignore them
log.debug(
"The content in /sys/devices/virtual/dmi/id/product_name is not valid"
)
except (IOError, OSError) as err:
# PermissionError is new to Python 3, but corresponds to the EACCES and
# EPERM error numbers. Use those instead here for PY2 compatibility.
if err.errno == EACCES or err.errno == EPERM:
# Skip the grain if non-root user has no access to the file.
pass
elif salt.utils.path.which_bin(["dmidecode", "smbios"]) is not None and not (
salt.utils.platform.is_smartos()
or ( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table'
osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc")
)
):
# On SmartOS (possibly SunOS also) smbios only works in the global zone
# smbios is also not compatible with linux's smbios (smbios -s = print summarized)
grains = {
"biosversion": __salt__["smbios.get"]("bios-version"),
"productname": __salt__["smbios.get"]("system-product-name"),
"manufacturer": __salt__["smbios.get"]("system-manufacturer"),
"biosreleasedate": __salt__["smbios.get"]("bios-release-date"),
"uuid": __salt__["smbios.get"]("system-uuid"),
}
grains = dict([(key, val) for key, val in grains.items() if val is not None])
uuid = __salt__["smbios.get"]("system-uuid")
if uuid is not None:
grains["uuid"] = uuid.lower()
for serial in (
"system-serial-number",
"chassis-serial-number",
"baseboard-serial-number",
):
serial = __salt__["smbios.get"](serial)
if serial is not None:
grains["serialnumber"] = serial
break
elif salt.utils.path.which_bin(["fw_printenv"]) is not None:
# ARM Linux devices expose UBOOT env variables via fw_printenv
hwdata = {
"manufacturer": "manufacturer",
"serialnumber": "serial#",
"productname": "DeviceDesc",
}
for grain_name, cmd_key in six.iteritems(hwdata):
result = __salt__["cmd.run_all"]("fw_printenv {0}".format(cmd_key))
if result["retcode"] == 0:
uboot_keyval = result["stdout"].split("=")
grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
elif osdata["kernel"] == "FreeBSD":
# On FreeBSD /bin/kenv (already in base system)
# can be used instead of dmidecode
kenv = salt.utils.path.which("kenv")
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
"biosversion": "smbios.bios.version",
"manufacturer": "smbios.system.maker",
"serialnumber": "smbios.system.serial",
"productname": "smbios.system.product",
"biosreleasedate": "smbios.bios.reldate",
"uuid": "smbios.system.uuid",
}
for key, val in six.iteritems(fbsd_hwdata):
value = __salt__["cmd.run"]("{0} {1}".format(kenv, val))
grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "OpenBSD":
sysctl = salt.utils.path.which("sysctl")
hwdata = {
"biosversion": "hw.version",
"manufacturer": "hw.vendor",
"productname": "hw.product",
"serialnumber": "hw.serialno",
"uuid": "hw.uuid",
}
for key, oid in six.iteritems(hwdata):
value = __salt__["cmd.run"]("{0} -n {1}".format(sysctl, oid))
if not value.endswith(" value is not available"):
grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "NetBSD":
sysctl = salt.utils.path.which("sysctl")
nbsd_hwdata = {
"biosversion": "machdep.dmi.board-version",
"manufacturer": "machdep.dmi.system-vendor",
"serialnumber": "machdep.dmi.system-serial",
"productname": "machdep.dmi.system-product",
"biosreleasedate": "machdep.dmi.bios-date",
"uuid": "machdep.dmi.system-uuid",
}
for key, oid in six.iteritems(nbsd_hwdata):
result = __salt__["cmd.run_all"]("{0} -n {1}".format(sysctl, oid))
if result["retcode"] == 0:
grains[key] = _clean_value(key, result["stdout"])
elif osdata["kernel"] == "Darwin":
grains["manufacturer"] = "Apple Inc."
sysctl = salt.utils.path.which("sysctl")
hwdata = {"productname": "hw.model"}
for key, oid in hwdata.items():
value = __salt__["cmd.run"]("{0} -b {1}".format(sysctl, oid))
if not value.endswith(" is invalid"):
grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"):
# Depending on the hardware model, commands can report different bits
# of information. With that said, consolidate the output from various
# commands and attempt various lookups.
data = ""
for (cmd, args) in (
("/usr/sbin/prtdiag", "-v"),
("/usr/sbin/prtconf", "-vp"),
("/usr/sbin/virtinfo", "-a"),
):
if salt.utils.path.which(cmd): # Also verifies that cmd is executable
data += __salt__["cmd.run"]("{0} {1}".format(cmd, args))
data += "\n"
sn_regexes = [
re.compile(r)
for r in [
r"(?im)^\s*Chassis\s+Serial\s+Number\n-+\n(\S+)", # prtdiag
r"(?im)^\s*chassis-sn:\s*(\S+)", # prtconf
r"(?im)^\s*Chassis\s+Serial#:\s*(\S+)", # virtinfo
]
]
obp_regexes = [
re.compile(r)
for r in [
r"(?im)^\s*System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)", # prtdiag
r"(?im)^\s*version:\s*\'OBP\s+(\S+)\s+(\S+)", # prtconf
]
]
fw_regexes = [
re.compile(r)
for r in [r"(?im)^\s*Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)"] # prtdiag
]
uuid_regexes = [
re.compile(r) for r in [r"(?im)^\s*Domain\s+UUID:\s*(\S+)"] # virtinfo
]
manufacture_regexes = [
re.compile(r)
for r in [r"(?im)^\s*System\s+Configuration:\s*(.*)(?=sun)"] # prtdiag
]
product_regexes = [
re.compile(r)
for r in [
r"(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)", # prtdiag
r"(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)", # prtconf
r"(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)", # prtconf
]
]
sn_regexes = [
re.compile(r)
for r in [
r"(?im)Chassis\s+Serial\s+Number\n-+\n(\S+)", # prtdiag
r"(?i)Chassis\s+Serial#:\s*(\S+)", # virtinfo
r"(?i)chassis-sn:\s*(\S+)", # prtconf
]
]
obp_regexes = [
re.compile(r)
for r in [
r"(?im)System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)", # prtdiag
r"(?im)version:\s*\'OBP\s+(\S+)\s+(\S+)", # prtconf
]
]
fw_regexes = [
re.compile(r)
for r in [r"(?i)Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)"] # prtdiag
]
uuid_regexes = [
re.compile(r) for r in [r"(?i)Domain\s+UUID:\s+(\S+)"] # virtinfo
]
for regex in sn_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["serialnumber"] = res.group(1).strip().replace("'", "")
break
for regex in obp_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
obp_rev, obp_date = res.groups()[
0:2
] # Limit the number in case we found the data in multiple places
grains["biosversion"] = obp_rev.strip().replace("'", "")
grains["biosreleasedate"] = obp_date.strip().replace("'", "")
for regex in fw_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
fw_rev, fw_date = res.groups()[0:2]
grains["systemfirmware"] = fw_rev.strip().replace("'", "")
grains["systemfirmwaredate"] = fw_date.strip().replace("'", "")
break
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["uuid"] = res.group(1).strip().replace("'", "")
break
for regex in manufacture_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["manufacture"] = res.group(1).strip().replace("'", "")
break
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
t_productname = res.group(1).strip().replace("'", "")
if t_productname:
grains["product"] = t_productname
grains["productname"] = t_productname
break
elif osdata["kernel"] == "AIX":
cmd = salt.utils.path.which("prtconf")
if cmd:
data = __salt__["cmd.run"]("{0}".format(cmd)) + os.linesep
for dest, regstring in (
("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"),
("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"),
):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", "")
product_regexes = [re.compile(r"(?im)^\s*System\s+Model:\s+(\S+)")]
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["manufacturer"], grains["productname"] = (
res.group(1).strip().replace("'", "").split(",")
)
break
else:
log.error("The 'prtconf' binary was not found in $PATH.")
return grains
def get_server_id():
"""
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID
like this.
"""
# Provides:
# server_id
if salt.utils.platform.is_proxy():
return {}
id_ = __opts__.get("id", "")
id_hash = None
py_ver = sys.version_info[:2]
if py_ver >= (3, 3):
# Python 3.3 enabled hash randomization, so we need to shell out to get
# a reliable hash.
id_hash = __salt__["cmd.run"](
[sys.executable, "-c", 'print(hash("{0}"))'.format(id_)],
env={"PYTHONHASHSEED": "0"},
)
try:
id_hash = int(id_hash)
except (TypeError, ValueError):
log.debug(
"Failed to hash the ID to get the server_id grain. Result of "
"hash command: %s",
id_hash,
)
id_hash = None
if id_hash is None:
# Python < 3.3 or error encountered above
id_hash = hash(id_)
return {"server_id": abs(id_hash % (2 ** 31))}
def get_master():
"""
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
"""
# Provides:
# master
return {"master": __opts__.get("master", "")}
def default_gateway():
"""
Populates grains which describe whether a server has a default gateway
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
for a `default` at the beginning of any line. Assuming the standard
`default via <ip>` format for default gateways, it will also parse out the
ip address of the default gateway, and put it in ip4_gw or ip6_gw.
If the `ip` command is unavailable, no grains will be populated.
Currently does not support multiple default gateways. The grains will be
set to the first default gateway found.
List of grains:
ip4_gw: True # ip/True/False if default ipv4 gateway
ip6_gw: True # ip/True/False if default ipv6 gateway
ip_gw: True # True if either of the above is True, False otherwise
"""
grains = {}
ip_bin = salt.utils.path.which("ip")
if not ip_bin:
return {}
grains["ip_gw"] = False
grains["ip4_gw"] = False
grains["ip6_gw"] = False
for ip_version in ("4", "6"):
try:
out = __salt__["cmd.run"]([ip_bin, "-" + ip_version, "route", "show"])
for line in out.splitlines():
if line.startswith("default"):
grains["ip_gw"] = True
grains["ip{0}_gw".format(ip_version)] = True
try:
via, gw_ip = line.split()[1:3]
except ValueError:
pass
else:
if via == "via":
grains["ip{0}_gw".format(ip_version)] = gw_ip
break
except Exception: # pylint: disable=broad-except
continue
return grains
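# Editor's note (hedged example, not part of upstream Salt): given an
# `ip -4 route show` line such as
#     default via 192.0.2.1 dev eth0
# the loop above would set ip_gw=True and ip4_gw='192.0.2.1' (192.0.2.1 is a
# documentation-only address used here purely for illustration).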
def kernelparams():
"""
Return the kernel boot parameters
"""
try:
with salt.utils.files.fopen("/proc/cmdline", "r") as fhr:
cmdline = fhr.read()
grains = {"kernelparams": []}
for data in [
item.split("=") for item in salt.utils.args.shlex_split(cmdline)
]:
value = None
if len(data) == 2:
value = data[1].strip('"')
grains["kernelparams"] += [(data[0], value)]
except IOError as exc:
grains = {}
log.debug("Failed to read /proc/cmdline: %s", exc)
return grains
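# Editor's note (hedged example, not part of upstream Salt): for a hypothetical
# /proc/cmdline of
#     BOOT_IMAGE=/vmlinuz root=/dev/sda1 ro quiet
# the grain would be
#     kernelparams: [('BOOT_IMAGE', '/vmlinuz'), ('root', '/dev/sda1'),
#                    ('ro', None), ('quiet', None)]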
|
[] |
[] |
[
"PATH",
"NUMBER_OF_PROCESSORS"
] |
[]
|
["PATH", "NUMBER_OF_PROCESSORS"]
|
python
| 2 | 0 | |
leadmanager/leadmanager/asgi.py
|
"""
ASGI config for leadmanager project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'leadmanager.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photocatalog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
seismiqb/src/controllers/base.py
|
""" A convenient class to hold:
- dataset creation
- model train procedure
- inference on dataset
- evaluating predictions
- and more
"""
#pylint: disable=import-error, no-name-in-module, wrong-import-position, protected-access
import os
import gc
import logging
from time import perf_counter
from ast import literal_eval
from pprint import pformat
import psutil
import numpy as np
import torch
from ..plotters import plot_loss
from ...batchflow import Config, Monitor
from ...batchflow.models.torch import EncoderDecoder
class BaseController:
""" A common interface for train, inference, postprocessing and quality assessment.
Supposed to be used in an environment with set `CUDA_VISIBLE_DEVICES` variable.
At initialization, a nested configuration dict should be provided.
Common parameters are defined on root level of the config:
savedir : str
Directory to store outputs: logs, graphs, predictions.
monitor : bool
Whether to track resources during execution.
logger : callable
Function to log messages.
bar : bool
Whether to show progress bars during execution.
plot : bool
Whether to display graphs during execution.
Each of the methods retrieves parameters from the configuration by its name:
- `train`
- `inference`
- `postprocess`
- `evaluate`
Each of the methods also has the `config` argument to override parameters from that configuration.
Keyword arguments are used with the highest priority.
"""
#pylint: disable=attribute-defined-outside-init
DEFAULTS = Config({
# General parameters
'savedir': None,
'monitor': True,
'logger': None,
'bar': False,
'plot': False,
'train': {
'model_class': EncoderDecoder,
'model_config': None,
'batch_size': None,
'crop_shape': None,
'rebatch_threshold': 0.8,
'rescale_batch_size': True,
'prefetch': 1,
'n_iters': 100,
'early_stopping': True,
},
'inference': {},
# Common keys for both train and inference
'common': {},
# Make predictions better
'postprocess': {},
# Compute metrics
'evaluate': {}
})
def __init__(self, config=None, **kwargs):
self.config = Config(self.DEFAULTS)
self.config += config or {}
self.config += kwargs
self.monitor = self.config.monitor
self.plot = self.config.plot
devices = os.getenv('CUDA_VISIBLE_DEVICES')
if devices:
gpu_list = literal_eval(devices)
self.gpu_list = list(gpu_list) if isinstance(gpu_list, tuple) else [gpu_list]
else:
self.gpu_list = []
self.make_filelogger()
self.log(f'Initialized {self.__class__.__name__}')
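# Editor's note (hedged usage sketch, not part of the original codebase):
# parameters merge in the order DEFAULTS -> `config` -> keyword arguments,
# assuming batchflow's Config performs a nested update, so
#     controller = BaseController(config={'train': {'n_iters': 500}}, bar=True)
# should keep the default model_class but override 'n_iters' (500 instead of
# 100) and 'bar' (True instead of False).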
# Utility functions
def make_savepath(self, *postfix):
""" Create nested path from provided strings.
Uses `savedir` config option.
If the `savedir` config option is None, then None is returned: this is used as a signal to skip saving
of, for example, metric map images.
"""
savedir = self.config['savedir']
if savedir is not None:
path = os.path.join(savedir, *postfix[:-1])
os.makedirs(path, exist_ok=True)
return os.path.join(savedir, *postfix)
return None
# Logging
def make_filelogger(self):
""" Create logger inside `savedir`.
If the `savedir` option is not set, no file logger is created and `self.filelogger` is None.
"""
log_path = self.make_savepath('controller.log')
if log_path:
handler = logging.FileHandler(log_path, mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logger = logging.getLogger(str(id(self)))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
self.filelogger = logger.info
else:
self.filelogger = None
def log(self, msg):
""" Log supplied message into both filelogger and supplied one. """
process = psutil.Process(os.getpid())
uss = process.memory_full_info().uss / (1024 ** 3)
msg = f'{self.__class__.__name__} ::: {uss:2.4f} ::: {msg}'
logger = self.config.logger
if logger:
logger = logger if isinstance(logger, (tuple, list)) else [logger]
for logger_ in logger:
logger_(msg)
if self.filelogger:
self.filelogger(msg)
def log_to_file(self, msg, path):
""" Log message to a separate file. """
log_path = self.make_savepath(path)
if log_path:
with open(log_path, 'w', encoding='utf-8') as file:
print(msg, file=file)
# Dataset creation
def make_dataset(self, **kwargs):
""" Create dataset to train/inference on. Must be implemented in inherited classes. """
_ = kwargs
def make_notifier(self):
""" Create notifier. """
return {
'bar': self.config.bar,
'monitors': 'loss_history',
'file': self.make_savepath('末 model_loss.log'),
}
# Train
def train(self, dataset, sampler, config=None, **kwargs):
""" Train model on a provided dataset.
Uses the `get_train_template` method to create the model training pipeline.
Returns
-------
Model instance
"""
# Prepare parameters
config = config or {}
pipeline_config = Config({**self.config['common'], **self.config['train'], **config, **kwargs})
n_iters, prefetch, rescale = pipeline_config.pop(['n_iters', 'prefetch', 'rescale_batch_size'])
notifier = self.make_notifier() if self.config['bar'] else None
self.log(f'Train started on device={self.gpu_list}')
# Start resource tracking
if self.monitor:
monitor = Monitor(['uss', 'gpu', 'gpu_memory'], frequency=0.5, gpu_list=self.gpu_list)
monitor.__enter__()
# Make pipeline
pipeline_config['sampler'] = sampler
train_pipeline = self.get_train_template(**kwargs) << pipeline_config << dataset
# Log: pipeline_config to a file
self.log_to_file(pformat(pipeline_config.config, depth=2), '末 train_config.txt')
# Test batch to initialize model and log stats
batch = train_pipeline.next_batch()
model = train_pipeline.m('model')
self.log(f'Target batch size: {pipeline_config["batch_size"]}')
self.log(f'Actual batch size: {len(batch)}')
self.log(f'Cache sizes: {dataset.geometries.cache_size}')
self.log(f'Cache lengths: {dataset.geometries.cache_length}')
# Log: full and short model repr
self.log_to_file(repr(model.model), '末 model_repr.txt')
self.log_to_file(model._short_repr(), '末 model_shortrepr.txt')
# Rescale batch size, if needed
if rescale:
scale = pipeline_config['batch_size'] / len(batch)
pipeline_config['batch_size'] = int(pipeline_config['batch_size'] * scale)
self.log(f'Rescaling batch size to: {pipeline_config["batch_size"]}')
train_pipeline.set_config(pipeline_config)
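# Worked example with illustrative numbers: for a target batch_size of 64 and an actual
# first batch of 32 items, scale = 64 / 32 = 2, so the requested batch_size becomes
# 64 * 2 = 128, presumably so that batches produced by the pipeline end up close to
# the original target of 64.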
# Run training procedure
start_time = perf_counter()
self.log(f'Train run: n_iters={n_iters}, prefetch={prefetch}')
train_pipeline.run(n_iters=n_iters, prefetch=prefetch, notifier=notifier)
elapsed = perf_counter() - start_time
# Log: resource graphs
if self.monitor:
monitor.__exit__(None, None, None)
monitor.visualize(savepath=self.make_savepath('末 train_resource.png'), show=self.plot)
# Log: loss over iteration
plot_loss(model.loss_list, show=self.plot,
savepath=self.make_savepath('末 model_loss.png'))
final_loss = np.mean(model.loss_list[-25:])
# Log: model train information
self.log_to_file(model._information(config=True, devices=True, model=False, misc=True), '末 model_info.txt')
# Log: stats
self.log(f'Trained for {model.iteration} iterations in {elapsed:4.1f}s')
self.log(f'Average of 25 last loss values: {final_loss:4.3f}')
self.log(f'Cache sizes: {dataset.geometries.cache_size}')
self.log(f'Cache lengths: {dataset.geometries.cache_length}')
# Cleanup
torch.cuda.empty_cache()
gc.collect()
train_pipeline.reset('variables')
dataset.geometries.reset_cache()
self.log('')
self.train_log = {
'start_time': start_time,
'elapsed': elapsed,
'final_loss': final_loss,
}
return model
def finetune(self, dataset, sampler, model, config=None, **kwargs):
""" Train given model for a couple more iterations on a specific sampler.
Used to fine-tune the model on a specific range during the inference stage.
"""
# Prepare parameters
config = config or {}
pipeline_config = Config({**self.config['common'], **self.config['train'],
**self.config['finetune'], **config, **kwargs})
n_iters, prefetch = pipeline_config.pop(['n_iters', 'prefetch'])
pipeline_config['sampler'] = sampler
pipeline_config['source_model'] = model
train_pipeline = self.get_train_template(**kwargs) << pipeline_config << dataset
train_pipeline.run(n_iters=n_iters, prefetch=prefetch)
torch.cuda.empty_cache()
# Inference
def inference(self, dataset, model, **kwargs):
""" Inference: use trained/loaded model for making predictions on the supplied dataset.
Must be implemented in inherited classes.
"""
_ = dataset, model, kwargs
# Postprocess
def postprocess(self, predictions, **kwargs):
""" Optional postprocessing: algorithmic adjustments to predictions.
Must be implemented in inherited classes.
"""
_ = predictions, kwargs
# Evaluate
def evaluate(self, predictions, targets=None, dataset=None, **kwargs):
""" Assess quality of model generated outputs. Must be implemented in inherited classes. """
_ = predictions, targets, dataset, kwargs
# Pipelines: used inside train/inference methods
def get_train_template(self, **kwargs):
""" Define the whole training procedure pipeline including data loading, augmentation and model training. """
_ = kwargs
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
node-manager/volume_snapshot.go
|
// Copyright 2021 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nodemanager
import (
"context"
"fmt"
"os"
"time"
"github.com/streamingfast/snapshotter"
"go.uber.org/zap"
)
type GKEPVCSnapshotter struct {
tag string
project string
namespace string
pod string
prefix string
}
var gkeExampleConfigString = "type=gke-pvc-snapshot tag=v1 namespace=default project=mygcpproject prefix=datadir"
func gkeCheckMissing(conf map[string]string, param string) error {
if conf[param] == "" {
return fmt.Errorf("backup module gke-pvc-snapshot missing value for %s. Example: %s", param, gkeExampleConfigString)
}
return nil
}
func NewGKEPVCSnapshotter(conf map[string]string) (*GKEPVCSnapshotter, error) {
for _, label := range []string{"tag", "project", "namespace", "prefix"} {
if err := gkeCheckMissing(conf, label); err != nil {
return nil, err
}
}
return &GKEPVCSnapshotter{
tag: conf["tag"],
project: conf["project"],
namespace: conf["namespace"],
pod: os.Getenv("HOSTNAME"),
prefix: conf["prefix"],
}, nil
}
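// Hypothetical usage sketch for NewGKEPVCSnapshotter (the conf map mirrors
// gkeExampleConfigString and is assumed to have been parsed elsewhere from the
// backup-module configuration string):
//
//   conf := map[string]string{
//       "tag": "v1", "project": "mygcpproject",
//       "namespace": "default", "prefix": "datadir",
//   }
//   backuper, err := NewGKEPVCSnapshotter(conf)
//
// The pod name comes from the HOSTNAME environment variable, which Kubernetes sets
// to the pod name by default.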
func (s *GKEPVCSnapshotter) RequiresStop() bool {
return true
}
func (s *GKEPVCSnapshotter) Backup(lastSeenBlockNum uint32) (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
snapshotName := snapshotter.GenerateName(s.namespace, s.tag, lastSeenBlockNum)
return snapshotName, snapshotter.TakeSnapshot(ctx, snapshotName, s.project, s.namespace, s.pod, s.prefix)
}
func (s *Superviser) TakeVolumeSnapshot(volumeSnapshotTag, project, namespace, pod, prefix string, lastSeenBlockNum uint64) error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
snapshotName := snapshotter.GenerateName(namespace, volumeSnapshotTag, uint32(lastSeenBlockNum))
s.Logger.Info("starting snapshot", zap.String("name", snapshotName))
return snapshotter.TakeSnapshot(ctx, snapshotName, project, namespace, pod, prefix)
}
|
[
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
go
| 1 | 0 | |
daemon/daemon_unix.go
|
// +build linux freebsd
package daemon // import "github.com/docker/docker/daemon"
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
statsV1 "github.com/containerd/cgroups/stats/v1"
statsV2 "github.com/containerd/cgroups/v2/stats"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/blkiodev"
pblkiodev "github.com/docker/docker/api/types/blkiodev"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/runconfig"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/drivers/bridge"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/netutils"
"github.com/docker/libnetwork/options"
lntypes "github.com/docker/libnetwork/types"
"github.com/moby/sys/mount"
"github.com/opencontainers/runc/libcontainer/cgroups"
rsystem "github.com/opencontainers/runc/libcontainer/system"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
isWindows = false
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
linuxMinCPUShares = 2
linuxMaxCPUShares = 262144
platformSupported = true
// It's not a kernel limit; we want this 4M minimum so that a container remains reasonably functional
linuxMinMemory = 4194304
// constants for remapped root settings
defaultIDSpecifier = "default"
defaultRemappedID = "dockremap"
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
cgroupNoneDriver = "none"
// DefaultRuntimeName is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeName = "runc"
)
type containerGetter interface {
GetContainer(string) (*container.Container, error)
}
func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
memory := specs.LinuxMemory{}
if config.Memory > 0 {
memory.Limit = &config.Memory
}
if config.MemoryReservation > 0 {
memory.Reservation = &config.MemoryReservation
}
if config.MemorySwap > 0 {
memory.Swap = &config.MemorySwap
}
if config.MemorySwappiness != nil {
swappiness := uint64(*config.MemorySwappiness)
memory.Swappiness = &swappiness
}
if config.OomKillDisable != nil {
memory.DisableOOMKiller = config.OomKillDisable
}
if config.KernelMemory != 0 {
memory.Kernel = &config.KernelMemory
}
if config.KernelMemoryTCP != 0 {
memory.KernelTCP = &config.KernelMemoryTCP
}
return &memory
}
func getPidsLimit(config containertypes.Resources) *specs.LinuxPids {
if config.PidsLimit == nil {
return nil
}
if *config.PidsLimit <= 0 {
// docker API allows 0 and negative values to unset this to be consistent
// with default values. When updating values, runc requires -1 to unset
// the previous limit.
return &specs.LinuxPids{Limit: -1}
}
return &specs.LinuxPids{Limit: *config.PidsLimit}
}
func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
cpu := specs.LinuxCPU{}
if config.CPUShares < 0 {
return nil, fmt.Errorf("shares: invalid argument")
}
if config.CPUShares >= 0 {
shares := uint64(config.CPUShares)
cpu.Shares = &shares
}
if config.CpusetCpus != "" {
cpu.Cpus = config.CpusetCpus
}
if config.CpusetMems != "" {
cpu.Mems = config.CpusetMems
}
if config.NanoCPUs > 0 {
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
period := uint64(100 * time.Millisecond / time.Microsecond)
quota := config.NanoCPUs * int64(period) / 1e9
cpu.Period = &period
cpu.Quota = &quota
}
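// Worked example with an illustrative value: NanoCPUs = 2500000000 ("2.5 CPUs") and the
// fixed 100ms period of 100000us give quota = 2500000000 * 100000 / 1e9 = 250000us,
// i.e. 250ms of CPU time per 100ms period.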
if config.CPUPeriod != 0 {
period := uint64(config.CPUPeriod)
cpu.Period = &period
}
if config.CPUQuota != 0 {
q := config.CPUQuota
cpu.Quota = &q
}
if config.CPURealtimePeriod != 0 {
period := uint64(config.CPURealtimePeriod)
cpu.RealtimePeriod = &period
}
if config.CPURealtimeRuntime != 0 {
c := config.CPURealtimeRuntime
cpu.RealtimeRuntime = &c
}
return &cpu, nil
}
func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
var stat unix.Stat_t
var blkioWeightDevices []specs.LinuxWeightDevice
for _, weightDevice := range config.BlkioWeightDevice {
if err := unix.Stat(weightDevice.Path, &stat); err != nil {
return nil, err
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
// The type is 32bit on mips.
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
blkioWeightDevices = append(blkioWeightDevices, d)
}
return blkioWeightDevices, nil
}
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
return parseSecurityOpt(container, hostConfig)
}
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
var (
labelOpts []string
err error
)
for _, opt := range config.SecurityOpt {
if opt == "no-new-privileges" {
container.NoNewPrivileges = true
continue
}
if opt == "disable" {
labelOpts = append(labelOpts, "disable")
continue
}
var con []string
if strings.Contains(opt, "=") {
con = strings.SplitN(opt, "=", 2)
} else if strings.Contains(opt, ":") {
con = strings.SplitN(opt, ":", 2)
logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
}
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
switch con[0] {
case "label":
labelOpts = append(labelOpts, con[1])
case "apparmor":
container.AppArmorProfile = con[1]
case "seccomp":
container.SeccompProfile = con[1]
case "no-new-privileges":
noNewPrivileges, err := strconv.ParseBool(con[1])
if err != nil {
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
container.NoNewPrivileges = noNewPrivileges
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err
}
func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) {
var throttleDevices []specs.LinuxThrottleDevice
var stat unix.Stat_t
for _, d := range devs {
if err := unix.Stat(d.Path, &stat); err != nil {
return nil, err
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
// the type is 32bit on mips
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
throttleDevices = append(throttleDevices, d)
}
return throttleDevices, nil
}
// adjustParallelLimit takes a number of objects and a proposed limit and
// figures out if it's reasonable (and adjusts it accordingly). This is only
// used for daemon startup, which does a lot of parallel loading of containers
// (and if we exceed RLIMIT_NOFILE then we're in trouble).
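// Illustrative example: with n = 500 containers, a proposed limit of 128 and a soft
// RLIMIT_NOFILE of 200, neither 200 > 2*500 nor 200 > 2*128 holds, so the limit is
// reduced to 200/2 = 100 and a warning is logged.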
func adjustParallelLimit(n int, limit int) int {
// Rule-of-thumb overhead factor (how many files will each goroutine open
// simultaneously). Yes, this is ugly but to be frank this whole thing is
// ugly.
const overhead = 2
// On Linux, we need to ensure that parallelStartupJobs doesn't cause us to
// exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it
// and give a warning (since in theory the user should increase their
// ulimits to the largest possible value for dockerd).
var rlim unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil {
logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err)
return limit
}
softRlimit := int(rlim.Cur)
// Far fewer containers than RLIMIT_NOFILE. No need to adjust anything.
if softRlimit > overhead*n {
return limit
}
// RLIMIT_NOFILE big enough, no need to adjust anything.
if softRlimit > overhead*limit {
return limit
}
logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit)
return softRlimit / overhead
}
func checkKernel() error {
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407
// Docker 1.11 and above doesn't actually run on kernels older than 3.4,
// due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4).
if !kernel.CheckKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String())
}
}
return nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if adjustCPUShares && hostConfig.CPUShares > 0 {
// Handle unsupported CPUShares
if hostConfig.CPUShares < linuxMinCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
hostConfig.CPUShares = linuxMinCPUShares
} else if hostConfig.CPUShares > linuxMaxCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
hostConfig.CPUShares = linuxMaxCPUShares
}
}
if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
// By default, MemorySwap is set to twice the size of Memory.
hostConfig.MemorySwap = hostConfig.Memory * 2
}
if hostConfig.ShmSize == 0 {
hostConfig.ShmSize = config.DefaultShmSize
if daemon.configStore != nil {
hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
}
}
// Set default IPC mode, if unset for container
if hostConfig.IpcMode.IsEmpty() {
m := config.DefaultIpcMode
if daemon.configStore != nil {
m = daemon.configStore.IpcMode
}
hostConfig.IpcMode = containertypes.IpcMode(m)
}
// Set default cgroup namespace mode, if unset for container
if hostConfig.CgroupnsMode.IsEmpty() {
// for cgroup v2: unshare cgroupns even for privileged containers
// https://github.com/containers/libpod/pull/4374#issuecomment-549776387
if hostConfig.Privileged && !cgroups.IsCgroup2UnifiedMode() {
hostConfig.CgroupnsMode = containertypes.CgroupnsMode("host")
} else {
m := "host"
if cgroups.IsCgroup2UnifiedMode() {
m = "private"
}
if daemon.configStore != nil {
m = daemon.configStore.CgroupNamespaceMode
}
hostConfig.CgroupnsMode = containertypes.CgroupnsMode(m)
}
}
adaptSharedNamespaceContainer(daemon, hostConfig)
var err error
secOpts, err := daemon.generateSecurityOpt(hostConfig)
if err != nil {
return err
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...)
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
}
return nil
}
// adaptSharedNamespaceContainer replaces container name with its ID in hostConfig.
// More precisely, it rewrites `container:name` to `container:ID` for PidMode, IpcMode
// and NetworkMode.
//
// When a container shares its namespace with another container, using the ID keeps the
// namespace-sharing connection between the two containers even if the other container is renamed.
func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) {
containerPrefix := "container:"
if hostConfig.PidMode.IsContainer() {
pidContainer := hostConfig.PidMode.Container()
// if there is any error returned here, we just ignore it and leave it to be
// handled in the following logic
if c, err := daemon.GetContainer(pidContainer); err == nil {
hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID)
}
}
if hostConfig.IpcMode.IsContainer() {
ipcContainer := hostConfig.IpcMode.Container()
if c, err := daemon.GetContainer(ipcContainer); err == nil {
hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID)
}
}
if hostConfig.NetworkMode.IsContainer() {
netContainer := hostConfig.NetworkMode.ConnectedContainer()
if c, err := daemon.GetContainer(netContainer); err == nil {
hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID)
}
}
}
// verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration
func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) {
fixMemorySwappiness(resources)
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
}
if resources.Memory > 0 && !sysInfo.MemoryLimit {
warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.Memory = 0
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.")
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory {
return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage")
}
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage")
}
if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < 0 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
}
}
if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation {
warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.MemoryReservation = 0
}
if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB")
}
if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation {
return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage")
}
if resources.KernelMemory > 0 && !sysInfo.KernelMemory {
warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.KernelMemory = 0
}
if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB")
}
if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
}
if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
// warning the caller if they already wanted the feature to be off
if *resources.OomKillDisable {
warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")
}
resources.OomKillDisable = nil
}
if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 {
warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.")
}
if resources.PidsLimit != nil && !sysInfo.PidsLimit {
if *resources.PidsLimit > 0 {
warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
}
resources.PidsLimit = nil
}
// cpu subsystem checks and adjustments
if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set")
}
if resources.NanoCPUs > 0 && resources.CPUQuota > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set")
}
if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) {
return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted")
}
// The highest precision we could get on Linux is 0.001, by setting
//   cpu.cfs_period_us=1000000 (i.e. a 1000ms period)
//   cpu.cfs_quota_us=1000     (i.e. a 1ms quota)
// See the following link for details:
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
// Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error.
// The error message mentions 0.01 so that it is consistent with Windows.
if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
if resources.CPUShares > 0 && !sysInfo.CPUShares {
warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
resources.CPUShares = 0
}
if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.")
resources.CPUPeriod = 0
}
if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) {
return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)")
}
if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota {
warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.")
resources.CPUQuota = 0
}
if resources.CPUQuota > 0 && resources.CPUQuota < 1000 {
return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)")
}
if resources.CPUPercent > 0 {
warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS))
resources.CPUPercent = 0
}
// cpuset subsystem checks and adjustments
if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset {
warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.")
resources.CpusetCpus = ""
resources.CpusetMems = ""
}
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus)
}
if !cpusAvailable {
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems)
}
if !memsAvailable {
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems)
}
// blkio subsystem checks and adjustments
if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight {
warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
resources.BlkioWeight = 0
}
if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000")
}
if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 {
return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS)
}
if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{}
}
if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.")
resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{}
}
return warnings, nil
}
func (daemon *Daemon) getCgroupDriver() string {
if UsingSystemd(daemon.configStore) {
return cgroupSystemdDriver
}
if daemon.Rootless() {
return cgroupNoneDriver
}
return cgroupFsDriver
}
// getCD gets the raw value of the native.cgroupdriver option, if set.
func getCD(config *config.Config) string {
for _, option := range config.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
continue
}
return val
}
return ""
}
// VerifyCgroupDriver validates native.cgroupdriver
func VerifyCgroupDriver(config *config.Config) error {
cd := getCD(config)
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
if cd == cgroupNoneDriver {
return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd)
}
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}
// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
func UsingSystemd(config *config.Config) bool {
if getCD(config) == cgroupSystemdDriver {
return true
}
// On cgroup v2 hosts, default to systemd driver
if getCD(config) == "" && cgroups.IsCgroup2UnifiedMode() && IsRunningSystemd() {
return true
}
return false
}
// IsRunningSystemd is from https://github.com/opencontainers/runc/blob/46be7b612e2533c494e6a251111de46d8e286ed5/libcontainer/cgroups/systemd/common.go#L27-L33
func IsRunningSystemd() bool {
fi, err := os.Lstat("/run/systemd/system")
if err != nil {
return false
}
return fi.IsDir()
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
if hostConfig == nil {
return nil, nil
}
sysInfo := daemon.RawSysInfo(true)
w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update)
// regardless of whether err is nil, w may contain warnings.
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
if hostConfig.ShmSize < 0 {
return warnings, fmt.Errorf("SHM size can not be less than 0")
}
if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
}
// ip-forwarding does not affect container with '--net=host' (or '--net=none')
if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
}
if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 {
warnings = append(warnings, "Published ports are discarded when using host network mode")
}
// check for various conflicting options with user namespaces
if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
if hostConfig.Privileged {
return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. You must run the container in the host namespace when running privileged mode")
}
if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled")
}
if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if hostConfig.Runtime == "" {
hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
}
if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
}
parser := volumemounts.NewParser(runtime.GOOS)
for dest := range hostConfig.Tmpfs {
if err := parser.ValidateTmpfsMountDestination(dest); err != nil {
return warnings, err
}
}
if !hostConfig.CgroupnsMode.Valid() {
return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode)
}
if hostConfig.CgroupnsMode.IsPrivate() {
if !sysInfo.CgroupNamespaces {
warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.")
}
}
return warnings, nil
}
func (daemon *Daemon) loadRuntimes() error {
return daemon.initRuntimes(daemon.configStore.Runtimes)
}
func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) {
runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
// Remove old temp directory if any
os.RemoveAll(runtimeDir + "-old")
tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes")
if err != nil {
return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
}
defer func() {
if err != nil {
if err1 := os.RemoveAll(tmpDir); err1 != nil {
logrus.WithError(err1).WithField("dir", tmpDir).
Warn("failed to remove tmp dir")
}
return
}
if err = os.Rename(runtimeDir, runtimeDir+"-old"); err != nil {
return
}
if err = os.Rename(tmpDir, runtimeDir); err != nil {
err = errors.Wrap(err, "failed to setup runtimes dir, new containers may not start")
return
}
if err = os.RemoveAll(runtimeDir + "-old"); err != nil {
logrus.WithError(err).WithField("dir", tmpDir).
Warn("failed to remove old runtimes dir")
}
}()
for name, rt := range runtimes {
if len(rt.Args) == 0 {
continue
}
script := filepath.Join(tmpDir, name)
content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " "))
if err := ioutil.WriteFile(script, []byte(content), 0700); err != nil {
return err
}
}
return nil
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(conf *config.Config) error {
if conf.ContainerdNamespace == conf.ContainerdPluginNamespace {
return errors.New("containers namespace and plugins namespace cannot be the same")
}
// Check for mutually incompatible config options
if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" {
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
}
if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication {
return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true")
}
if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq {
conf.BridgeConfig.EnableIPMasq = false
}
if err := VerifyCgroupDriver(conf); err != nil {
return err
}
if conf.CgroupParent != "" && UsingSystemd(conf) {
if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") {
return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if conf.Rootless && UsingSystemd(conf) && !cgroups.IsCgroup2UnifiedMode() {
return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode")
}
if conf.DefaultRuntime == "" {
conf.DefaultRuntime = config.StockRuntimeName
}
if conf.Runtimes == nil {
conf.Runtimes = make(map[string]types.Runtime)
}
conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeName}
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
return checkKernel()
}
// configureMaxThreads sets the Go runtime max threads threshold
// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
func configureMaxThreads(config *config.Config) error {
mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max")
if err != nil {
return err
}
mtint, err := strconv.Atoi(strings.TrimSpace(string(mt)))
if err != nil {
return err
}
maxThreads := (mtint / 100) * 90
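// e.g. with /proc/sys/kernel/threads-max = 63529 (an illustrative value), integer
// arithmetic gives (63529 / 100) * 90 = 635 * 90 = 57150 threads.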
debug.SetMaxThreads(maxThreads)
logrus.Debugf("Golang's threads limit set to %d", maxThreads)
return nil
}
func overlaySupportsSelinux() (bool, error) {
f, err := os.Open("/proc/kallsyms")
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.HasSuffix(s.Text(), " security_inode_copy_up") {
return true, nil
}
}
return false, s.Err()
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *config.Config, driverName string) error {
if config.EnableSelinuxSupport {
if !selinuxEnabled() {
logrus.Warn("Docker could not enable SELinux on the host system")
return nil
}
if driverName == "overlay" || driverName == "overlay2" {
// If driver is overlay or overlay2, make sure kernel
// supports selinux with overlay.
supported, err := overlaySupportsSelinux()
if err != nil {
return err
}
if !supported {
logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
}
}
} else {
selinuxSetDisabled()
}
return nil
}
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
if len(activeSandboxes) > 0 {
logrus.Info("There are old running containers, the network config will not take effect")
return controller, nil
}
// Initialize default network on "null"
if n, _ := controller.NetworkByName("none"); n == nil {
if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
}
// Initialize default network on "host"
if n, _ := controller.NetworkByName("host"); n == nil {
if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
}
// Clear stale bridge network
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
}
if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
removeDefaultBridgeInterface()
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
} else {
removeDefaultBridgeInterface()
}
// Set HostGatewayIP to the default bridge's IP if it is empty
if daemon.configStore.HostGatewayIP == nil && controller != nil {
if n, err := controller.NetworkByName("bridge"); err == nil {
v4Info, v6Info := n.Info().IpamInfo()
var gateway net.IP
if len(v4Info) > 0 {
gateway = v4Info[0].Gateway.IP
} else if len(v6Info) > 0 {
gateway = v6Info[0].Gateway.IP
}
daemon.configStore.HostGatewayIP = gateway
}
}
return controller, nil
}
func driverOptions(config *config.Config) []nwconfig.Option {
bridgeConfig := options.Generic{
"EnableIPForwarding": config.BridgeConfig.EnableIPForward,
"EnableIPTables": config.BridgeConfig.EnableIPTables,
"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
"UserlandProxyPath": config.BridgeConfig.UserlandProxyPath}
bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
dOptions := []nwconfig.Option{}
dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
return dOptions
}
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
bridgeName := bridge.DefaultBridgeName
if config.BridgeConfig.Iface != "" {
bridgeName = config.BridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.BridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
}
ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
if err != nil {
return errors.Wrap(err, "list bridge addresses failed")
}
nw := nwList[0]
if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return errors.Wrap(err, "parse CIDR failed")
}
// Iterate through in case there are multiple addresses for the bridge
for _, entry := range nwList {
if fCIDR.Contains(entry.IP) {
nw = entry
break
}
}
}
ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
if config.BridgeConfig.IP != "" {
ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.PreferredPool = ipNet.String()
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.BridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
}
var (
deferIPv6Alloc bool
ipamV6Conf *libnetwork.IpamConf
)
if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" {
return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6"))
} else if config.BridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
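// e.g. --fixed-cidr-v6=2001:db8::/64 (illustrative) gives ones = 64, leaving
// 128-64 = 64 host bits (>= 48), so allocation is deferred; a /96 leaves only
// 32 host bits and is allocated by IPAM directly.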
ipamV6Conf = &libnetwork.IpamConf{
AuxAddresses: make(map[string]string),
PreferredPool: fCIDRv6.String(),
}
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.BridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
_, err = controller.NewNetwork("bridge", "bridge", "",
libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
// Remove default bridge interface if present (--bridge=none use case)
func removeDefaultBridgeInterface() {
if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
if err := netlink.LinkDel(lnk); err != nil {
logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
}
}
}
func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error {
return func(initPath containerfs.ContainerFS) error {
return initlayer.Setup(initPath, idMapping.RootPair())
}
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group
// uid - 32-bit unsigned int valid Linux UID value
// uid:gid - uid value; 32-bit unsigned int Linux GID value
//
// If no groupname is specified, and a username is specified, an attempt
// will be made to lookup a gid for that username as a groupname
//
// If names are used, they are verified to exist in passwd/group
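//
// Illustrative inputs (behavior inferred from the parsing below): "default" resolves to the
// "dockremap" user/group, "1000:1000" resolves both IDs to names via /etc/passwd and
// /etc/group, and a bare "someuser" additionally looks up a group with the same name.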
func parseRemappedRoot(usergrp string) (string, string, error) {
var (
userID, groupID int
username, groupname string
)
idparts := strings.Split(usergrp, ":")
if len(idparts) > 2 {
return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
}
if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
// must be a uid; take it as valid
userID = int(uid)
luser, err := idtools.LookupUID(userID)
if err != nil {
return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
}
username = luser.Name
if len(idparts) == 1 {
// if the uid was numeric and no gid was specified, take the uid as the gid
groupID = userID
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
}
} else {
lookupName := idparts[0]
// special case: if the user specified "default", they want Docker to create or
// use (after creation) the "dockremap" user/group for root remapping
if lookupName == defaultIDSpecifier {
lookupName = defaultRemappedID
}
luser, err := idtools.LookupUser(lookupName)
if err != nil && idparts[0] != defaultIDSpecifier {
// error if the name requested isn't the special "dockremap" ID
return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
} else if err != nil {
// special case-- if the username == "default", then we have been asked
// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
// ranges will be used for the user and group mappings in user namespaced containers
_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
if err == nil {
return defaultRemappedID, defaultRemappedID, nil
}
return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
}
username = luser.Name
if len(idparts) == 1 {
// we only have a string username, and no group specified; look up gid from username as group
group, err := idtools.LookupGroup(lookupName)
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
}
groupname = group.Name
}
}
if len(idparts) == 2 {
// groupname or gid is separately specified and must be resolved
// to an unsigned 32-bit gid
if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
// must be a gid, take it as valid
groupID = int(gid)
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err)
}
groupname = lgrp.Name
} else {
// not a number; attempt a lookup
if _, err := idtools.LookupGroup(idparts[1]); err != nil {
return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err)
}
groupname = idparts[1]
}
}
return username, groupname, nil
}
func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) {
if runtime.GOOS != "linux" && config.RemappedRoot != "" {
return nil, fmt.Errorf("User namespaces are only supported on Linux")
}
// if the daemon was started with remapped root option, parse
// the config option to the int uid,gid values
if config.RemappedRoot != "" {
username, groupname, err := parseRemappedRoot(config.RemappedRoot)
if err != nil {
return nil, err
}
if username == "root" {
// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
// effectively
logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
return &idtools.IdentityMapping{}, nil
}
logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
// update remapped root setting now that we have resolved them to actual names
config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
// try with username:groupname, uid:groupname, username:gid, uid:gid,
// but keep the original error message (err)
mappings, err := idtools.NewIdentityMapping(username, groupname)
if err == nil {
return mappings, nil
}
user, lookupErr := idtools.LookupUser(username)
if lookupErr != nil {
return nil, errors.Wrap(err, "Can't create ID mappings")
}
logrus.Infof("Can't create ID mappings with username:groupname %s:%s, try uid:groupname %d:%s", username, groupname, user.Uid, groupname)
mappings, lookupErr = idtools.NewIdentityMapping(fmt.Sprintf("%d", user.Uid), groupname)
if lookupErr == nil {
return mappings, nil
}
logrus.Infof("Can't create ID mappings with uid:groupname %d:%s, try username:gid %s:%d", user.Uid, groupname, username, user.Gid)
mappings, lookupErr = idtools.NewIdentityMapping(username, fmt.Sprintf("%d", user.Gid))
if lookupErr == nil {
return mappings, nil
}
logrus.Infof("Can't create ID mappings with username:gid %s:%d, try uid:gid %d:%d", username, user.Gid, user.Uid, user.Gid)
mappings, lookupErr = idtools.NewIdentityMapping(fmt.Sprintf("%d", user.Uid), fmt.Sprintf("%d", user.Gid))
if lookupErr == nil {
return mappings, nil
}
return nil, errors.Wrap(err, "Can't create ID mappings")
}
return &idtools.IdentityMapping{}, nil
}
func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error {
config.Root = rootDir
// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
// so that syscalls executing as non-root, operating on subdirectories of the graph root
// (e.g. mounted layers of a container) can traverse this path.
// The user namespace support will create subdirectories for the remapped root host uid:gid
// pair owned by that same uid:gid pair for proper write access to those needed metadata and
// layer content subtrees.
if _, err := os.Stat(rootDir); err == nil {
// root current exists; verify the access bits are correct by setting them
if err = os.Chmod(rootDir, 0711); err != nil {
return err
}
} else if os.IsNotExist(err) {
// no root exists yet, create it 0711 with root:root ownership
if err := os.MkdirAll(rootDir, 0711); err != nil {
return err
}
}
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIdentity.UID, rootIdentity.GID))
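// e.g. with the default root dir "/var/lib/docker" and a remapped root whose subordinate
// ID range starts at 100000:100000 (illustrative values), the daemon root becomes
// "/var/lib/docker/100000.100000".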
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIdentity); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
// we also need to verify that any pre-existing directories in the path to
// the graphroot won't block access to remapped root--if any pre-existing directory
// has strict permissions that don't allow "x", container start will fail, so
// better to warn and fail now
dirPath := config.Root
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if !idtools.CanAccess(dirPath, rootIdentity) {
return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root)
}
}
}
if err := setupDaemonRootPropagation(config); err != nil {
logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
}
return nil
}
func setupDaemonRootPropagation(cfg *config.Config) error {
rootParentMount, mountOptions, err := getSourceMount(cfg.Root)
if err != nil {
return errors.Wrap(err, "error getting daemon root's parent mount")
}
var cleanupOldFile bool
cleanupFile := getUnmountOnShutdownPath(cfg)
defer func() {
if !cleanupOldFile {
return
}
if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
}
}()
if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
cleanupOldFile = true
return nil
}
if err := mount.MakeShared(cfg.Root); err != nil {
return errors.Wrap(err, "could not setup daemon root propagation to shared")
}
// check the case where this may have already been a mount to itself.
// If so then the daemon only performed a remount and should not try to unmount this later.
if rootParentMount == cfg.Root {
cleanupOldFile = true
return nil
}
if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil {
return errors.Wrap(err, "error creating dir to store mount cleanup file")
}
if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
}
return nil
}
// getUnmountOnShutdownPath generates the path used when writing the file that signals to the daemon that on shutdown
// the daemon root should be unmounted.
func getUnmountOnShutdownPath(config *config.Config) string {
return filepath.Join(config.ExecRoot, "unmount-on-shutdown")
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
return nil
}
for _, l := range hostConfig.Links {
name, alias, err := opts.ParseLink(l)
if err != nil {
return err
}
child, err := daemon.GetContainer(name)
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "could not get container for %s", name)
}
for child.HostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
child, err = daemon.GetContainer(parts[1])
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "Could not get container for %s", parts[1])
}
}
if child.HostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
if err := daemon.registerLink(container, child, alias); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
_, err := container.WriteHostConfig()
return err
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
return daemon.Unmount(container)
}
func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
if !c.IsRunning() {
return nil, errNotRunning(c.ID)
}
cs, err := daemon.containerd.Stats(context.Background(), c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, containerNotFound(c.ID)
}
return nil, err
}
s := &types.StatsJSON{}
s.Read = cs.Read
stats := cs.Metrics
switch t := stats.(type) {
case *statsV1.Metrics:
return daemon.statsV1(s, t)
case *statsV2.Metrics:
return daemon.statsV2(s, t)
default:
return nil, errors.Errorf("unexpected type of metrics %+v", t)
}
}
func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) {
if stats.Blkio != nil {
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive),
IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive),
IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive),
SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive),
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.Usage.Total,
PercpuUsage: stats.CPU.Usage.PerCPU,
UsageInKernelmode: stats.CPU.Usage.Kernel,
UsageInUsermode: stats.CPU.Usage.User,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.Throttling.Periods,
ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods,
ThrottledTime: stats.CPU.Throttling.ThrottledTime,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["cache"] = stats.Memory.Cache
raw["rss"] = stats.Memory.RSS
raw["rss_huge"] = stats.Memory.RSSHuge
raw["mapped_file"] = stats.Memory.MappedFile
raw["dirty"] = stats.Memory.Dirty
raw["writeback"] = stats.Memory.Writeback
raw["pgpgin"] = stats.Memory.PgPgIn
raw["pgpgout"] = stats.Memory.PgPgOut
raw["pgfault"] = stats.Memory.PgFault
raw["pgmajfault"] = stats.Memory.PgMajFault
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit
raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit
raw["total_cache"] = stats.Memory.TotalCache
raw["total_rss"] = stats.Memory.TotalRSS
raw["total_rss_huge"] = stats.Memory.TotalRSSHuge
raw["total_mapped_file"] = stats.Memory.TotalMappedFile
raw["total_dirty"] = stats.Memory.TotalDirty
raw["total_writeback"] = stats.Memory.TotalWriteback
raw["total_pgpgin"] = stats.Memory.TotalPgPgIn
raw["total_pgpgout"] = stats.Memory.TotalPgPgOut
raw["total_pgfault"] = stats.Memory.TotalPgFault
raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault
raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon
raw["total_active_anon"] = stats.Memory.TotalActiveAnon
raw["total_inactive_file"] = stats.Memory.TotalInactiveFile
raw["total_active_file"] = stats.Memory.TotalActiveFile
raw["total_unevictable"] = stats.Memory.TotalUnevictable
if stats.Memory.Usage != nil {
s.MemoryStats = types.MemoryStats{
Stats: raw,
Usage: stats.Memory.Usage.Usage,
MaxUsage: stats.Memory.Usage.Max,
Limit: stats.Memory.Usage.Limit,
Failcnt: stats.Memory.Usage.Failcnt,
}
} else {
s.MemoryStats = types.MemoryStats{
Stats: raw,
}
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) {
if stats.Io != nil {
var isbr []types.BlkioStatEntry
for _, re := range stats.Io.Usage {
isbr = append(isbr,
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "read",
Value: re.Rbytes,
},
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "write",
Value: re.Wbytes,
},
)
}
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: isbr,
// Other fields are unsupported
}
}
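// Note (added for clarity): cgroup v2 reports the CPU fields below in microseconds
// (UsageUsec, SystemUsec, UserUsec, ThrottledUsec), while the Docker stats API uses
// nanoseconds, hence the *1000 conversions.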
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.UsageUsec * 1000,
// PercpuUsage is not supported
UsageInKernelmode: stats.CPU.SystemUsec * 1000,
UsageInUsermode: stats.CPU.UserUsec * 1000,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.NrPeriods,
ThrottledPeriods: stats.CPU.NrThrottled,
ThrottledTime: stats.CPU.ThrottledUsec * 1000,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["anon"] = stats.Memory.Anon
raw["file"] = stats.Memory.File
raw["kernel_stack"] = stats.Memory.KernelStack
raw["slab"] = stats.Memory.Slab
raw["sock"] = stats.Memory.Sock
raw["shmem"] = stats.Memory.Shmem
raw["file_mapped"] = stats.Memory.FileMapped
raw["file_dirty"] = stats.Memory.FileDirty
raw["file_writeback"] = stats.Memory.FileWriteback
raw["anon_thp"] = stats.Memory.AnonThp
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["slab_reclaimable"] = stats.Memory.SlabReclaimable
raw["slab_unreclaimable"] = stats.Memory.SlabUnreclaimable
raw["pgfault"] = stats.Memory.Pgfault
raw["pgmajfault"] = stats.Memory.Pgmajfault
raw["workingset_refault"] = stats.Memory.WorkingsetRefault
raw["workingset_activate"] = stats.Memory.WorkingsetActivate
raw["workingset_nodereclaim"] = stats.Memory.WorkingsetNodereclaim
raw["pgrefill"] = stats.Memory.Pgrefill
raw["pgscan"] = stats.Memory.Pgscan
raw["pgsteal"] = stats.Memory.Pgsteal
raw["pgactivate"] = stats.Memory.Pgactivate
raw["pgdeactivate"] = stats.Memory.Pgdeactivate
raw["pglazyfree"] = stats.Memory.Pglazyfree
raw["pglazyfreed"] = stats.Memory.Pglazyfreed
raw["thp_fault_alloc"] = stats.Memory.ThpFaultAlloc
raw["thp_collapse_alloc"] = stats.Memory.ThpCollapseAlloc
s.MemoryStats = types.MemoryStats{
// Stats is not compatible with v1
Stats: raw,
Usage: stats.Memory.Usage,
// MaxUsage is not supported
Limit: stats.Memory.UsageLimit,
// TODO: Failcnt
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows; on other platforms it is a no-op.
func (daemon *Daemon) setDefaultIsolation() error {
return nil
}
// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
// setup the daemons oom_score_adj
if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
return err
}
if err := setMayDetachMounts(); err != nil {
logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
}
return nil
}
// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not exposed in upstream kernels because it is not configurable there;
// the equivalent behavior has been enabled unconditionally upstream since 3.15.
func setMayDetachMounts() error {
f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
}
defer f.Close()
_, err = f.WriteString("1")
if os.IsPermission(err) {
// Setting may_detach_mounts does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !rsystem.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
}
return nil
}
return err
}
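// For reference (added): writing "1" to /proc/sys/fs/may_detach_mounts is the same
// knob as `sysctl -w fs.may_detach_mounts=1` on kernels that expose it.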
func setupOOMScoreAdj(score int) error {
f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
stringScore := strconv.Itoa(score)
_, err = f.WriteString(stringScore)
if os.IsPermission(err) {
// Setting oom_score_adj does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !rsystem.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
}
return nil
}
return err
}
func (daemon *Daemon) initCgroupsPath(path string) error {
if path == "/" || path == "." {
return nil
}
if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
return nil
}
if cgroups.IsCgroup2UnifiedMode() {
return fmt.Errorf("daemon-scoped cpu-rt-period and cpu-rt-runtime are not implemented for cgroup v2")
}
// Recursively create cgroup to ensure that the system and all parent cgroups have values set
// for the period and runtime as this limits what the children can be set to.
daemon.initCgroupsPath(filepath.Dir(path))
mnt, root, err := cgroups.FindCgroupMountpointAndRoot("", "cpu")
if err != nil {
return err
}
// When docker is run inside docker, the root is based on the host cgroup.
// Should this be handled in runc/libcontainer/cgroups ?
if strings.HasPrefix(root, "/docker/") {
root = "/"
}
path = filepath.Join(mnt, root, path)
sysInfo := daemon.RawSysInfo(true)
if err := maybeCreateCPURealTimeFile(sysInfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
return err
}
return maybeCreateCPURealTimeFile(sysInfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
}
func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
if sysinfoPresent && configValue != 0 {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil {
return err
}
}
return nil
}
func (daemon *Daemon) setupSeccompProfile() error {
if daemon.configStore.SeccompProfile != "" {
daemon.seccompProfilePath = daemon.configStore.SeccompProfile
b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile)
if err != nil {
return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err)
}
daemon.seccompProfile = b
}
return nil
}
func (daemon *Daemon) useShimV2() bool {
return cgroups.IsCgroup2UnifiedMode()
}
// RawSysInfo returns *sysinfo.SysInfo.
func (daemon *Daemon) RawSysInfo(quiet bool) *sysinfo.SysInfo {
var opts []sysinfo.Opt
if daemon.getCgroupDriver() == cgroupSystemdDriver {
rootlesskitParentEUID := os.Getenv("ROOTLESSKIT_PARENT_EUID")
if rootlesskitParentEUID != "" {
groupPath := fmt.Sprintf("/user.slice/user-%s.slice", rootlesskitParentEUID)
opts = append(opts, sysinfo.WithCgroup2GroupPath(groupPath))
}
}
return sysinfo.New(quiet, opts...)
}
|
[
"\"DOCKER_NOWARN_KERNEL_VERSION\"",
"\"ROOTLESSKIT_PARENT_EUID\""
] |
[] |
[
"DOCKER_NOWARN_KERNEL_VERSION",
"ROOTLESSKIT_PARENT_EUID"
] |
[]
|
["DOCKER_NOWARN_KERNEL_VERSION", "ROOTLESSKIT_PARENT_EUID"]
|
go
| 2 | 0 | |
scripts/mix_train.py
|
import argparse
import gc
import logging
import os
import sys
import time
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sgan.data.loader import data_loader
from sgan.losses import gan_g_loss, gan_d_loss, l2_loss
from sgan.losses import displacement_error, final_displacement_error
# from sgan.ffd_models import TrajectoryGenerator, TrajectoryDiscriminator
# from sgan.models import TrajectoryGenerator, TrajectoryDiscriminator
from sgan.utils import int_tuple, bool_flag, get_total_norm
from sgan.utils import relative_to_abs, get_dset_path
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser()
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
# Dataset options
parser.add_argument('--delim', default='\t')
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--obs_len', default=8, type=int)
parser.add_argument('--pred_len', default=8, type=int)
parser.add_argument('--skip', default=1, type=int)
# Optimization
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--num_iterations', default=1000, type=int)
parser.add_argument('--num_epochs', default=0, type=int)
# Model Options
parser.add_argument('--embedding_dim', default=16, type=int)
parser.add_argument('--num_layers', default=1, type=int)
parser.add_argument('--dropout', default=0, type=float)
parser.add_argument('--batch_norm', default=0, type=bool_flag)
parser.add_argument('--mlp_dim', default=64, type=int)
# Generator Options
parser.add_argument('--encoder_h_dim_g', default=16, type=int)
parser.add_argument('--encoder_num_layers', default=2, type=int)
parser.add_argument('--rep_dim', default=16, type=int)
parser.add_argument('--decoder_h_dim_g', default=16, type=int)
parser.add_argument('--decoder_num_layers', default=3, type=int)
parser.add_argument('--noise_dim', default=(0,), type=int_tuple)
parser.add_argument('--noise_type', default='gaussian')
parser.add_argument('--noise_mix_type', default='global')
parser.add_argument('--clipping_threshold_g', default=2.0, type=float)
parser.add_argument('--g_learning_rate', default=0.0001, type=float)
parser.add_argument('--g_steps', default=1, type=int)
#g = 0.0001
# Pooling Options
# parser.add_argument('--pooling_type', default='pool_net')
# parser.add_argument('--pool_every_timestep', default=1, type=bool_flag)
parser.add_argument('--pooling_type', default=None)
parser.add_argument('--pool_every_timestep', default=0, type=bool_flag)
# Pool Net Option
parser.add_argument('--bottleneck_dim', default=8, type=int)
# Social Pooling Options
parser.add_argument('--neighborhood_size', default=2.0, type=float)
parser.add_argument('--grid_size', default=8, type=int)
# Discriminator Options
parser.add_argument('--d_type', default='local', type=str)
parser.add_argument('--encoder_h_dim_d', default=16, type=int)
parser.add_argument('--discrim_num_layers', default=3, type=int)
parser.add_argument('--d_learning_rate', default=0.001, type=float)
parser.add_argument('--d_steps', default=1, type=int)
parser.add_argument('--clipping_threshold_d', default=0, type=float)
#d = 0.001
# Output
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--print_every', default=100, type=int)
parser.add_argument('--checkpoint_every', default=300, type=int)
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--num_samples_check', default=5000, type=int)
# Misc
parser.add_argument('--use_gpu', default=1, type=int)
parser.add_argument('--timing', default=0, type=int)
parser.add_argument('--gpu_num', default="0", type=str)
# Important changing Options
parser.add_argument('--GAN_type', default='ff')
parser.add_argument('--dataset_name', default='eth', type=str)
parser.add_argument('--restore_from_checkpoint', default=0, type=int)
parser.add_argument('--checkpoint_name', default='checkpoint')
# Loss Options
parser.add_argument('--l2_loss_weight', default=0.5, type=float)
parser.add_argument('--best_k', default=1, type=int)
parser.add_argument('--controlled_expt', default=0, type=bool_flag)
parser.add_argument('--pos_embed', default=0, type=bool_flag)
parser.add_argument('--pos_embed_freq', default=100, type=int)
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight)
def get_dtypes(args):
long_dtype = torch.LongTensor
float_dtype = torch.FloatTensor
if args.use_gpu == 1:
long_dtype = torch.cuda.LongTensor
float_dtype = torch.cuda.FloatTensor
return long_dtype, float_dtype
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
train_path = get_dset_path(args.dataset_name, 'train')
val_path = get_dset_path(args.dataset_name, 'val')
long_dtype, float_dtype = get_dtypes(args)
logger.info("Initializing train dataset")
train_dset, train_loader = data_loader(args, train_path)
logger.info("Initializing val dataset")
_, val_loader = data_loader(args, val_path)
iterations_per_epoch = len(train_dset) / args.batch_size / args.d_steps
# if args.num_epochs:
# args.num_iterations = int(iterations_per_epoch * args.num_epochs)
logger.info(
'There are {} iterations per epoch'.format(iterations_per_epoch)
)
global TrajectoryGenerator, TrajectoryDiscriminator
if args.GAN_type == 'rnn':
print("Default Social GAN")
from sgan.models import TrajectoryGenerator, TrajectoryDiscriminator
elif args.GAN_type == 'simple_rnn':
print("Simple RNN Social GAN")
from sgan.rnn_models import TrajectoryGenerator, TrajectoryDiscriminator
else:
print("Feedforward GAN")
from sgan.ffd_models import TrajectoryGenerator, TrajectoryDiscriminator
if args.GAN_type == 'ff':
generator = TrajectoryGenerator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
encoder_h_dim=args.encoder_h_dim_g,
decoder_h_dim=args.decoder_h_dim_g,
rep_dim=args.rep_dim,
mlp_dim=args.mlp_dim,
encoder_num_layers=args.encoder_num_layers,
decoder_num_layers=args.decoder_num_layers,
noise_dim=args.noise_dim,
noise_type=args.noise_type,
noise_mix_type=args.noise_mix_type,
pooling_type=args.pooling_type,
pool_every_timestep=args.pool_every_timestep,
dropout=args.dropout,
bottleneck_dim=args.bottleneck_dim,
neighborhood_size=args.neighborhood_size,
grid_size=args.grid_size,
batch_norm=args.batch_norm,
pos_embed=args.pos_embed,
pos_embed_freq=args.pos_embed_freq,
)
discriminator = TrajectoryDiscriminator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
h_dim=args.encoder_h_dim_d,
mlp_dim=args.mlp_dim,
num_layers=args.discrim_num_layers,
dropout=args.dropout,
batch_norm=args.batch_norm,
d_type=args.d_type)
else:
generator = TrajectoryGenerator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
encoder_h_dim=args.encoder_h_dim_g,
decoder_h_dim=args.decoder_h_dim_g,
mlp_dim=args.mlp_dim,
num_layers=args.num_layers,
noise_dim=args.noise_dim,
noise_type=args.noise_type,
noise_mix_type=args.noise_mix_type,
pooling_type=args.pooling_type,
pool_every_timestep=args.pool_every_timestep,
dropout=args.dropout,
bottleneck_dim=args.bottleneck_dim,
neighborhood_size=args.neighborhood_size,
grid_size=args.grid_size,
batch_norm=args.batch_norm)
discriminator = TrajectoryDiscriminator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
h_dim=args.encoder_h_dim_d,
mlp_dim=args.mlp_dim,
num_layers=args.num_layers,
dropout=args.dropout,
batch_norm=args.batch_norm,
d_type=args.d_type)
generator.apply(init_weights)
generator.type(float_dtype).train()
logger.info('Here is the generator:')
logger.info(generator)
discriminator.apply(init_weights)
discriminator.type(float_dtype).train()
logger.info('Here is the discriminator:')
logger.info(discriminator)
g_loss_fn = gan_g_loss
d_loss_fn = gan_d_loss
optimizer_g = optim.Adam(generator.parameters(), lr=args.g_learning_rate)
optimizer_d = optim.Adam(
discriminator.parameters(), lr=args.d_learning_rate
)
# Maybe restore from checkpoint
restore_path = None
if args.checkpoint_start_from is not None:
restore_path = args.checkpoint_start_from
elif args.restore_from_checkpoint == 1:
restore_path = os.path.join(args.output_dir,
'%s_with_model.pt' % args.checkpoint_name)
if restore_path is not None and os.path.isfile(restore_path):
logger.info('Restoring from checkpoint {}'.format(restore_path))
checkpoint = torch.load(restore_path)
generator.load_state_dict(checkpoint['g_state'])
discriminator.load_state_dict(checkpoint['d_state'])
optimizer_g.load_state_dict(checkpoint['g_optim_state'])
optimizer_d.load_state_dict(checkpoint['d_optim_state'])
t = checkpoint['counters']['t']
epoch = checkpoint['counters']['epoch']
checkpoint['restore_ts'].append(t)
else:
# Starting from scratch, so initialize checkpoint data structure
t, epoch = 0, 0
checkpoint = {
'args': args.__dict__,
'G_losses': defaultdict(list),
'D_losses': defaultdict(list),
'losses_ts': [],
'metrics_val': defaultdict(list),
'metrics_train': defaultdict(list),
'sample_ts': [],
'restore_ts': [],
'norm_g': [],
'norm_d': [],
'counters': {
't': None,
'epoch': None,
},
'g_state': None,
'g_optim_state': None,
'd_state': None,
'd_optim_state': None,
'g_best_state': None,
'd_best_state': None,
'best_t': None,
'g_best_nl_state': None,
'd_best_state_nl': None,
'best_t_nl': None,
}
t0 = None
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.75,0.75])
while t < args.num_iterations:
gc.collect()
d_steps_left = args.d_steps
g_steps_left = args.g_steps
epoch += 1
logger.info('Starting epoch {}'.format(epoch))
for batch in train_loader:
if args.timing == 1:
torch.cuda.synchronize()
t1 = time.time()
# Decide whether to use the batch for stepping on discriminator or
# generator; an iteration consists of args.d_steps steps on the
# discriminator followed by args.g_steps steps on the generator.
if d_steps_left > 0:
step_type = 'd'
losses_d = discriminator_step(args, batch, generator,
discriminator, d_loss_fn,
optimizer_d)
checkpoint['norm_d'].append(
get_total_norm(discriminator.parameters()))
d_steps_left -= 1
elif g_steps_left > 0:
step_type = 'g'
losses_g = generator_step(args, batch, generator,
discriminator, g_loss_fn,
optimizer_g)
checkpoint['norm_g'].append(
get_total_norm(generator.parameters())
)
g_steps_left -= 1
if args.timing == 1:
torch.cuda.synchronize()
t2 = time.time()
logger.info('{} step took {}'.format(step_type, t2 - t1))
# Skip the rest if we are not at the end of an iteration
if d_steps_left > 0 or g_steps_left > 0:
continue
if args.timing == 1:
if t0 is not None:
logger.info('Iteration {} took {}'.format(
t - 1, time.time() - t0
))
t0 = time.time()
# Maybe save loss
if t % args.print_every == 0:
logger.info('t = {} / {}'.format(t + 1, args.num_iterations))
for k, v in sorted(losses_d.items()):
logger.info(' [D] {}: {:.3f}'.format(k, v))
checkpoint['D_losses'][k].append(v)
for k, v in sorted(losses_g.items()):
logger.info(' [G] {}: {:.3f}'.format(k, v))
checkpoint['G_losses'][k].append(v)
checkpoint['losses_ts'].append(t)
if args.controlled_expt:
if t % 10 == 0:
save = False
# if t == 160:
# save = True
# print(t)
plot_trajectory(fig, ax, args, val_loader, generator, save)
# Maybe save a checkpoint
if t > 0 and t % args.checkpoint_every == 0:
checkpoint['counters']['t'] = t
checkpoint['counters']['epoch'] = epoch
checkpoint['sample_ts'].append(t)
# Check stats on the validation set
logger.info('Checking stats on val ...')
metrics_val = check_accuracy(
args, val_loader, generator, discriminator, d_loss_fn
)
logger.info('Checking stats on train ...')
metrics_train = check_accuracy(
args, train_loader, generator, discriminator,
d_loss_fn, limit=True
)
for k, v in sorted(metrics_val.items()):
logger.info(' [val] {}: {:.3f}'.format(k, v))
checkpoint['metrics_val'][k].append(v)
for k, v in sorted(metrics_train.items()):
logger.info(' [train] {}: {:.3f}'.format(k, v))
checkpoint['metrics_train'][k].append(v)
min_ade = min(checkpoint['metrics_val']['ade'])
min_ade_nl = min(checkpoint['metrics_val']['ade_nl'])
if metrics_val['ade'] == min_ade:
logger.info('New low for avg_disp_error')
checkpoint['best_t'] = t
checkpoint['g_best_state'] = generator.state_dict()
checkpoint['d_best_state'] = discriminator.state_dict()
if metrics_val['ade_nl'] == min_ade_nl:
logger.info('New low for avg_disp_error_nl')
checkpoint['best_t_nl'] = t
checkpoint['g_best_nl_state'] = generator.state_dict()
checkpoint['d_best_nl_state'] = discriminator.state_dict()
if metrics_val['ade'] == min_ade:
# Save another checkpoint with model weights and
# optimizer state
checkpoint['g_state'] = generator.state_dict()
checkpoint['g_optim_state'] = optimizer_g.state_dict()
checkpoint['d_state'] = discriminator.state_dict()
checkpoint['d_optim_state'] = optimizer_d.state_dict()
checkpoint_path = os.path.join(
args.output_dir, '%s_with_model.pt' % args.checkpoint_name
)
logger.info('Saving checkpoint to {}'.format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
logger.info('Done.')
# Save a checkpoint with no model weights by making a shallow
# copy of the checkpoint excluding some items
checkpoint_path = os.path.join(
args.output_dir, '%s_no_model.pt' % args.checkpoint_name)
logger.info('Saving checkpoint to {}'.format(checkpoint_path))
key_blacklist = [
'g_state', 'd_state', 'g_best_state', 'g_best_nl_state',
'g_optim_state', 'd_optim_state', 'd_best_state',
'd_best_nl_state'
]
small_checkpoint = {}
for k, v in checkpoint.items():
if k not in key_blacklist:
small_checkpoint[k] = v
torch.save(small_checkpoint, checkpoint_path)
logger.info('Done.')
t += 1
d_steps_left = args.d_steps
g_steps_left = args.g_steps
if t >= args.num_iterations:
break
def discriminator_step(
args, batch, generator, discriminator, d_loss_fn, optimizer_d
):
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
loss_mask, seq_start_end) = batch
losses = {}
loss = torch.zeros(1).to(pred_traj_gt)
generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake_rel = generator_out
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)
# Compute loss with optional gradient penalty
data_loss = d_loss_fn(scores_real, scores_fake)
losses['D_data_loss'] = data_loss.item()
loss += data_loss
losses['D_total_loss'] = loss.item()
optimizer_d.zero_grad()
loss.backward()
if args.clipping_threshold_d > 0:
nn.utils.clip_grad_norm_(discriminator.parameters(),
args.clipping_threshold_d)
optimizer_d.step()
return losses
def generator_step(
args, batch, generator, discriminator, g_loss_fn, optimizer_g
):
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
loss_mask, seq_start_end) = batch
losses = {}
loss = torch.zeros(1).to(pred_traj_gt)
g_l2_loss_rel = []
loss_mask = loss_mask[:, args.obs_len:]
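# Best-of-k ("variety") loss, added note: the loop below samples best_k candidate
# trajectories and, per sequence, only the minimum L2 error is kept (see the
# torch.min further down), which lets the generator spread its samples over
# several plausible modes instead of averaging them.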
for _ in range(args.best_k):
# obs_traj_rel.requires_grad_(True)
# obs_traj.requires_grad_(True)
generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake_rel = generator_out
# print("Evaluating Attention")
# # get_attn(pred_traj_fake_rel, obs_traj_rel)
# print("Getting Attention")
# pred_traj_zero = pred_traj_fake_rel[:,0,:]
# obs_traj_zero = obs_traj_rel[:,0,:]
# print(pred_traj_zero.shape)
# print(obs_traj_zero.shape)
# print(pred_traj_zero[1,1])
# print(pred_traj_zero.requires_grad)
# print(obs_traj_zero.requires_grad)
# obs_traj_zero_grad = torch.autograd.grad(pred_traj_zero[1,1], obs_traj_zero, allow_unused=True)
# print(obs_traj_zero_grad)
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
if args.l2_loss_weight > 0:
g_l2_loss_rel.append(args.l2_loss_weight * l2_loss(
pred_traj_fake_rel,
pred_traj_gt_rel,
loss_mask,
mode='raw'))
g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
if args.l2_loss_weight > 0:
g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
for start, end in seq_start_end.data:
_g_l2_loss_rel = g_l2_loss_rel[start:end]
_g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
_g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
loss_mask[start:end])
g_l2_loss_sum_rel += _g_l2_loss_rel
losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
loss += 2*args.l2_loss_weight*g_l2_loss_sum_rel
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
discriminator_loss = g_loss_fn(scores_fake)
loss += 2*(1 - args.l2_loss_weight)*discriminator_loss
losses['G_discriminator_loss'] = discriminator_loss.item()
losses['G_total_loss'] = loss.item()
optimizer_g.zero_grad()
loss.backward()
if args.clipping_threshold_g > 0:
nn.utils.clip_grad_norm_(
generator.parameters(), args.clipping_threshold_g
)
optimizer_g.step()
return losses
def check_accuracy(
args, loader, generator, discriminator, d_loss_fn, limit=False
):
d_losses = []
metrics = {}
g_l2_losses_abs, g_l2_losses_rel = [], []
disp_error, disp_error_l, disp_error_nl = [], [], []
f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
total_traj, total_traj_l, total_traj_nl = 0, 0, 0
loss_mask_sum = 0
generator.eval()
with torch.no_grad():
for batch in loader:
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
non_linear_ped, loss_mask, seq_start_end) = batch
linear_ped = 1 - non_linear_ped
loss_mask = loss_mask[:, args.obs_len:]
pred_traj_fake_rel = generator(
obs_traj, obs_traj_rel, seq_start_end
)
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
pred_traj_fake_rel, loss_mask
)
ade, ade_l, ade_nl = cal_ade(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
)
fde, fde_l, fde_nl = cal_fde(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
)
traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)
d_loss = d_loss_fn(scores_real, scores_fake)
d_losses.append(d_loss.item())
g_l2_losses_abs.append(g_l2_loss_abs.item())
g_l2_losses_rel.append(g_l2_loss_rel.item())
disp_error.append(ade.item())
disp_error_l.append(ade_l.item())
disp_error_nl.append(ade_nl.item())
f_disp_error.append(fde.item())
f_disp_error_l.append(fde_l.item())
f_disp_error_nl.append(fde_nl.item())
loss_mask_sum += torch.numel(loss_mask.data)
total_traj += pred_traj_gt.size(1)
total_traj_l += torch.sum(linear_ped).item()
total_traj_nl += torch.sum(non_linear_ped).item()
if limit and total_traj >= args.num_samples_check:
break
metrics['d_loss'] = sum(d_losses) / len(d_losses)
metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum
metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
metrics['fde'] = sum(f_disp_error) / total_traj
if total_traj_l != 0:
metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)
metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l
else:
metrics['ade_l'] = 0
metrics['fde_l'] = 0
if total_traj_nl != 0:
metrics['ade_nl'] = sum(disp_error_nl) / (
total_traj_nl * args.pred_len)
metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl
else:
metrics['ade_nl'] = 0
metrics['fde_nl'] = 0
generator.train()
return metrics
def cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake, pred_traj_fake_rel,
loss_mask
):
g_l2_loss_abs = l2_loss(
pred_traj_fake, pred_traj_gt, loss_mask, mode='sum'
)
g_l2_loss_rel = l2_loss(
pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='sum'
)
return g_l2_loss_abs, g_l2_loss_rel
def cal_ade(pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped):
ade = displacement_error(pred_traj_fake, pred_traj_gt)
ade_l = displacement_error(pred_traj_fake, pred_traj_gt, linear_ped)
ade_nl = displacement_error(pred_traj_fake, pred_traj_gt, non_linear_ped)
return ade, ade_l, ade_nl
def cal_fde(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
):
fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1])
fde_l = final_displacement_error(
pred_traj_fake[-1], pred_traj_gt[-1], linear_ped
)
fde_nl = final_displacement_error(
pred_traj_fake[-1], pred_traj_gt[-1], non_linear_ped
)
return fde, fde_l, fde_nl
## Important for controlled Experiments to plot trajectory distribution
def plot_trajectory(fig, ax, args, loader, generator, save=False):
with torch.no_grad():
for batch in loader:
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
non_linear_ped, loss_mask, seq_start_end) = batch
for k in range(20):
pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
pred_traj_fake_permuted = pred_traj_fake.permute(1, 0, 2)
pred_traj_gt_permuted = pred_traj_gt.permute(1, 0, 2)
obs_traj_permuted = obs_traj.permute(1, 0, 2)
if k == 0:
view_traj(ax, pred_traj_fake_permuted[0,:,:], pred_traj_gt_permuted[0,:,:], obs_traj_permuted[0,:,:], args, all_three=True)
else:
view_traj(ax, pred_traj_fake_permuted[0,:,:], pred_traj_gt_permuted[0,:,:], obs_traj_permuted[0,:,:], args)
ax.legend()
ax.set_ylim((-5, 5))
ax.set_xlim((0, 20))
plt.show()
if save:
print("Saving Plot")
fig.savefig("ffd_converge.jpg", bbox_inches="tight")
ax.clear()
break
return
## Important for controlled Experiments to plot trajectory distribution
def view_traj(ax, fake_pred, real_pred, obs, args, all_three=False):
fake_pred = fake_pred.cpu().numpy()
real_pred = real_pred.cpu().numpy()
obs = obs.cpu().numpy()
fake_traj = np.concatenate((obs, fake_pred), axis=0)
real_traj = np.concatenate((obs, real_pred), axis=0)
if all_three:
x_obs = np.tile(np.linspace(1, 8, num=8, endpoint=True), (7,1))
y_obs = np.zeros((7, 8))
##Real Predictions
x_pred = np.tile(np.linspace(9, 16, num=8, endpoint=True), (7,1))
y_pred = np.zeros((7, 8))
##1st Mode (Straight Line)
##2nd Mode (Up Slant Line)
y_pred[1, :] = 1*np.linspace(1, 8, num=8, endpoint=True)
##3rd Mode (Down Slant Line)
y_pred[2, :] = -1*np.linspace(1, 8, num=8, endpoint=True)
# ##4th Mode (Up Curved Line)
# x_pred[3, :] = 4 + 0.5*x_pred[3, :]
# y_pred[3, :] = (x_pred[3, :] - 8)**2
# ##5th Mode (Down Curved Line)
# x_pred[4, :] = 4 + 0.5*x_pred[4, :]
# y_pred[4, :] = -(x_pred[4, :] - 8)**2
##4th Mode (Steeper Up Slant Line; curved variant kept commented out below)
y_pred[3, :] = 2*np.linspace(1, 8, num=8, endpoint=True)
# x_pred[3, :] = 4 + 0.5*x_pred[3, :]
# y_pred[3, :] = (x_pred[3, :] - 8)**2
##5th Mode (Steeper Down Slant Line)
y_pred[4, :] = -2*np.linspace(1, 8, num=8, endpoint=True)
##6th Mode (Steepest Up Slant Line)
y_pred[5, :] = 3*np.linspace(1, 8, num=8, endpoint=True)
##7th Mode (Steepest Down Slant Line)
y_pred[6, :] = -3*np.linspace(1, 8, num=8, endpoint=True)
ax.plot(fake_pred[:,0], fake_pred[:,1], 'g', label='Predicted')
ax.plot(x_obs[0,:], y_obs[0,:], 'b', label='Observed')
ax.plot(x_pred[0,:], y_pred[0,:], 'r', label='Real Pred 1')
if args.dataset_name != 'straight':
ax.plot(x_pred[1,:], y_pred[1,:], 'r', label='Real Pred 2')
ax.plot(x_pred[2,:], y_pred[2,:], 'r', label='Real Pred 3')
if args.dataset_name != 'threeTraj' and args.dataset_name != 'uneqthreeTraj':
ax.plot(x_pred[3,:], y_pred[3,:], 'r', label='Real Pred 4')
ax.plot(x_pred[4,:], y_pred[4,:], 'r', label='Real Pred 5')
if args.dataset_name != 'fiveTraj':
ax.plot(x_pred[5,:], y_pred[5,:], 'r', label='Real Pred 6')
ax.plot(x_pred[6,:], y_pred[6,:], 'r', label='Real Pred 7')
else:
# pass
ax.plot(fake_pred[:,0], fake_pred[:,1], 'g')
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
HackerRank/Problem Solving/Drawing Book/drawing-book.py
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'pageCount' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER n
# 2. INTEGER p
#
def pageCount(n, p):
return min(p//2, n//2 - p//2)
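# Worked example (added for illustration): for n = 6 pages and p = 2, turning from
# the front costs p//2 = 1 flip and from the back n//2 - p//2 = 3 - 1 = 2 flips,
# so pageCount returns min(1, 2) = 1.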
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
p = int(input().strip())
result = pageCount(n, p)
fptr.write(str(result) + '\n')
fptr.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
pkg/cmdutil/repo_override.go
|
package cmdutil
import (
"os"
"sort"
"strings"
"github.com/scmn-dev/gh-api/core/ghrepo"
"github.com/spf13/cobra"
)
func executeParentHooks(cmd *cobra.Command, args []string) error {
for cmd.HasParent() {
cmd = cmd.Parent()
if cmd.PersistentPreRunE != nil {
return cmd.PersistentPreRunE(cmd, args)
}
}
return nil
}
func EnableRepoOverride(cmd *cobra.Command, f *Factory) {
cmd.PersistentFlags().StringP("repo", "R", "", "Select another repository using the `[HOST/]OWNER/REPO` format")
_ = cmd.RegisterFlagCompletionFunc("repo", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
remotes, err := f.Remotes()
if err != nil {
return nil, cobra.ShellCompDirectiveError
}
config, err := f.Config()
if err != nil {
return nil, cobra.ShellCompDirectiveError
}
defaultHost, err := config.DefaultHost()
if err != nil {
return nil, cobra.ShellCompDirectiveError
}
var results []string
for _, remote := range remotes {
repo := remote.RepoOwner() + "/" + remote.RepoName()
if !strings.EqualFold(remote.RepoHost(), defaultHost) {
repo = remote.RepoHost() + "/" + repo
}
if strings.HasPrefix(repo, toComplete) {
results = append(results, repo)
}
}
sort.Strings(results)
return results, cobra.ShellCompDirectiveNoFileComp
})
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
if err := executeParentHooks(cmd, args); err != nil {
return err
}
repoOverride, _ := cmd.Flags().GetString("repo")
f.BaseRepo = OverrideBaseRepoFunc(f, repoOverride)
return nil
}
}
func OverrideBaseRepoFunc(f *Factory, override string) func() (ghrepo.Interface, error) {
if override == "" {
override = os.Getenv("GH_REPO")
}
if override != "" {
return func() (ghrepo.Interface, error) {
return ghrepo.FromFullName(override)
}
}
return f.BaseRepo
}
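// Resolution order sketch (added; owner/repo values are placeholders):
//
//	--repo octocat/hello-world          // explicit -R/--repo flag wins
//	GH_REPO=ghe.example.com/owner/repo  // otherwise the GH_REPO environment variable
//	(neither set)                       // fall back to the factory's BaseRepo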
|
[
"\"GH_REPO\""
] |
[] |
[
"GH_REPO"
] |
[]
|
["GH_REPO"]
|
go
| 1 | 0 | |
jrpc2/jsonrpc2.go
|
package jrpc2
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"reflect"
"strconv"
"strings"
)
const specVersion = "2.0"
const ParseError = -32700
const InvalidRequest = -32600
const MethodNotFound = -32601
const InvalidParams = -32602 // JSON-RPC 2.0 "Invalid params"
const InternalErr = -32603
// ids for JSON-RPC v2 can be a string, an integer
// or null. We use the pointer type in Request to
// simulate a null value for JSON mapping; this
// struct manages the rest. for giggles, we map all Ids
// to strings, but in the case of this being something
// that's populated from an incoming request, we need to maintain the
// 'actual' type of it so when we send it back over the
// wire, we don't confuse the other side.
type Id struct {
intVal uint32
strVal string
}
func (id Id) MarshalJSON() ([]byte, error) {
if id.strVal != "" {
return json.Marshal(id.strVal)
}
return json.Marshal(id.intVal)
}
func (id *Id) UnmarshalJSON(data []byte) error {
// check first character...
if len(data) == 0 {
return NewError(nil, ParseError, "no data provided")
}
switch rune(data[0]) {
case '"':
if data[len(data)-1] != '"' {
return NewError(nil, ParseError, "Parse error")
}
id.strVal = string(data[1 : len(data)-1])
return nil
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
val, err := strconv.ParseUint(string(data), 10, 32)
if err != nil {
return NewError(nil, InvalidRequest, fmt.Sprintf("Invalid Id value: %s", string(data)))
}
id.intVal = uint32(val)
return nil
case '{': // objects not allowed!
fallthrough
case '[': // arrays not allowed!
fallthrough
default:
return NewError(nil, InvalidRequest, fmt.Sprintf("Invalid Id value: %s", string(data)))
}
}
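// Examples of accepted wire forms (added for illustration):
//
//	"abc-123"  -> Id{strVal: "abc-123"}
//	42         -> Id{intVal: 42}
//	[7] or {}  -> InvalidRequest error (arrays and objects are rejected)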
func (id *Id) Val() string {
return id.String()
}
func (id Id) String() string {
if id.strVal != "" {
return id.strVal
}
return strconv.FormatInt(int64(id.intVal), 10)
}
func NewId(val string) *Id {
return &Id{
strVal: val,
}
}
func NewIdAsInt(val uint32) *Id {
return &Id{
intVal: val,
}
}
// Models for the model gods
type Request struct {
Id *Id `json:"id,omitempty"`
Method Method `json:"-"`
}
type Method interface {
Name() string
}
// Responses are sent by the Server
type Response struct {
Result Result `json:"result,omitempty"`
Error *RpcError `json:"error,omitempty"`
Id *Id `json:"id"`
}
// RawResponses are what the client gets back
// from an RPC call.
// Leaving raw json around is kind of hacky,
// until you realize how clean it is from a parsing
// perspective
type RawResponse struct {
Id *Id `json:"id"`
Raw json.RawMessage `json:"-"`
Error *RpcError `json:"error,omitempty"`
}
type Result interface{}
type RpcError struct {
Code int `json:"code"`
Message string `json:"message"`
Data json.RawMessage `json:"data,omitempty"`
}
// provide your own object to parse this with! ehehe
func (e *RpcError) ParseData(into interface{}) error {
return json.Unmarshal(e.Data, into)
}
func (e *RpcError) Error() string {
return fmt.Sprintf("%d:%s", e.Code, e.Message)
}
// What we really want is the parameter values off of
// the Method object
// called on the client side
func (r *Request) MarshalJSON() ([]byte, error) {
type Alias Request
return json.Marshal(&struct {
Version string `json:"jsonrpc"`
Name string `json:"method"`
Params map[string]interface{} `json:"params"`
*Alias
}{
Alias: (*Alias)(r),
Params: GetNamedParams(r.Method),
Version: specVersion,
Name: r.Method.Name(),
})
}
type CodedError struct {
Id *Id
Code int
Msg string
}
func (e CodedError) Error() string {
return fmt.Sprintf("%d: %s", e.Code, e.Msg)
}
func NewError(id *Id, code int, msg string) *CodedError {
return &CodedError{id, code, msg}
}
func (r *Request) UnmarshalJSON(data []byte) error {
panic("You can't unmarshal a request")
}
func (r *Response) MarshalJSON() ([]byte, error) {
type Alias Response
return json.Marshal(&struct {
Version string `json:"jsonrpc"`
*Alias
}{
Version: specVersion,
Alias: (*Alias)(r),
})
}
func (r *Response) UnmarshalJSON(data []byte) error {
type Alias Response
raw := &struct {
Version string `json:"jsonrpc"`
*Alias
}{
Alias: (*Alias)(r),
}
err := json.Unmarshal(data, &raw)
if err != nil {
return err
}
if r.Error == nil && r.Result == nil {
return errors.New("Must send either a result or an error in a response")
}
return nil
// note that I can't really do anything wrt
// the Result type at this stage, because
// I no longer have access to the Method,
// which contains the Result's type info
}
func (r *RawResponse) MarshalJSON() ([]byte, error) {
type Alias RawResponse
return json.Marshal(&struct {
Version string `json:"jsonrpc"`
Result json.RawMessage `json:"result,omitempty"`
*Alias
}{
Version: specVersion,
Result: r.Raw,
Alias: (*Alias)(r),
})
}
func (r *RawResponse) UnmarshalJSON(data []byte) error {
type Alias RawResponse
raw := &struct {
Version string `json:"jsonrpc"`
Result json.RawMessage `json:"result,omitempty"`
*Alias
}{
Alias: (*Alias)(r),
}
err := json.Unmarshal(data, &raw)
if err != nil {
return err
}
// map these together
r.Raw = raw.Result
if len(r.Raw) == 0 && r.Error == nil {
return errors.New("Must send either a result or an error in a response")
}
return nil
}
func GetParams(target Method) []interface{} {
params := make([]interface{}, 0)
v := reflect.ValueOf(target)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
typeOf := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
fType := typeOf.Field(i)
if !field.CanInterface() {
continue
}
tag, _ := fType.Tag.Lookup("json")
if _, omit := parseTag(tag); omit && isZero(field.Interface()) {
continue
}
params = append(params, field.Interface())
}
return params
}
func parseTag(tag string) (name string, omitempty bool) {
omitempty = false
name = ""
if tag == "" || tag == "-" {
return name, omitempty
}
for i, field := range strings.Split(tag, ",") {
if field == "omitempty" {
omitempty = true
}
if i == 0 && field != "omitempty" {
name = field
}
}
return name, omitempty
}
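// For example (added for clarity):
//
//	parseTag("amount,omitempty") -> ("amount", true)
//	parseTag("omitempty")        -> ("", true)
//	parseTag("-")                -> ("", false)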
func GetNamedParams(target Method) map[string]interface{} {
params := make(map[string]interface{})
v := reflect.ValueOf(target)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
typeOf := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
fType := typeOf.Field(i)
if !field.CanInterface() {
continue
}
// if field is empty and has an 'omitempty' tag, leave it out
var name string
tag, ok := fType.Tag.Lookup("json")
if ok {
var omit bool
name, omit = parseTag(tag)
if omit && isZero(field.Interface()) {
continue
}
if name == "" {
name = strings.ToLower(fType.Name)
}
} else {
name = strings.ToLower(fType.Name)
}
params[name] = field.Interface()
}
return params
}
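// Mapping sketch (added; the struct below is hypothetical and assumes a Name()
// method to satisfy Method):
//
//	type Ping struct {
//	    Count int    `json:"count"`
//	    Label string `json:"label,omitempty"`
//	}
//
//	GetNamedParams(&Ping{Count: 2}) -> map[string]interface{}{"count": 2}
//	// Label is zero-valued and tagged omitempty, so it is dropped.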
func isZero(x interface{}) bool {
return reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())
}
// Map passed in params to the fields on the method, in listed order
func ParseParamArray(target Method, params []interface{}) error {
targetValue := reflect.Indirect(reflect.ValueOf(target))
fieldCount := targetFieldCount(targetValue)
if fieldCount < len(params) {
return errors.New(fmt.Sprintf("Too many parameters. Expected %d, received %d. See `help %s` for expected usage", fieldCount, len(params), target.Name()))
}
for i := range params {
// it's possible that there's a mismatch between
// 'settable' fields on the target and the params
// that we've received. for simplicity's sake,
// if you don't put all of your param names at the top
// of your object, well that's your problem.
fVal := targetValue.Field(i)
value := params[i]
err := innerParse(targetValue, fVal, value)
if err != nil {
return err
}
}
return nil
}
// We assume that all interfaceable fields are in the right place.
// This lets us ignore non-interfaceable fields though.
func targetFieldCount(fieldVal reflect.Value) int {
count := 0
for i := 0; i < fieldVal.NumField(); i++ {
if fieldVal.Field(i).CanInterface() {
count++
}
}
return count
}
func ParseNamedParams(target Method, params map[string]interface{}) error {
targetValue := reflect.Indirect(reflect.ValueOf(target))
return innerParseNamed(targetValue, params)
}
func innerParseNamed(targetValue reflect.Value, params map[string]interface{}) error {
tType := targetValue.Type()
for key, value := range params {
found := false
for i := 0; i < targetValue.NumField(); i++ {
fVal := targetValue.Field(i)
if !fVal.CanSet() {
continue
}
fT := tType.Field(i)
// check for the json tag match, as well a simple
// lower case name match
tag, _ := fT.Tag.Lookup("json")
if tag == key || key == strings.ToLower(fT.Name) {
found = true
err := innerParse(targetValue, fVal, value)
if err != nil {
return err
}
break
}
}
if !found && len(os.Getenv("GOLIGHT_STRICT_MODE")) > 0 {
return NewError(nil, InvalidParams, fmt.Sprintf("No exported field found %s.%s", targetValue.Type().Name(), key))
}
}
return nil
}
func innerParse(targetValue reflect.Value, fVal reflect.Value, value interface{}) error {
if !fVal.CanSet() {
var name string
if fVal.IsValid() {
name = fVal.Type().Name()
} else {
name = "<unknown>"
}
return NewError(nil, InvalidParams, fmt.Sprintf("Field %s.%s isn't settable. Are you sure it's exported?", targetValue.Type().Name(), name))
}
v := reflect.ValueOf(value)
if fVal.Kind() == v.Kind() &&
fVal.Kind() != reflect.Map &&
fVal.Kind() != reflect.Slice {
fVal.Set(v)
return nil
}
// json.RawMessage escape hatch
var eg json.RawMessage
if fVal.Type() == reflect.TypeOf(eg) {
out, err := json.Marshal(value)
if err != nil {
return err
}
jm := json.RawMessage(out)
fVal.Set(reflect.ValueOf(jm))
return nil
}
switch fVal.Kind() {
case reflect.Map:
fVal.Set(reflect.MakeMap(fVal.Type()))
// the only types of maps that we can get thru the json
// parser are map[string]interface{} ones
mapVal := value.(map[string]interface{})
keyType := fVal.Type().Key()
for key, entry := range mapVal {
eV := reflect.New(fVal.Type().Elem()).Elem()
kV := reflect.ValueOf(key).Convert(keyType)
err := innerParse(targetValue, eV, entry)
if err != nil {
return err
}
fVal.SetMapIndex(kV, eV)
}
return nil
case reflect.Slice:
// string -> []byte parsing
sv, sok := value.(string)
var xx []uint8
if sok && fVal.Type() == reflect.TypeOf(xx) {
// actually, we assume the string is
// a hexstring because ... yikes.
// fixme: better would be to have a 'hexstring' type
av, err := hex.DecodeString(sv)
if err != nil {
return err
}
fVal.Set(reflect.ValueOf(av))
return nil
}
av := value.([]interface{})
fVal.Set(reflect.MakeSlice(fVal.Type(), len(av), len(av)))
for i := range av {
err := innerParse(targetValue, fVal.Index(i), av[i])
if err != nil {
return err
}
}
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
// float32 won't happen because of the json parser we're using
if v.Type().Kind() != reflect.Float64 {
return NewError(nil, InvalidParams, fmt.Sprintf("Expecting float64 input for %s.%s, but got %s", targetValue.Type().Name(), fVal.Type().Name(), v.Type()))
}
// Since our json parser (encoding/json) automatically defaults any
// 'number' field to a float64, here we mash it back into
// an int field, since that's what we ostensibly wanted.
//
// there's probably a nicer way to do this but
// the json/encoding lib checks for 'fitability' while
// decoding so we don't have to worry about
// an overflow here :D
switch fVal.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
fVal.SetInt(int64(value.(float64)))
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
fVal.SetUint(uint64(value.(float64)))
return nil
}
case reflect.Ptr:
if v.Kind() == reflect.Invalid {
// i'm afraid that's a nil, my dear
return nil
}
if v.Kind() != reflect.Map {
return NewError(nil, InvalidParams, fmt.Sprintf("Types don't match. Expected a map[string]interface{} from the JSON, instead got %s", v.Kind().String()))
}
if fVal.IsNil() {
// You need a new pointer object thing here
// so allocate one with this voodoo-magique
fVal.Set(reflect.New(fVal.Type().Elem()))
}
return innerParseNamed(fVal.Elem(), value.(map[string]interface{}))
case reflect.Struct:
if v.Kind() != reflect.Map {
return NewError(nil, InvalidParams, fmt.Sprintf("Types don't match. Expected a map[string]interface{} from the JSON, instead got %s", v.Kind().String()))
}
return innerParseNamed(fVal, value.(map[string]interface{}))
}
return NewError(nil, InvalidParams, fmt.Sprintf("Incompatible types: %s.%s (%s) != %s", targetValue.Type().Name(), fVal.Type().Name(), fVal.Kind(), v.Type().Kind()))
}
|
[
"\"GOLIGHT_STRICT_MODE\""
] |
[] |
[
"GOLIGHT_STRICT_MODE"
] |
[]
|
["GOLIGHT_STRICT_MODE"]
|
go
| 1 | 0 | |
forward.py
|
from convert import print_prob, load_image, checkpoint_fn, meta_fn
import tensorflow as tf
import os
layers = 50
img = load_image("data/cat.jpg")
sess = tf.Session()
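# Note (added): TV_DIR_DATA should point at a data directory containing a 'weights'
# subfolder with the converted ResNet files; e.g. (hypothetical) TV_DIR_DATA=/data/tv
# makes this script load /data/tv/weights/<meta_fn(50)>.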
dirname = os.path.join(os.environ['TV_DIR_DATA'], 'weights')
filename = meta_fn(layers)
filename = os.path.join(dirname, filename)
new_saver = tf.train.import_meta_graph(filename)
new_saver.restore(sess, filename)
graph = tf.get_default_graph()
prob_tensor = graph.get_tensor_by_name("prob:0")
images = graph.get_tensor_by_name("images:0")
for op in graph.get_operations():
print(op.name)
print("graph restored")
batch = img.reshape((1, 224, 224, 3))
feed_dict = {images: batch}
prob = sess.run(prob_tensor, feed_dict=feed_dict)
print_prob(prob[0])
|
[] |
[] |
[
"TV_DIR_DATA"
] |
[]
|
["TV_DIR_DATA"]
|
python
| 1 | 0 | |
cmd/aws-s3-proxy/main.go
|
package main
import (
"fmt"
"log"
"net"
"net/http"
"os"
"github.com/go-openapi/swag"
"github.com/pottava/aws-s3-proxy/internal/config"
"github.com/pottava/aws-s3-proxy/internal/controllers"
common "github.com/pottava/aws-s3-proxy/internal/http"
"github.com/pottava/aws-s3-proxy/internal/service"
)
var (
Ver = "dev"
Commit string
Date string
)
func main() {
validateAwsConfigurations()
http.Handle("/", common.WrapHandler(controllers.AwsS3))
http.HandleFunc("/--version", func(w http.ResponseWriter, r *http.Request) {
if len(Commit) > 0 && len(Date) > 0 {
fmt.Fprintf(w, "%s-%s (built at %s)\n", Ver, Commit, Date)
return
}
fmt.Fprintln(w, Ver)
})
// Listen & Serve
addr := net.JoinHostPort(config.Config.Host, config.Config.Port)
log.Printf("[service] listening on %s", addr)
if (len(config.Config.SslCert) > 0) && (len(config.Config.SslKey) > 0) {
log.Fatal(http.ListenAndServeTLS(
addr, config.Config.SslCert, config.Config.SslKey, nil,
))
} else {
log.Fatal(http.ListenAndServe(addr, nil))
}
}
func validateAwsConfigurations() {
if len(os.Getenv("AWS_ACCESS_KEY_ID")) == 0 {
log.Print("Not defined environment variable: AWS_ACCESS_KEY_ID")
}
if len(os.Getenv("AWS_SECRET_ACCESS_KEY")) == 0 {
log.Print("Not defined environment variable: AWS_SECRET_ACCESS_KEY")
}
if len(os.Getenv("AWS_S3_BUCKET")) == 0 {
log.Fatal("Missing required environment variable: AWS_S3_BUCKET")
}
if swag.IsZero(config.Config.AwsRegion) {
config.Config.AwsRegion = "us-east-1"
if region, err := service.GuessBucketRegion(config.Config.S3Bucket); err == nil {
config.Config.AwsRegion = region
}
}
}
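// Startup environment sketch (added; values are placeholders):
//
//	AWS_S3_BUCKET=my-bucket      // required: startup aborts via log.Fatal without it
//	AWS_ACCESS_KEY_ID=...        // only a warning is logged if missing
//	AWS_SECRET_ACCESS_KEY=...    // only a warning is logged if missing
//
// When config.Config.AwsRegion is empty it defaults to us-east-1, or to the region
// guessed from the bucket when GuessBucketRegion succeeds.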
|
[
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_S3_BUCKET\""
] |
[] |
[
"AWS_S3_BUCKET",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_S3_BUCKET", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
go
| 3 | 0 | |
imagebuilder/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go
|
// Package api represents API abstractions for rendering service generated files.
package api
import (
"bytes"
"fmt"
"path"
"regexp"
"sort"
"strings"
"text/template"
)
// An API defines a service API's definition and the logic to serialize the definition.
type API struct {
Metadata Metadata
Operations map[string]*Operation
Shapes map[string]*Shape
Waiters []Waiter
Documentation string
// Set to true to avoid removing unused shapes
NoRemoveUnusedShapes bool
// Set to true to avoid renaming to 'Input/Output' postfixed shapes
NoRenameToplevelShapes bool
// Set to true to ignore service/request init methods (for testing)
NoInitMethods bool
// Set to true to ignore String() and GoString methods (for generated tests)
NoStringerMethods bool
// Set to true to not generate API service name constants
NoConstServiceNames bool
// Set to true to not generate validation shapes
NoValidataShapeMethods bool
SvcClientImportPath string
initialized bool
imports map[string]bool
name string
path string
}
// A Metadata is the metadata about an API's definition.
type Metadata struct {
APIVersion string
EndpointPrefix string
SigningName string
ServiceAbbreviation string
ServiceFullName string
SignatureVersion string
JSONVersion string
TargetPrefix string
Protocol string
}
// PackageName returns the name of the API package.
func (a *API) PackageName() string {
return strings.ToLower(a.StructName())
}
// InterfacePackageName returns the package name for the interface.
func (a *API) InterfacePackageName() string {
return a.PackageName() + "iface"
}
var nameRegex = regexp.MustCompile(`^Amazon|AWS\s*|\(.*|\s+|\W+`)
// StructName returns the struct name for a given API.
func (a *API) StructName() string {
if a.name == "" {
name := a.Metadata.ServiceAbbreviation
if name == "" {
name = a.Metadata.ServiceFullName
}
name = nameRegex.ReplaceAllString(name, "")
switch name {
case "ElasticLoadBalancing":
a.name = "ELB"
case "Config":
a.name = "ConfigService"
default:
a.name = name
}
}
return a.name
}
// UseInitMethods returns if the service's init method should be rendered.
func (a *API) UseInitMethods() bool {
return !a.NoInitMethods
}
// NiceName returns the human friendly API name.
func (a *API) NiceName() string {
if a.Metadata.ServiceAbbreviation != "" {
return a.Metadata.ServiceAbbreviation
}
return a.Metadata.ServiceFullName
}
// ProtocolPackage returns the package name of the protocol this API uses.
func (a *API) ProtocolPackage() string {
switch a.Metadata.Protocol {
case "json":
return "jsonrpc"
case "ec2":
return "ec2query"
default:
return strings.Replace(a.Metadata.Protocol, "-", "", -1)
}
}
// OperationNames returns a slice of API operations supported.
func (a *API) OperationNames() []string {
i, names := 0, make([]string, len(a.Operations))
for n := range a.Operations {
names[i] = n
i++
}
sort.Strings(names)
return names
}
// OperationList returns a slice of API operation pointers
func (a *API) OperationList() []*Operation {
list := make([]*Operation, len(a.Operations))
for i, n := range a.OperationNames() {
list[i] = a.Operations[n]
}
return list
}
// OperationHasOutputPlaceholder returns if any of the API operation input
// or output shapes are place holders.
func (a *API) OperationHasOutputPlaceholder() bool {
for _, op := range a.Operations {
if op.OutputRef.Shape.Placeholder {
return true
}
}
return false
}
// ShapeNames returns a slice of names for each shape used by the API.
func (a *API) ShapeNames() []string {
i, names := 0, make([]string, len(a.Shapes))
for n := range a.Shapes {
names[i] = n
i++
}
sort.Strings(names)
return names
}
// ShapeList returns a slice of shape pointers used by the API.
func (a *API) ShapeList() []*Shape {
list := make([]*Shape, len(a.Shapes))
for i, n := range a.ShapeNames() {
list[i] = a.Shapes[n]
}
return list
}
// resetImports resets the import map to default values.
func (a *API) resetImports() {
a.imports = map[string]bool{
"github.com/aws/aws-sdk-go/aws": true,
}
}
// importsGoCode returns the generated Go import code.
func (a *API) importsGoCode() string {
if len(a.imports) == 0 {
return ""
}
corePkgs, extPkgs := []string{}, []string{}
for i := range a.imports {
if strings.Contains(i, ".") {
extPkgs = append(extPkgs, i)
} else {
corePkgs = append(corePkgs, i)
}
}
sort.Strings(corePkgs)
sort.Strings(extPkgs)
code := "import (\n"
for _, i := range corePkgs {
code += fmt.Sprintf("\t%q\n", i)
}
if len(corePkgs) > 0 {
code += "\n"
}
for _, i := range extPkgs {
code += fmt.Sprintf("\t%q\n", i)
}
code += ")\n\n"
return code
}
// A tplAPI is the top level template for the API
var tplAPI = template.Must(template.New("api").Parse(`
{{ range $_, $o := .OperationList }}
{{ $o.GoCode }}
{{ end }}
{{ range $_, $s := .ShapeList }}
{{ if and $s.IsInternal (eq $s.Type "structure") }}{{ $s.GoCode }}{{ end }}
{{ end }}
{{ range $_, $s := .ShapeList }}
{{ if $s.IsEnum }}{{ $s.GoCode }}{{ end }}
{{ end }}
`))
// APIGoCode renders the API in Go code, returning it as a string.
func (a *API) APIGoCode() string {
a.resetImports()
delete(a.imports, "github.com/aws/aws-sdk-go/aws")
a.imports["github.com/aws/aws-sdk-go/aws/awsutil"] = true
a.imports["github.com/aws/aws-sdk-go/aws/request"] = true
if a.OperationHasOutputPlaceholder() {
a.imports["github.com/aws/aws-sdk-go/private/protocol/"+a.ProtocolPackage()] = true
a.imports["github.com/aws/aws-sdk-go/private/protocol"] = true
}
for _, op := range a.Operations {
if op.AuthType == "none" {
a.imports["github.com/aws/aws-sdk-go/aws/credentials"] = true
break
}
}
var buf bytes.Buffer
err := tplAPI.Execute(&buf, a)
if err != nil {
panic(err)
}
code := a.importsGoCode() + strings.TrimSpace(buf.String())
return code
}
// A tplService defines the template for the service generated code.
var tplService = template.Must(template.New("service").Parse(`
{{ .Documentation }}//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type {{ .StructName }} struct {
*client.Client
}
{{ if .UseInitMethods }}// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
{{ end }}
{{ if not .NoConstServiceNames }}
// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "{{ .Metadata.EndpointPrefix }}"
{{ end }}
// New creates a new instance of the {{ .StructName }} client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a {{ .StructName }} client from just a session.
// svc := {{ .PackageName }}.New(mySession)
//
// // Create a {{ .StructName }} client with additional configuration
// svc := {{ .PackageName }}.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *{{ .StructName }} {
c := p.ClientConfig({{ if .NoConstServiceNames }}"{{ .Metadata.EndpointPrefix }}"{{ else }}ServiceName{{ end }}, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *{{ .StructName }} {
svc := &{{ .StructName }}{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: {{ if .NoConstServiceNames }}"{{ .Metadata.EndpointPrefix }}"{{ else }}ServiceName{{ end }}, {{ if ne .Metadata.SigningName "" }}
SigningName: "{{ .Metadata.SigningName }}",{{ end }}
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "{{ .Metadata.APIVersion }}",
{{ if eq .Metadata.Protocol "json" }}JSONVersion: "{{ .Metadata.JSONVersion }}",
TargetPrefix: "{{ .Metadata.TargetPrefix }}",
{{ end }}
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed({{if eq .Metadata.SignatureVersion "v2"}}v2{{else}}v4{{end}}.SignRequestHandler)
{{if eq .Metadata.SignatureVersion "v2"}}svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
{{end}}svc.Handlers.Build.PushBackNamed({{ .ProtocolPackage }}.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed({{ .ProtocolPackage }}.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed({{ .ProtocolPackage }}.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed({{ .ProtocolPackage }}.UnmarshalErrorHandler)
{{ if .UseInitMethods }}// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
{{ end }}
return svc
}
// newRequest creates a new request for a {{ .StructName }} operation and runs any
// custom request initialization.
func (c *{{ .StructName }}) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
{{ if .UseInitMethods }}// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
{{ end }}
return req
}
`))
// ServiceGoCode renders service go code. Returning it as a string.
func (a *API) ServiceGoCode() string {
a.resetImports()
a.imports["github.com/aws/aws-sdk-go/aws/client"] = true
a.imports["github.com/aws/aws-sdk-go/aws/client/metadata"] = true
a.imports["github.com/aws/aws-sdk-go/aws/request"] = true
if a.Metadata.SignatureVersion == "v2" {
a.imports["github.com/aws/aws-sdk-go/private/signer/v2"] = true
a.imports["github.com/aws/aws-sdk-go/aws/corehandlers"] = true
} else {
a.imports["github.com/aws/aws-sdk-go/aws/signer/v4"] = true
}
a.imports["github.com/aws/aws-sdk-go/private/protocol/"+a.ProtocolPackage()] = true
var buf bytes.Buffer
err := tplService.Execute(&buf, a)
if err != nil {
panic(err)
}
code := a.importsGoCode() + buf.String()
return code
}
// ExampleGoCode renders service example code. Returning it as a string.
func (a *API) ExampleGoCode() string {
exs := []string{}
for _, o := range a.OperationList() {
exs = append(exs, o.Example())
}
code := fmt.Sprintf("import (\n%q\n%q\n%q\n\n%q\n%q\n%q\n)\n\n"+
"var _ time.Duration\nvar _ bytes.Buffer\n\n%s",
"bytes",
"fmt",
"time",
"github.com/aws/aws-sdk-go/aws",
"github.com/aws/aws-sdk-go/aws/session",
path.Join(a.SvcClientImportPath, a.PackageName()),
strings.Join(exs, "\n\n"),
)
return code
}
// A tplInterface defines the template for the service interface type.
var tplInterface = template.Must(template.New("interface").Parse(`
// {{ .StructName }}API is the interface type for {{ .PackageName }}.{{ .StructName }}.
type {{ .StructName }}API interface {
{{ range $_, $o := .OperationList }}
{{ $o.InterfaceSignature }}
{{ end }}
}
var _ {{ .StructName }}API = (*{{ .PackageName }}.{{ .StructName }})(nil)
`))
// InterfaceGoCode returns the go code for the service's API operations as an
// interface{}. Assumes that the interface is being created in a different
// package than the service API's package.
func (a *API) InterfaceGoCode() string {
a.resetImports()
a.imports = map[string]bool{
"github.com/aws/aws-sdk-go/aws/request": true,
path.Join(a.SvcClientImportPath, a.PackageName()): true,
}
var buf bytes.Buffer
err := tplInterface.Execute(&buf, a)
if err != nil {
panic(err)
}
code := a.importsGoCode() + strings.TrimSpace(buf.String())
return code
}
// NewAPIGoCodeWithPkgName returns a string that instantiates the API,
// prefixed with its package name. Takes a string representing the Config.
func (a *API) NewAPIGoCodeWithPkgName(cfg string) string {
return fmt.Sprintf("%s.New(%s)", a.PackageName(), cfg)
}
// computes the validation chain for all input shapes
func (a *API) addShapeValidations() {
for _, o := range a.Operations {
resolveShapeValidations(o.InputRef.Shape)
}
}
// Updates the source shape and all nested shapes with the validations that
// could possibly be needed.
func resolveShapeValidations(s *Shape, ancestry ...*Shape) {
for _, a := range ancestry {
if a == s {
return
}
}
children := []string{}
for _, name := range s.MemberNames() {
ref := s.MemberRefs[name]
if s.IsRequired(name) && !s.Validations.Has(ref, ShapeValidationRequired) {
s.Validations = append(s.Validations, ShapeValidation{
Name: name, Ref: ref, Type: ShapeValidationRequired,
})
}
if ref.Shape.Min != 0 && !s.Validations.Has(ref, ShapeValidationMinVal) {
s.Validations = append(s.Validations, ShapeValidation{
Name: name, Ref: ref, Type: ShapeValidationMinVal,
})
}
switch ref.Shape.Type {
case "map", "list", "structure":
children = append(children, name)
}
}
ancestry = append(ancestry, s)
for _, name := range children {
ref := s.MemberRefs[name]
nestedShape := ref.Shape.NestedShape()
var v *ShapeValidation
if len(nestedShape.Validations) > 0 {
v = &ShapeValidation{
Name: name, Ref: ref, Type: ShapeValidationNested,
}
} else {
resolveShapeValidations(nestedShape, ancestry...)
if len(nestedShape.Validations) > 0 {
v = &ShapeValidation{
Name: name, Ref: ref, Type: ShapeValidationNested,
}
}
}
if v != nil && !s.Validations.Has(v.Ref, v.Type) {
s.Validations = append(s.Validations, *v)
}
}
ancestry = ancestry[:len(ancestry)-1]
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
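resolveShapeValidations in the row above avoids infinite recursion on cyclic shape graphs by threading an ancestry slice through the recursive calls and returning as soon as the current shape appears on its own path. A stripped-down sketch of that cycle-guard pattern on a hypothetical node graph; the node type and visit function are assumptions for illustration, not part of the SDK generator.

package main

import "fmt"

type node struct {
	name     string
	children []*node
}

// visit walks the graph depth-first, using the ancestry slice to stop
// as soon as a node reappears on its own path (i.e. a cycle).
func visit(n *node, ancestry ...*node) {
	for _, a := range ancestry {
		if a == n {
			return // already on the current path: cycle detected
		}
	}
	fmt.Println(n.name)
	ancestry = append(ancestry, n)
	for _, c := range n.children {
		visit(c, ancestry...)
	}
}

func main() {
	a := &node{name: "a"}
	b := &node{name: "b", children: []*node{a}}
	a.children = []*node{b} // cycle: a -> b -> a
	visit(a)                // prints "a" then "b", then stops
}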
batch_test.go
|
package pgx_test
import (
"context"
"os"
"testing"
"github.com/cdn77/pgconn/v77"
"github.com/cdn77/pgconn/v77/stmtcache"
"github.com/cdn77/pgx/v77"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConnSendBatch(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server serial type is incompatible with test")
sql := `create temporary table ledger(
id serial primary key,
description varchar not null,
amount int not null
);`
mustExec(t, conn, sql)
batch := &pgx.Batch{}
batch.Queue("insert into ledger(description, amount) values($1, $2)", "q1", 1)
batch.Queue("insert into ledger(description, amount) values($1, $2)", "q2", 2)
batch.Queue("insert into ledger(description, amount) values($1, $2)", "q3", 3)
batch.Queue("select id, description, amount from ledger order by id")
batch.Queue("select sum(amount) from ledger")
br := conn.SendBatch(context.Background(), batch)
ct, err := br.Exec()
if err != nil {
t.Error(err)
}
if ct.RowsAffected() != 1 {
t.Errorf("ct.RowsAffected() => %v, want %v", ct.RowsAffected(), 1)
}
ct, err = br.Exec()
if err != nil {
t.Error(err)
}
if ct.RowsAffected() != 1 {
t.Errorf("ct.RowsAffected() => %v, want %v", ct.RowsAffected(), 1)
}
ct, err = br.Exec()
if err != nil {
t.Error(err)
}
if ct.RowsAffected() != 1 {
t.Errorf("ct.RowsAffected() => %v, want %v", ct.RowsAffected(), 1)
}
rows, err := br.Query()
if err != nil {
t.Error(err)
}
var id int32
var description string
var amount int32
if !rows.Next() {
t.Fatal("expected a row to be available")
}
if err := rows.Scan(&id, &description, &amount); err != nil {
t.Fatal(err)
}
if id != 1 {
t.Errorf("id => %v, want %v", id, 1)
}
if description != "q1" {
t.Errorf("description => %v, want %v", description, "q1")
}
if amount != 1 {
t.Errorf("amount => %v, want %v", amount, 1)
}
if !rows.Next() {
t.Fatal("expected a row to be available")
}
if err := rows.Scan(&id, &description, &amount); err != nil {
t.Fatal(err)
}
if id != 2 {
t.Errorf("id => %v, want %v", id, 2)
}
if description != "q2" {
t.Errorf("description => %v, want %v", description, "q2")
}
if amount != 2 {
t.Errorf("amount => %v, want %v", amount, 2)
}
if !rows.Next() {
t.Fatal("expected a row to be available")
}
if err := rows.Scan(&id, &description, &amount); err != nil {
t.Fatal(err)
}
if id != 3 {
t.Errorf("id => %v, want %v", id, 3)
}
if description != "q3" {
t.Errorf("description => %v, want %v", description, "q3")
}
if amount != 3 {
t.Errorf("amount => %v, want %v", amount, 3)
}
if rows.Next() {
t.Fatal("did not expect a row to be available")
}
if rows.Err() != nil {
t.Fatal(rows.Err())
}
err = br.QueryRow().Scan(&amount)
if err != nil {
t.Error(err)
}
if amount != 6 {
t.Errorf("amount => %v, want %v", amount, 6)
}
err = br.Close()
if err != nil {
t.Fatal(err)
}
ensureConnValid(t, conn)
}
func TestConnSendBatchMany(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
sql := `create temporary table ledger(
id serial primary key,
description varchar not null,
amount int not null
);`
mustExec(t, conn, sql)
batch := &pgx.Batch{}
numInserts := 1000
for i := 0; i < numInserts; i++ {
batch.Queue("insert into ledger(description, amount) values($1, $2)", "q1", 1)
}
batch.Queue("select count(*) from ledger")
br := conn.SendBatch(context.Background(), batch)
for i := 0; i < numInserts; i++ {
ct, err := br.Exec()
assert.NoError(t, err)
assert.EqualValues(t, 1, ct.RowsAffected())
}
var actualInserts int
err := br.QueryRow().Scan(&actualInserts)
assert.NoError(t, err)
assert.EqualValues(t, numInserts, actualInserts)
err = br.Close()
require.NoError(t, err)
ensureConnValid(t, conn)
}
func TestConnSendBatchWithPreparedStatement(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server issues incorrect ParameterDescription (https://github.com/cockroachdb/cockroach/issues/60907)")
_, err := conn.Prepare(context.Background(), "ps1", "select n from generate_series(0,$1::int) n")
if err != nil {
t.Fatal(err)
}
batch := &pgx.Batch{}
queryCount := 3
for i := 0; i < queryCount; i++ {
batch.Queue("ps1", 5)
}
br := conn.SendBatch(context.Background(), batch)
for i := 0; i < queryCount; i++ {
rows, err := br.Query()
if err != nil {
t.Fatal(err)
}
for k := 0; rows.Next(); k++ {
var n int
if err := rows.Scan(&n); err != nil {
t.Fatal(err)
}
if n != k {
t.Fatalf("n => %v, want %v", n, k)
}
}
if rows.Err() != nil {
t.Fatal(rows.Err())
}
}
err = br.Close()
if err != nil {
t.Fatal(err)
}
ensureConnValid(t, conn)
}
// https://github.com/jackc/pgx/issues/856
func TestConnSendBatchWithPreparedStatementAndStatementCacheDisabled(t *testing.T) {
t.Parallel()
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
config.BuildStatementCache = nil
conn := mustConnect(t, config)
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server issues incorrect ParameterDescription (https://github.com/cockroachdb/cockroach/issues/60907)")
_, err = conn.Prepare(context.Background(), "ps1", "select n from generate_series(0,$1::int) n")
if err != nil {
t.Fatal(err)
}
batch := &pgx.Batch{}
queryCount := 3
for i := 0; i < queryCount; i++ {
batch.Queue("ps1", 5)
}
br := conn.SendBatch(context.Background(), batch)
for i := 0; i < queryCount; i++ {
rows, err := br.Query()
if err != nil {
t.Fatal(err)
}
for k := 0; rows.Next(); k++ {
var n int
if err := rows.Scan(&n); err != nil {
t.Fatal(err)
}
if n != k {
t.Fatalf("n => %v, want %v", n, k)
}
}
if rows.Err() != nil {
t.Fatal(rows.Err())
}
}
err = br.Close()
if err != nil {
t.Fatal(err)
}
ensureConnValid(t, conn)
}
func TestConnSendBatchCloseRowsPartiallyRead(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
batch := &pgx.Batch{}
batch.Queue("select n from generate_series(0,5) n")
batch.Queue("select n from generate_series(0,5) n")
br := conn.SendBatch(context.Background(), batch)
rows, err := br.Query()
if err != nil {
t.Error(err)
}
for i := 0; i < 3; i++ {
if !rows.Next() {
t.Error("expected a row to be available")
}
var n int
if err := rows.Scan(&n); err != nil {
t.Error(err)
}
if n != i {
t.Errorf("n => %v, want %v", n, i)
}
}
rows.Close()
rows, err = br.Query()
if err != nil {
t.Error(err)
}
for i := 0; rows.Next(); i++ {
var n int
if err := rows.Scan(&n); err != nil {
t.Error(err)
}
if n != i {
t.Errorf("n => %v, want %v", n, i)
}
}
if rows.Err() != nil {
t.Error(rows.Err())
}
err = br.Close()
if err != nil {
t.Fatal(err)
}
ensureConnValid(t, conn)
}
func TestConnSendBatchQueryError(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
batch := &pgx.Batch{}
batch.Queue("select n from generate_series(0,5) n where 100/(5-n) > 0")
batch.Queue("select n from generate_series(0,5) n")
br := conn.SendBatch(context.Background(), batch)
rows, err := br.Query()
if err != nil {
t.Error(err)
}
for i := 0; rows.Next(); i++ {
var n int
if err := rows.Scan(&n); err != nil {
t.Error(err)
}
if n != i {
t.Errorf("n => %v, want %v", n, i)
}
}
if pgErr, ok := rows.Err().(*pgconn.PgError); !(ok && pgErr.Code == "22012") {
t.Errorf("rows.Err() => %v, want error code %v", rows.Err(), 22012)
}
err = br.Close()
if pgErr, ok := err.(*pgconn.PgError); !(ok && pgErr.Code == "22012") {
t.Errorf("br.Close() => %v, want error code %v", err, 22012)
}
ensureConnValid(t, conn)
}
func TestConnSendBatchQuerySyntaxError(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
batch := &pgx.Batch{}
batch.Queue("select 1 1")
br := conn.SendBatch(context.Background(), batch)
var n int32
err := br.QueryRow().Scan(&n)
if pgErr, ok := err.(*pgconn.PgError); !(ok && pgErr.Code == "42601") {
t.Errorf("br.QueryRow().Scan() => %v, want error code %v", err, 42601)
}
err = br.Close()
if err == nil {
t.Error("Expected error")
}
ensureConnValid(t, conn)
}
func TestConnSendBatchQueryRowInsert(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
sql := `create temporary table ledger(
id serial primary key,
description varchar not null,
amount int not null
);`
mustExec(t, conn, sql)
batch := &pgx.Batch{}
batch.Queue("select 1")
batch.Queue("insert into ledger(description, amount) values($1, $2),($1, $2)", "q1", 1)
br := conn.SendBatch(context.Background(), batch)
var value int
err := br.QueryRow().Scan(&value)
if err != nil {
t.Error(err)
}
ct, err := br.Exec()
if err != nil {
t.Error(err)
}
if ct.RowsAffected() != 2 {
t.Errorf("ct.RowsAffected() => %v, want %v", ct.RowsAffected(), 2)
}
br.Close()
ensureConnValid(t, conn)
}
func TestConnSendBatchQueryPartialReadInsert(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
sql := `create temporary table ledger(
id serial primary key,
description varchar not null,
amount int not null
);`
mustExec(t, conn, sql)
batch := &pgx.Batch{}
batch.Queue("select 1 union all select 2 union all select 3")
batch.Queue("insert into ledger(description, amount) values($1, $2),($1, $2)", "q1", 1)
br := conn.SendBatch(context.Background(), batch)
rows, err := br.Query()
if err != nil {
t.Error(err)
}
rows.Close()
ct, err := br.Exec()
if err != nil {
t.Error(err)
}
if ct.RowsAffected() != 2 {
t.Errorf("ct.RowsAffected() => %v, want %v", ct.RowsAffected(), 2)
}
br.Close()
ensureConnValid(t, conn)
}
func TestTxSendBatch(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
sql := `create temporary table ledger1(
id serial primary key,
description varchar not null
);`
mustExec(t, conn, sql)
sql = `create temporary table ledger2(
id int primary key,
amount int not null
);`
mustExec(t, conn, sql)
tx, _ := conn.Begin(context.Background())
batch := &pgx.Batch{}
batch.Queue("insert into ledger1(description) values($1) returning id", "q1")
br := tx.SendBatch(context.Background(), batch)
var id int
err := br.QueryRow().Scan(&id)
if err != nil {
t.Error(err)
}
br.Close()
batch = &pgx.Batch{}
batch.Queue("insert into ledger2(id,amount) values($1, $2)", id, 2)
batch.Queue("select amount from ledger2 where id = $1", id)
br = tx.SendBatch(context.Background(), batch)
ct, err := br.Exec()
if err != nil {
t.Error(err)
}
if ct.RowsAffected() != 1 {
t.Errorf("ct.RowsAffected() => %v, want %v", ct.RowsAffected(), 1)
}
var amount int
err = br.QueryRow().Scan(&amount)
if err != nil {
t.Error(err)
}
br.Close()
tx.Commit(context.Background())
var count int
conn.QueryRow(context.Background(), "select count(1) from ledger1 where id = $1", id).Scan(&count)
if count != 1 {
t.Errorf("count => %v, want %v", count, 1)
}
err = br.Close()
if err != nil {
t.Fatal(err)
}
ensureConnValid(t, conn)
}
func TestTxSendBatchRollback(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
sql := `create temporary table ledger1(
id serial primary key,
description varchar not null
);`
mustExec(t, conn, sql)
tx, _ := conn.Begin(context.Background())
batch := &pgx.Batch{}
batch.Queue("insert into ledger1(description) values($1) returning id", "q1")
br := tx.SendBatch(context.Background(), batch)
var id int
err := br.QueryRow().Scan(&id)
if err != nil {
t.Error(err)
}
br.Close()
tx.Rollback(context.Background())
row := conn.QueryRow(context.Background(), "select count(1) from ledger1 where id = $1", id)
var count int
row.Scan(&count)
if count != 0 {
t.Errorf("count => %v, want %v", count, 0)
}
ensureConnValid(t, conn)
}
func TestConnBeginBatchDeferredError(t *testing.T) {
t.Parallel()
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
skipCockroachDB(t, conn, "Server does not support deferred constraint (https://github.com/cockroachdb/cockroach/issues/31632)")
mustExec(t, conn, `create temporary table t (
id text primary key,
n int not null,
unique (n) deferrable initially deferred
);
insert into t (id, n) values ('a', 1), ('b', 2), ('c', 3);`)
batch := &pgx.Batch{}
batch.Queue(`update t set n=n+1 where id='b' returning *`)
br := conn.SendBatch(context.Background(), batch)
rows, err := br.Query()
if err != nil {
t.Error(err)
}
for rows.Next() {
var id string
var n int32
err = rows.Scan(&id, &n)
if err != nil {
t.Fatal(err)
}
}
err = br.Close()
if err == nil {
t.Fatal("expected error 23505 but got none")
}
if err, ok := err.(*pgconn.PgError); !ok || err.Code != "23505" {
t.Fatalf("expected error 23505, got %v", err)
}
ensureConnValid(t, conn)
}
func TestConnSendBatchNoStatementCache(t *testing.T) {
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = nil
conn := mustConnect(t, config)
defer closeConn(t, conn)
testConnSendBatch(t, conn, 3)
}
func TestConnSendBatchPrepareStatementCache(t *testing.T) {
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModePrepare, 32)
}
conn := mustConnect(t, config)
defer closeConn(t, conn)
testConnSendBatch(t, conn, 3)
}
func TestConnSendBatchDescribeStatementCache(t *testing.T) {
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
return stmtcache.New(conn, stmtcache.ModeDescribe, 32)
}
conn := mustConnect(t, config)
defer closeConn(t, conn)
testConnSendBatch(t, conn, 3)
}
func testConnSendBatch(t *testing.T, conn *pgx.Conn, queryCount int) {
batch := &pgx.Batch{}
for j := 0; j < queryCount; j++ {
batch.Queue("select n from generate_series(0,5) n")
}
br := conn.SendBatch(context.Background(), batch)
for j := 0; j < queryCount; j++ {
rows, err := br.Query()
require.NoError(t, err)
for k := 0; rows.Next(); k++ {
var n int
err := rows.Scan(&n)
require.NoError(t, err)
require.Equal(t, k, n)
}
require.NoError(t, rows.Err())
}
err := br.Close()
require.NoError(t, err)
}
func TestLogBatchStatementsOnExec(t *testing.T) {
l1 := &testLogger{}
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.Logger = l1
conn := mustConnect(t, config)
defer closeConn(t, conn)
l1.logs = l1.logs[0:0] // Clear logs written when establishing connection
batch := &pgx.Batch{}
batch.Queue("create table foo (id bigint)")
batch.Queue("drop table foo")
br := conn.SendBatch(context.Background(), batch)
_, err := br.Exec()
if err != nil {
t.Fatalf("Unexpected error creating table: %v", err)
}
_, err = br.Exec()
if err != nil {
t.Fatalf("Unexpected error dropping table: %v", err)
}
if len(l1.logs) != 2 {
t.Fatalf("Expected two log entries but got %d", len(l1.logs))
}
if l1.logs[0].msg != "BatchResult.Exec" {
t.Errorf("Expected first log message to be 'BatchResult.Exec' but was '%s'", l1.logs[0].msg)
}
if l1.logs[0].data["sql"] != "create table foo (id bigint)" {
t.Errorf("Expected the first query to be 'create table foo (id bigint)' but was '%s'", l1.logs[0].data["sql"])
}
if l1.logs[1].msg != "BatchResult.Exec" {
t.Errorf("Expected second log message to be 'BatchResult.Exec' but was '%s'", l1.logs[1].msg)
}
if l1.logs[1].data["sql"] != "drop table foo" {
t.Errorf("Expected the second query to be 'drop table foo' but was '%s'", l1.logs[1].data["sql"])
}
}
func TestLogBatchStatementsOnBatchResultClose(t *testing.T) {
l1 := &testLogger{}
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.Logger = l1
conn := mustConnect(t, config)
defer closeConn(t, conn)
l1.logs = l1.logs[0:0] // Clear logs written when establishing connection
batch := &pgx.Batch{}
batch.Queue("select generate_series(1,$1)", 100)
batch.Queue("select 1 = 1;")
br := conn.SendBatch(context.Background(), batch)
if err := br.Close(); err != nil {
t.Fatalf("Unexpected batch error: %v", err)
}
if len(l1.logs) != 2 {
t.Fatalf("Expected 2 log statements but found %d", len(l1.logs))
}
if l1.logs[0].msg != "BatchResult.Close" {
t.Errorf("Expected first log statement to be 'BatchResult.Close' but was %s", l1.logs[0].msg)
}
if l1.logs[0].data["sql"] != "select generate_series(1,$1)" {
t.Errorf("Expected first query to be 'select generate_series(1,$1)' but was '%s'", l1.logs[0].data["sql"])
}
if l1.logs[1].msg != "BatchResult.Close" {
t.Errorf("Expected second log statement to be 'BatchResult.Close' but was %s", l1.logs[1].msg)
}
if l1.logs[1].data["sql"] != "select 1 = 1;" {
t.Errorf("Expected second query to be 'select 1 = 1;' but was '%s'", l1.logs[1].data["sql"])
}
}
func TestSendBatchSimpleProtocol(t *testing.T) {
t.Parallel()
config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE"))
config.PreferSimpleProtocol = true
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
conn := mustConnect(t, config)
defer closeConn(t, conn)
var batch pgx.Batch
batch.Queue("SELECT 1::int")
batch.Queue("SELECT 2::int; SELECT $1::int", 3)
results := conn.SendBatch(ctx, &batch)
rows, err := results.Query()
assert.NoError(t, err)
assert.True(t, rows.Next())
values, err := rows.Values()
assert.NoError(t, err)
assert.EqualValues(t, 1, values[0])
assert.False(t, rows.Next())
rows, err = results.Query()
assert.NoError(t, err)
assert.True(t, rows.Next())
values, err = rows.Values()
assert.NoError(t, err)
assert.EqualValues(t, 2, values[0])
assert.False(t, rows.Next())
rows, err = results.Query()
assert.NoError(t, err)
assert.True(t, rows.Next())
values, err = rows.Values()
assert.NoError(t, err)
assert.EqualValues(t, 3, values[0])
assert.False(t, rows.Next())
}
|
[
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\""
] |
[] |
[
"PGX_TEST_DATABASE"
] |
[]
|
["PGX_TEST_DATABASE"]
|
go
| 1 | 0 | |
bench_test.go
|
package main_test
import (
"crypto/rand"
"database/sql"
"encoding/hex"
"fmt"
"os"
"testing"
_ "github.com/lib/pq"
_ "github.com/iamnikolie/ramsql/driver"
)
func benchmarkInsert(b *testing.B, driver string, nbRows int) {
u := os.Getenv("PG_USER")
pwd := os.Getenv("PG_PASSWORD")
ip := os.Getenv("PG_IP")
port := os.Getenv("PG_PORT")
dsn := fmt.Sprintf("postgres://%s:%s@%s:%s/postgres?sslmode=disable", u, pwd, ip, port)
db, err := sql.Open(driver, dsn)
if err != nil {
b.Fatalf("sql.Open: %s", err)
}
db.Exec(`DROP TABLE account`)
_, err = db.Exec(`CREATE TABLE account (id BIGSERIAL PRIMARY KEY, email TEXT)`)
if err != nil {
b.Fatalf("sql.Exec: %s", err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
query := `INSERT INTO account (email) VALUES ($1)`
for i := 0; i < nbRows; i++ {
size := 32
bs := make([]byte, size)
_, err := rand.Read(bs)
if err != nil {
b.Fatalf("rand failed: %s", err)
}
str := hex.EncodeToString(bs)
token := []byte(str)[0:size]
_, err = db.Exec(query, string(token)+"@foobar.com")
if err != nil {
b.Fatalf("cannot insert rows: %s", err)
}
}
}
_, err = db.Exec(`DROP TABLE account`)
if err != nil {
b.Fatalf("sql.Exec: %s", err)
}
}
func benchmarkSelect(b *testing.B, driver string, n int) {
u := os.Getenv("PG_USER")
pwd := os.Getenv("PG_PASSWORD")
ip := os.Getenv("PG_IP")
port := os.Getenv("PG_PORT")
dsn := fmt.Sprintf("postgres://%s:%s@%s:%s/postgres?sslmode=disable", u, pwd, ip, port)
db, err := sql.Open(driver, dsn)
if err != nil {
b.Fatalf("sql.Open: %s", err)
}
db.Exec(`DROP TABLE account`)
_, err = db.Exec(`CREATE TABLE account (id BIGSERIAL PRIMARY KEY, email TEXT)`)
if err != nil {
b.Fatalf("sql.Exec: %s", err)
}
query := `INSERT INTO account (email) VALUES ($1)`
for i := 0; i < n; i++ {
size := 32
bs := make([]byte, size)
_, err := rand.Read(bs)
if err != nil {
b.Fatalf("rand failed: %s", err)
}
str := hex.EncodeToString(bs)
token := []byte(str)[0:size]
_, err = db.Exec(query, string(token)+"@foobar.com")
if err != nil {
b.Fatalf("cannot insert rows: %s", err)
}
}
var id int64
var email string
b.ResetTimer()
for n := 0; n < b.N; n++ {
query := `SELECT account.id, account.email FROM account WHERE id > $1 AND id < $2`
rows, err := db.Query(query, 20, 50)
if err != nil {
b.Fatalf("cannot query rows: %s", err)
}
for rows.Next() {
err = rows.Scan(&id, &email)
if err != nil {
b.Fatalf("cannot scan rows: %s", err)
}
}
}
_ = id
_ = email
_, err = db.Exec(`DROP TABLE account`)
if err != nil {
b.Fatalf("sql.Exec: %s", err)
}
}
func BenchmarkRamSQLSelect(b *testing.B) {
benchmarkSelect(b, "ramsql", 100)
}
func BenchmarkPostgresSelect(b *testing.B) {
benchmarkSelect(b, "postgres", 100)
}
func BenchmarkRamSQLInsert10(b *testing.B) {
benchmarkInsert(b, "ramsql", 10)
}
func BenchmarkPostgresInsert10(b *testing.B) {
benchmarkInsert(b, "postgres", 10)
}
|
[
"\"PG_USER\"",
"\"PG_PASSWORD\"",
"\"PG_IP\"",
"\"PG_PORT\"",
"\"PG_USER\"",
"\"PG_PASSWORD\"",
"\"PG_IP\"",
"\"PG_PORT\""
] |
[] |
[
"PG_USER",
"PG_PORT",
"PG_PASSWORD",
"PG_IP"
] |
[]
|
["PG_USER", "PG_PORT", "PG_PASSWORD", "PG_IP"]
|
go
| 4 | 0 | |
src/test/java/com/atlassian/db/replica/it/example/aurora/AuroraClusterTest.java
|
package com.atlassian.db.replica.it.example.aurora;
import com.atlassian.db.replica.api.DualConnection;
import com.atlassian.db.replica.api.SqlCall;
import com.atlassian.db.replica.api.reason.Reason;
import com.atlassian.db.replica.api.reason.RouteDecision;
import com.atlassian.db.replica.it.example.aurora.app.User;
import com.atlassian.db.replica.it.example.aurora.app.Users;
import com.atlassian.db.replica.it.example.aurora.replica.AuroraConnectionProvider;
import com.atlassian.db.replica.it.example.aurora.replica.ConsistencyFactory;
import com.atlassian.db.replica.it.example.aurora.utils.DecisionLog;
import com.atlassian.db.replica.it.example.aurora.utils.ReplicationLag;
import com.atlassian.db.replica.spi.ReplicaConnectionPerUrlProvider;
import com.atlassian.db.replica.spi.ConnectionProvider;
import com.atlassian.db.replica.spi.DatabaseCall;
import com.atlassian.db.replica.internal.DefaultReplicaConnectionPerUrlProvider;
import com.atlassian.db.replica.spi.ReplicaConsistency;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import static com.atlassian.db.replica.api.reason.Reason.READ_OPERATION;
import static com.atlassian.db.replica.api.reason.Reason.REPLICA_INCONSISTENT;
import static org.assertj.core.api.Assertions.assertThat;
class AuroraClusterTest {
final String databaseName = "newdb";
final String readerEndpoint = "database-1.cluster-ro-crmnlihjxqlm.eu-central-1.rds.amazonaws.com:5432";
final String readerJdbcUrl = "jdbc:postgresql://" + readerEndpoint + "/" + databaseName;
final String writerJdbcUrl = "jdbc:postgresql://database-1.cluster-crmnlihjxqlm.eu-central-1.rds.amazonaws.com:5432" + "/" + databaseName;
final String jdbcUsername = "postgres";
final String jdbcPassword = System.getenv("password");
@Test
@Disabled
void shouldUtilizeReplicaForReadQueriesForSynchronisedWrites() throws SQLException {
final DecisionLog decisionLog = new DecisionLog();
final SqlCall<Connection> connectionPool = initializeConnectionPool(decisionLog);
new ReplicationLag(connectionPool).set(10);
final Users users = new Users(connectionPool);
final User newUser = new User();
users.add(newUser);
final Collection<User> allUsers = users.fetch();
final List<Reason> reasons = decisionLog.getDecisions().stream().map(RouteDecision::getReason).collect(
Collectors.toList());
assertThat(allUsers).contains(newUser);
assertThat(decisionLog.getDecisions()).contains(new RouteDecision(
"SELECT username FROM users",
READ_OPERATION,
null
));
assertThat(reasons).isNotEmpty().doesNotContain(REPLICA_INCONSISTENT);
}
private SqlCall<Connection> initializeConnectionPool(final DatabaseCall decisionLog) throws SQLException {
final ConnectionProvider connectionProvider = new AuroraConnectionProvider(
readerJdbcUrl,
writerJdbcUrl
);
ReplicaConnectionPerUrlProvider replicaConnectionPerUrlProvider = new DefaultReplicaConnectionPerUrlProvider(
jdbcUsername,
jdbcPassword
);
final ReplicaConsistency replicaConsistency = new ConsistencyFactory(
connectionProvider,
replicaConnectionPerUrlProvider
).create();
return () -> DualConnection.builder(connectionProvider, replicaConsistency)
.databaseCall(decisionLog)
.build();
}
}
|
[
"\"password\""
] |
[] |
[
"password"
] |
[]
|
["password"]
|
java
| 1 | 0 | |
main.go
|
package main
import (
"log"
"os"
"strconv"
)
func main() {
reseedStr := os.Getenv("PM_RESEED")
reseed, err := strconv.ParseBool(reseedStr)
if err != nil {
log.Println("PM_RESEED is unset or not a valid boolean; defaulting to false")
reseed = false
}
log.Printf("PM_RESEED=%q (reseed=%v)", reseedStr, reseed)
Run(reseed)
}
|
[
"\"PM_RESEED\""
] |
[] |
[
"PM_RESEED"
] |
[]
|
["PM_RESEED"]
|
go
| 1 | 0 | |
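The main.go row above parses PM_RESEED with strconv.ParseBool and silently falls back to false on any error. A hedged sketch of a small helper that makes the fallback explicit and distinguishes "unset" from "unparsable"; envBool is a hypothetical name that does not exist in that file.

package main

import (
	"log"
	"os"
	"strconv"
)

// envBool parses a boolean environment variable, returning def when the
// variable is unset or cannot be parsed as a bool.
func envBool(key string, def bool) bool {
	raw, ok := os.LookupEnv(key)
	if !ok {
		return def
	}
	v, err := strconv.ParseBool(raw)
	if err != nil {
		log.Printf("%s=%q is not a valid bool, using default %v", key, raw, def)
		return def
	}
	return v
}

func main() {
	reseed := envBool("PM_RESEED", false)
	log.Printf("reseed=%v", reseed)
}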
test/i3s/deployment_plan_test.go
|
/*
(c) Copyright [2015] Hewlett Packard Enterprise Development LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package i3s
import (
"fmt"
"github.com/HewlettPackard/oneview-golang/i3s"
"github.com/docker/machine/libmachine/log"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func TestCreateDeploymentPlan(t *testing.T) {
var (
d *I3STest
c *i3s.I3SClient
testName string
)
if os.Getenv("I3S_TEST_ACCEPTANCE") == "true" {
d, c = getTestDriverA("test_deployment_plan")
if c == nil {
t.Fatalf("Failed to execute getTestDriver() ")
}
// find out if the test deployment plan already exists
testName = d.Tc.GetTestData(d.Env, "Name").(string)
testDeploymentPlan, err := c.GetDeploymentPlanByName(testName)
assert.NoError(t, err, "CreateDeploymentPlan get the DeploymentPlan error -> %s", err)
if testDeploymentPlan.URI.IsNil() {
testDeploymentPlan = i3s.DeploymentPlan{
Name: testName,
Type: d.Tc.GetTestData(d.Env, "Type").(string),
}
err := c.CreateDeploymentPlan(testDeploymentPlan)
assert.NoError(t, err, "CreateDeploymentPlan error -> %s", err)
err = c.CreateDeploymentPlan(testDeploymentPlan)
assert.Error(t, err, "CreateDeploymentPlan should error because the DeploymentPlan already exists, err-> %s", err)
} else {
log.Warnf("The deploymentPlan already exists, so skipping CreateDeploymentPlan test for %s", testName)
}
// reload the test deployment plan that we just created
testDeploymentPlan, err = c.GetDeploymentPlanByName(testName)
assert.NoError(t, err, "GetDeploymentPlan error -> %s", err)
}
}
func TestGetDeploymentPlanByName(t *testing.T) {
var (
d *I3STest
c *i3s.I3SClient
testName string
)
if os.Getenv("I3S_TEST_ACCEPTANCE") == "true" {
d, c = getTestDriverA("test_deployment_plan")
if c == nil {
t.Fatalf("Failed to execute getTestDriver() ")
}
testName = d.Tc.GetTestData(d.Env, "Name").(string)
testDeploymentPlan, err := c.GetDeploymentPlanByName(testName)
assert.NoError(t, err, "GetDeploymentPlanByName threw an error -> %s", err)
assert.Equal(t, testName, testDeploymentPlan.Name)
testDeploymentPlan, err = c.GetDeploymentPlanByName("bad")
assert.NoError(t, err, "GetDeploymentPlanByName with fake name -> %s", err)
assert.Equal(t, "", testDeploymentPlan.Name)
} else {
d, c = getTestDriverU("test_deployment_plan")
testName = d.Tc.GetTestData(d.Env, "Name").(string)
data, err := c.GetDeploymentPlanByName(testName)
assert.Error(t, err, fmt.Sprintf("ALL ok, no error, caught as expected: %s,%+v\n", err, data))
}
}
func TestGetDeploymentPlans(t *testing.T) {
var (
c *i3s.I3SClient
)
if os.Getenv("I3S_TEST_ACCEPTANCE") == "true" {
_, c = getTestDriverA("test_deployment_plan")
if c == nil {
t.Fatalf("Failed to execute getTestDriver() ")
}
deploymentPlans, err := c.GetDeploymentPlans("", "")
assert.NoError(t, err, "GetDeploymentPlans threw error -> %s, %+v\n", err, deploymentPlans)
deploymentPlans, err = c.GetDeploymentPlans("", "name:asc")
assert.NoError(t, err, "GetDeploymentPlans name:asc error -> %s, %+v\n", err, deploymentPlans)
} else {
_, c = getTestDriverU("test_deployment_plan")
data, err := c.GetDeploymentPlans("", "")
assert.Error(t, err, fmt.Sprintf("ALL ok, no error, caught as expected: %s,%+v\n", err, data))
}
}
func TestDeleteDeploymentPlanNotFound(t *testing.T) {
var (
c *i3s.I3SClient
testName = "fake"
testDeploymentPlan i3s.DeploymentPlan
)
if os.Getenv("I3S_TEST_ACCEPTANCE") == "true" {
_, c = getTestDriverA("test_deployment_plan")
if c == nil {
t.Fatalf("Failed to execute getTestDriver() ")
}
err := c.DeleteDeploymentPlan(testName)
assert.NoError(t, err, "DeleteDeploymentPlan err-> %s", err)
testDeploymentPlan, err = c.GetDeploymentPlanByName(testName)
assert.NoError(t, err, "GetDeploymentPlanByName with deleted deployment plan -> %+v", err)
assert.Equal(t, "", testDeploymentPlan.Name, fmt.Sprintf("Problem getting deployment plan name, %+v", testDeploymentPlan))
} else {
_, c = getTestDriverU("test_deployment_plan")
err := c.DeleteDeploymentPlan(testName)
assert.Error(t, err, fmt.Sprintf("All ok, no error, caught as expected: %s,%+v\n", err, testDeploymentPlan))
}
}
func TestDeleteDeploymentPlan(t *testing.T) {
var (
d *I3STest
c *i3s.I3SClient
testName string
testDeploymentPlan i3s.DeploymentPlan
)
if os.Getenv("I3S_TEST_ACCEPTANCE") == "true" {
d, c = getTestDriverA("test_deployment_plan")
if c == nil {
t.Fatalf("Failed to execute getTestDriver() ")
}
testName = d.Tc.GetTestData(d.Env, "Name").(string)
err := c.DeleteDeploymentPlan(testName)
assert.NoError(t, err, "DeleteDeploymentPlan err-> %s", err)
testDeploymentPlan, err = c.GetDeploymentPlanByName(testName)
assert.NoError(t, err, "GetDeploymentPlanByName with deleted deployment plan-> %+v", err)
assert.Equal(t, "", testDeploymentPlan.Name, fmt.Sprintf("Problem getting deployment plan name, %+v", testDeploymentPlan))
} else {
_, c = getTestDriverU("test_deployment_plan")
err := c.DeleteDeploymentPlan("footest")
assert.Error(t, err, fmt.Sprintf("ALL ok, no error, caught as expected: %s,%+v\n", err, testDeploymentPlan))
}
}
|
[
"\"I3S_TEST_ACCEPTANCE\"",
"\"I3S_TEST_ACCEPTANCE\"",
"\"I3S_TEST_ACCEPTANCE\"",
"\"I3S_TEST_ACCEPTANCE\"",
"\"I3S_TEST_ACCEPTANCE\""
] |
[] |
[
"I3S_TEST_ACCEPTANCE"
] |
[]
|
["I3S_TEST_ACCEPTANCE"]
|
go
| 1 | 0 | |
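Each test in the deployment_plan_test.go row above re-checks os.Getenv("I3S_TEST_ACCEPTANCE") == "true" inline and keeps a separate unit-test branch. A common alternative, shown here only as a sketch and not what this repository does, is a helper that skips acceptance-only tests when the variable is not set; skipUnlessAcceptance is a hypothetical name.

package i3s

import (
	"os"
	"testing"
)

// skipUnlessAcceptance skips the calling test unless acceptance testing
// has been enabled via the I3S_TEST_ACCEPTANCE environment variable.
func skipUnlessAcceptance(t *testing.T) {
	t.Helper()
	if os.Getenv("I3S_TEST_ACCEPTANCE") != "true" {
		t.Skip("set I3S_TEST_ACCEPTANCE=true to run acceptance tests")
	}
}

func TestAcceptanceOnlyExample(t *testing.T) {
	skipUnlessAcceptance(t)
	// acceptance-only assertions would go here
}

This trades the existing "else run the unit driver" branching for explicit skips, so it only fits tests that have no unit-mode counterpart.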
src/build/build_step_test.go
|
// Tests around the main part of the build process.
// These are somewhat fiddly because by its nature the code has many side effects.
// We attempt to minimise some through mocking.
//
// Note that because the tests run in an indeterminate order and maybe in parallel
// they all have to be careful to use distinct build targets.
package build
import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
var cache core.Cache
func TestBuildTargetWithNoDeps(t *testing.T) {
state, target := newState("//package1:target1")
target.AddOutput("file1")
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
}
func TestFailedBuildTarget(t *testing.T) {
state, target := newState("//package1:target1a")
target.Command = "false"
err := buildTarget(1, state, target, false)
assert.Error(t, err)
}
func TestBuildTargetWhichNeedsRebuilding(t *testing.T) {
// The output file for this target already exists, but it should still get rebuilt
// because there's no rule hash file.
state, target := newState("//package1:target2")
target.AddOutput("file2")
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
}
func TestBuildTargetWhichDoesntNeedRebuilding(t *testing.T) {
// We write a rule hash for this target before building it, so we don't need to build again.
state, target := newState("//package1:target3")
target.AddOutput("file3")
StoreTargetMetadata(target, new(core.BuildMetadata))
assert.NoError(t, writeRuleHash(state, target))
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Reused, target.State())
}
func TestModifiedBuildTargetStillNeedsRebuilding(t *testing.T) {
// Similar to above, but if we change the target such that the rule hash no longer matches,
// it should get rebuilt.
state, target := newState("//package1:target4")
target.AddOutput("file4")
assert.NoError(t, writeRuleHash(state, target))
target.Command = "echo -n 'wibble wibble wibble' > $OUT"
target.RuleHash = nil // Have to force a reset of this
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
}
func TestSymlinkedOutputs(t *testing.T) {
// Test behaviour when the output is a symlink.
state, target := newState("//package1:target5")
target.AddOutput("file5")
target.AddSource(core.FileLabel{File: "src5", Package: "package1"})
target.Command = "ln -s $SRC $OUT"
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
}
func TestPreBuildFunction(t *testing.T) {
// Test modifying a command in the pre-build function.
state, target := newState("//package1:target6")
target.AddOutput("file6")
target.Command = "" // Target now won't produce the needed output
target.PreBuildFunction = preBuildFunction(func(target *core.BuildTarget) error {
target.Command = "echo 'wibble wibble wibble' > $OUT"
return nil
})
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
}
func TestPostBuildFunction(t *testing.T) {
// Test modifying a command in the post-build function.
state, target := newState("//package1:target7")
target.Command = "echo -n 'wibble wibble wibble' | tee file7"
target.PostBuildFunction = postBuildFunction(func(target *core.BuildTarget, output string) error {
target.AddOutput("file7")
assert.Equal(t, "wibble wibble wibble", output)
return nil
})
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
assert.Equal(t, []string{"file7"}, target.Outputs())
}
func TestOutputDir(t *testing.T) {
newTarget := func() (*core.BuildState, *core.BuildTarget) {
// Test modifying a command in the post-build function.
state, target := newState("//package1:target8")
target.Command = "mkdir OUT_DIR && touch OUT_DIR/file7"
target.OutputDirectories = append(target.OutputDirectories, "OUT_DIR")
return state, target
}
state, target := newTarget()
err := buildTarget(1, state, target, false)
require.NoError(t, err)
assert.Equal(t, []string{"file7"}, target.Outputs())
md, err := loadTargetMetadata(target)
require.NoError(t, err)
assert.Len(t, md.OutputDirOuts, 1)
assert.Equal(t, "file7", md.OutputDirOuts[0])
// Run again to load the outputs from the metadata
state, target = newTarget()
err = buildTarget(1, state, target, false)
require.NoError(t, err)
assert.Equal(t, []string{"file7"}, target.Outputs())
assert.Equal(t, core.Reused, target.State())
}
func TestOutputDirDoubleStar(t *testing.T) {
newTarget := func(withDoubleStar bool) (*core.BuildState, *core.BuildTarget) {
// Test modifying a command in the post-build function.
state, target := newState("//package1:target8")
target.Command = "mkdir -p OUT_DIR/foo && touch OUT_DIR/foo/file7 && chmod 777 OUT_DIR/foo/file7"
if withDoubleStar {
target.OutputDirectories = append(target.OutputDirectories, "OUT_DIR/**")
} else {
target.OutputDirectories = append(target.OutputDirectories, "OUT_DIR")
}
return state, target
}
state, target := newTarget(false)
err := buildTarget(1, state, target, false)
require.NoError(t, err)
assert.Equal(t, []string{"foo"}, target.Outputs())
md, err := loadTargetMetadata(target)
require.NoError(t, err)
assert.Len(t, md.OutputDirOuts, 1)
assert.Equal(t, "foo", md.OutputDirOuts[0])
info, err := os.Lstat(filepath.Join(target.OutDir(), "foo/file7"))
require.NoError(t, err)
assert.Equal(t, info.Mode().Perm().String(), "-rwxrwxrwx")
state, target = newTarget(true)
err = buildTarget(1, state, target, false)
require.NoError(t, err)
assert.Equal(t, []string{"foo/file7"}, target.Outputs())
info, err = os.Lstat(filepath.Join(target.OutDir(), "foo/file7"))
require.NoError(t, err)
assert.Equal(t, info.Mode().Perm().String(), "-rwxrwxrwx")
}
func TestCacheRetrieval(t *testing.T) {
// Test retrieving stuff from the cache
state, target := newState("//package1:target8")
target.AddOutput("file8")
target.Command = "false" // Will fail if we try to build it.
state.Cache = cache
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Cached, target.State())
}
func TestPostBuildFunctionAndCache(t *testing.T) {
// Test the often subtle and quick to anger interaction of post-build function and cache.
// In this case when it fails to retrieve the post-build output it should still call the function after building.
state, target := newState("//package1:target9")
target.AddOutput("file9")
target.Command = "echo -n 'wibble wibble wibble' | tee $OUT"
called := false
target.PostBuildFunction = postBuildFunction(func(target *core.BuildTarget, output string) error {
called = true
assert.Equal(t, "wibble wibble wibble", output)
return nil
})
state.Cache = cache
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Built, target.State())
assert.True(t, called)
}
func TestPostBuildFunctionAndCache2(t *testing.T) {
// Test the often subtle and quick to anger interaction of post-build function and cache.
// In this case it succeeds in retrieving the post-build output but must still call the function.
state, target := newState("//package1:target10")
target.AddOutput("file10")
target.Command = "echo 'wibble wibble wibble' | tee $OUT"
called := false
target.PostBuildFunction = postBuildFunction(func(target *core.BuildTarget, output string) error {
assert.False(t, called, "Must only call post-build function once (issue #113)")
called = true
assert.Equal(t, "retrieved from cache", output) // comes from implementation below
return nil
})
state.Cache = cache
err := buildTarget(1, state, target, false)
assert.NoError(t, err)
assert.Equal(t, core.Cached, target.State())
assert.True(t, called)
}
func TestInitPyCreation(t *testing.T) {
state, _ := newState("//pypkg:wevs")
target1 := newPyFilegroup(state, "//pypkg:target1", "file1.py")
target2 := newPyFilegroup(state, "//pypkg:target2", "__init__.py")
_, err := buildFilegroup(state, target1)
assert.NoError(t, err)
assert.True(t, fs.FileExists("plz-out/gen/pypkg/__init__.py"))
_, err = buildFilegroup(state, target2)
assert.NoError(t, err)
d, err := ioutil.ReadFile("plz-out/gen/pypkg/__init__.py")
assert.NoError(t, err)
assert.Equal(t, `"""output from //pypkg:target2"""`, strings.TrimSpace(string(d)))
}
func TestRecursiveInitPyCreation(t *testing.T) {
state, _ := newState("//package1/package2:wevs")
target1 := newPyFilegroup(state, "//package1/package2:target1", "file1.py")
_, err := buildFilegroup(state, target1)
assert.NoError(t, err)
assert.True(t, fs.FileExists("plz-out/gen/package1/package2/__init__.py"))
assert.True(t, fs.FileExists("plz-out/gen/package1/__init__.py"))
}
func TestGoModCreation(t *testing.T) {
state, _ := newState("//package_go/subpackage:wevs")
target := newPyFilegroup(state, "//package1/package2:target1", "file1.py")
target.AddLabel("go")
_, err := buildFilegroup(state, target)
assert.NoError(t, err)
assert.True(t, fs.PathExists("plz-out/go.mod"))
}
func TestCreatePlzOutGo(t *testing.T) {
state, target := newState("//package1:target")
target.AddLabel("link:plz-out/go/${PKG}/src")
target.AddOutput("file1.go")
assert.False(t, fs.PathExists("plz-out/go"))
assert.NoError(t, buildTarget(1, state, target, false))
assert.True(t, fs.PathExists("plz-out/go/package1/src/file1.go"))
}
func TestLicenceEnforcement(t *testing.T) {
state, target := newState("//pkg:good")
state.Config.Licences.Reject = append(state.Config.Licences.Reject, "gpl")
state.Config.Licences.Accept = append(state.Config.Licences.Accept, "mit")
// Target specifying no licence should not panic.
checkLicences(state, target)
// A license (non case sensitive) that is not in the list of accepted licenses will panic.
assert.Panics(t, func() {
target.Licences = append(target.Licences, "Bsd")
checkLicences(state, target)
}, "A target with a non-accepted licence will panic")
// Accepting bsd should resolve the panic
state.Config.Licences.Accept = append(state.Config.Licences.Accept, "BSD")
checkLicences(state, target)
// Now construct a new "bad" target.
state, target = newState("//pkg:bad")
state.Config.Licences.Reject = append(state.Config.Licences.Reject, "gpl")
state.Config.Licences.Accept = append(state.Config.Licences.Accept, "mit")
// Adding an explicitly rejected licence should panic no matter what.
target.Licences = append(target.Licences, "GPL")
assert.Panics(t, func() {
checkLicences(state, target)
}, "Trying to add GPL should panic (case insensitive)")
}
func TestFileGroupBinDir(t *testing.T) {
state, target := newState("//package1:bindir")
target.AddSource(core.FileLabel{File: "package2", Package: target.Label.PackageName})
target.IsBinary = true
target.IsFilegroup = true
_, err := buildFilegroup(state, target)
assert.NoError(t, err)
assert.True(t, fs.PathExists("plz-out/bin/package1/package2/"))
assert.True(t, fs.FileExists("plz-out/bin/package1/package2/file1.py"))
assert.True(t, fs.IsDirectory("plz-out/bin/package1/package2/"))
// Ensure permissions on directory are not modified
info, err := os.Stat("plz-out/bin/package1/package2/")
assert.NoError(t, err)
compareDir := "plz-out/bin/package1/package2_cmp/"
os.Mkdir(compareDir, core.DirPermissions)
infoCmp, err := os.Stat(compareDir)
assert.NoError(t, err)
assert.Equal(t, infoCmp.Mode().Perm(), info.Mode().Perm())
}
func TestOutputHash(t *testing.T) {
state, target := newState("//package3:target1")
target.AddOutput("file1")
target.Hashes = []string{"634b027b1b69e1242d40d53e312b3b4ac7710f55be81f289b549446ef6778bee"}
b, err := state.TargetHasher.OutputHash(target)
assert.NoError(t, err)
assert.Equal(t, "634b027b1b69e1242d40d53e312b3b4ac7710f55be81f289b549446ef6778bee", hex.EncodeToString(b))
}
func TestCheckRuleHashes(t *testing.T) {
state, target := newState("//package3:target1")
target.AddOutput("file1")
target.Hashes = []string{"6c6d66a0852b49cdeeb0e183b4f10b0309c5dd4a"}
// This is the normal sha1-with-combine hash calculation
b, _ := state.TargetHasher.OutputHash(target)
err := checkRuleHashes(state, target, b)
assert.NoError(t, err)
// This is testing the negative case
target.Hashes = []string{"630bff40cc8d5329e6176779493281ddb3e0add3"}
err = checkRuleHashes(state, target, b)
assert.Error(t, err)
// This is the equivalent to sha1sum of the file, so should be accepted too
target.Hashes = []string{"dba7673010f19a94af4345453005933fd511bea9"}
err = checkRuleHashes(state, target, b)
assert.NoError(t, err)
// This is the equivalent to sha256sum of the file, so should be accepted too
target.Hashes = []string{"634b027b1b69e1242d40d53e312b3b4ac7710f55be81f289b549446ef6778bee"}
err = checkRuleHashes(state, target, b)
assert.NoError(t, err)
// This is the equivalent to blake3 of the file, so should be accepted too
target.Hashes = []string{"37d6ae61eb7aba324b4633ef518a5a2e88feac81a0f65a67f9de40b55fe91277"}
err = checkRuleHashes(state, target, b)
assert.NoError(t, err)
}
func TestFetchLocalRemoteFile(t *testing.T) {
state, target := newState("//package4:target1")
target.AddSource(core.URLLabel("file://" + os.Getenv("TMP_DIR") + "/src/build/test_data/local_remote_file.txt"))
target.AddOutput("local_remote_file.txt")
// Temporarily reset the repo root so we can test this locally
oldRoot := core.RepoRoot
core.RepoRoot = "/wibble"
defer func() {
core.RepoRoot = oldRoot
}()
err := fetchRemoteFile(state, target)
assert.NoError(t, err)
assert.True(t, fs.FileExists(path.Join(target.TmpDir(), "local_remote_file.txt")))
}
func TestFetchLocalRemoteFileCannotBeRelative(t *testing.T) {
state, target := newState("//package4:target2")
target.AddSource(core.URLLabel("src/build/test_data/local_remote_file.txt"))
target.AddOutput("local_remote_file.txt")
err := fetchRemoteFile(state, target)
assert.Error(t, err)
}
func TestFetchLocalRemoteFileCannotBeWithinRepo(t *testing.T) {
state, target := newState("//package4:target2")
target.AddSource(core.URLLabel("file://" + os.Getenv("TMP_DIR") + "/src/build/test_data/local_remote_file.txt"))
target.AddOutput("local_remote_file.txt")
err := fetchRemoteFile(state, target)
assert.Error(t, err)
}
func TestBuildMetadatafileIsCreated(t *testing.T) {
stdOut := "wibble wibble wibble"
state, target := newState("//package1:mdtest")
target.AddOutput("file1")
err := buildTarget(rand.Int(), state, target, false)
require.NoError(t, err)
assert.False(t, target.BuildCouldModifyTarget())
assert.True(t, fs.FileExists(filepath.Join(target.OutDir(), target.TargetBuildMetadataFileName())))
state, target = newState("//package1:mdtest_post_build")
target.Command = fmt.Sprintf("echo -n '%s' | tee $OUT", stdOut)
target.AddOutput("file1")
target.PostBuildFunction = postBuildFunction(func(target *core.BuildTarget, output string) error {
assert.Equal(t, stdOut, output)
return nil
})
err = buildTarget(rand.Int(), state, target, false)
require.NoError(t, err)
assert.True(t, target.BuildCouldModifyTarget())
assert.True(t, fs.FileExists(filepath.Join(target.OutDir(), target.TargetBuildMetadataFileName())))
md, err := loadTargetMetadata(target)
require.NoError(t, err)
assert.Equal(t, stdOut, string(md.Stdout))
}
// With a single output and no forced combining, the output hash should be the hash of
// that item itself; combined and multi-output cases are also covered in the table below.
func TestSha1SingleHash(t *testing.T) {
testCases := []struct {
name string
algorithm string
sha1ForceCombine bool
fooHash string
fooAndBarHash string
}{
{
name: "sha1 no combine",
algorithm: "sha1",
sha1ForceCombine: false,
fooHash: "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33",
fooAndBarHash: "4030c3573bf908b75420818b8c0b041443a3f21e",
},
{
name: "sha1 force combine",
algorithm: "sha1",
sha1ForceCombine: true,
fooHash: "a7880a3d0e9799a88cf18ac67cb3ee19a7e43190",
fooAndBarHash: "4030c3573bf908b75420818b8c0b041443a3f21e",
},
{
name: "sha256",
algorithm: "sha256",
fooHash: "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
fooAndBarHash: "50d2e3c6f77d85d62907693deb75af0985012566e1fd37e0c2859b3716bccc85",
},
{
name: "crc32",
algorithm: "crc32",
fooHash: "8c736521",
fooAndBarHash: "045139db",
},
{
name: "crc64",
algorithm: "crc64",
fooHash: "3c3c303000000000",
fooAndBarHash: "1ff602f5b67b13f4",
},
{
name: "blake3",
algorithm: "blake3",
fooHash: "04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9",
fooAndBarHash: "17d3b6ed7a554870abc95efae5e6255174a53efa40ef1844a21d0d29edac5d68",
},
}
for _, test := range testCases {
t.Run(test.name+" foo", func(t *testing.T) {
state, target := newStateWithHashFunc("//hash_test:hash_test", test.algorithm, test.sha1ForceCombine)
target.AddOutput("foo.txt")
h, err := newTargetHasher(state).OutputHash(target)
require.NoError(t, err)
assert.Equal(t, test.fooHash, hex.EncodeToString(h))
})
t.Run(test.name+" foo and bar", func(t *testing.T) {
state, target := newStateWithHashFunc("//hash_test:hash_test", test.algorithm, test.sha1ForceCombine)
target.AddOutput("foo.txt")
target.AddOutput("bar.txt")
h, err := newTargetHasher(state).OutputHash(target)
require.NoError(t, err)
assert.Equal(t, test.fooAndBarHash, hex.EncodeToString(h))
})
}
}
func newStateWithHashFunc(label, hashFunc string, sha1ForceCombine bool) (*core.BuildState, *core.BuildTarget) {
config, _ := core.ReadConfigFiles(nil, nil)
config.Build.HashFunction = hashFunc
config.FeatureFlags.SingleSHA1Hash = !sha1ForceCombine
state := core.NewBuildState(config)
state.Config.Parse.BuildFileName = []string{"BUILD_FILE"}
target := core.NewBuildTarget(core.ParseBuildLabel(label, ""))
target.Command = fmt.Sprintf("echo 'output of %s' > $OUT", target.Label)
target.BuildTimeout = 100 * time.Second
state.Graph.AddTarget(target)
state.Parser = &fakeParser{}
Init(state)
return state, target
}
func newState(label string) (*core.BuildState, *core.BuildTarget) {
config, _ := core.ReadConfigFiles(nil, nil)
state := core.NewBuildState(config)
state.Config.Parse.BuildFileName = []string{"BUILD_FILE"}
target := core.NewBuildTarget(core.ParseBuildLabel(label, ""))
target.Command = fmt.Sprintf("echo 'output of %s' > $OUT", target.Label)
target.BuildTimeout = 100 * time.Second
state.Graph.AddTarget(target)
state.Parser = &fakeParser{}
Init(state)
return state, target
}
func newPyFilegroup(state *core.BuildState, label, filename string) *core.BuildTarget {
target := core.NewBuildTarget(core.ParseBuildLabel(label, ""))
target.AddSource(core.FileLabel{File: filename, Package: target.Label.PackageName})
target.AddOutput(filename)
target.AddLabel("py")
target.IsFilegroup = true
state.Graph.AddTarget(target)
return target
}
// Fake cache implementation with hardcoded behaviour for the various tests above.
type mockCache struct{}
func (*mockCache) Store(target *core.BuildTarget, key []byte, files []string) {
}
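// Retrieve simulates cache hits for target8 and target10 by writing their outputs
// and stored metadata; every other target is treated as a cache miss.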
func (*mockCache) Retrieve(target *core.BuildTarget, key []byte, outputs []string) bool {
if target.Label.Name == "target8" {
ioutil.WriteFile("plz-out/gen/package1/file8", []byte("retrieved from cache"), 0664)
md := &core.BuildMetadata{}
if err := StoreTargetMetadata(target, md); err != nil {
panic(err)
}
return true
} else if target.Label.Name == "target10" {
ioutil.WriteFile("plz-out/gen/package1/file10", []byte("retrieved from cache"), 0664)
md := &core.BuildMetadata{Stdout: []byte("retrieved from cache")}
if err := StoreTargetMetadata(target, md); err != nil {
panic(err)
}
return true
}
return false
}
func (*mockCache) Clean(target *core.BuildTarget) {}
func (*mockCache) CleanAll() {}
func (*mockCache) Shutdown() {}
type fakeParser struct {
}
func (fake *fakeParser) ParseFile(state *core.BuildState, pkg *core.Package, filename string) error {
return nil
}
func (fake *fakeParser) ParseReader(state *core.BuildState, pkg *core.Package, r io.ReadSeeker) error {
return nil
}
func (fake *fakeParser) RunPreBuildFunction(threadID int, state *core.BuildState, target *core.BuildTarget) error {
return target.PreBuildFunction.Call(target)
}
func (fake *fakeParser) RunPostBuildFunction(threadID int, state *core.BuildState, target *core.BuildTarget, output string) error {
return target.PostBuildFunction.Call(target, output)
}
type preBuildFunction func(*core.BuildTarget) error
type postBuildFunction func(*core.BuildTarget, string) error
func (f preBuildFunction) Call(target *core.BuildTarget) error { return f(target) }
func (f preBuildFunction) String() string { return "" }
func (f postBuildFunction) Call(target *core.BuildTarget, output string) error {
return f(target, output)
}
func (f postBuildFunction) String() string { return "" }
func TestMain(m *testing.M) {
cache = &mockCache{}
backend := logging.NewLogBackend(os.Stderr, "", 0)
backendLeveled := logging.AddModuleLevel(backend)
backendLeveled.SetLevel(logging.DEBUG, "")
logging.SetBackend(backend, backendLeveled)
// Move ourselves to the root of the test data tree
wd, _ := os.Getwd()
core.RepoRoot = path.Join(wd, "src/build/test_data")
Init(core.NewDefaultBuildState())
if err := os.Chdir(core.RepoRoot); err != nil {
panic(err)
}
os.Exit(m.Run())
}
|
[
"\"TMP_DIR\"",
"\"TMP_DIR\""
] |
[] |
[
"TMP_DIR"
] |
[]
|
["TMP_DIR"]
|
go
| 1 | 0 | |
synthtool/gcp/common.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import shutil
import fnmatch
from copy import deepcopy
from pathlib import Path
from typing import Dict, List, Optional
import jinja2
from synthtool import shell, _tracked_paths
from synthtool.gcp import partials
from synthtool.languages import node
from synthtool.log import logger
from synthtool.sources import git, templates
PathOrStr = templates.PathOrStr
TEMPLATES_URL: str = git.make_repo_clone_url("googleapis/synthtool")
DEFAULT_TEMPLATES_PATH = "synthtool/gcp/templates"
LOCAL_TEMPLATES: Optional[str] = os.environ.get("SYNTHTOOL_TEMPLATES")
class CommonTemplates:
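# Template root resolution order: an explicit template_path argument, then the
# SYNTHTOOL_TEMPLATES environment variable, and finally a fresh clone of
# googleapis/synthtool.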
def __init__(self, template_path: Optional[Path] = None):
if template_path:
self._template_root = template_path
elif LOCAL_TEMPLATES:
logger.debug(f"Using local templates at {LOCAL_TEMPLATES}")
self._template_root = Path(LOCAL_TEMPLATES)
else:
templates_git = git.clone(TEMPLATES_URL)
self._template_root = templates_git / DEFAULT_TEMPLATES_PATH
self._templates = templates.Templates(self._template_root)
self.excludes = [] # type: List[str]
def _generic_library(self, directory: str, **kwargs) -> Path:
# load common repo meta information (metadata that's not language specific).
if "metadata" in kwargs:
self._load_generic_metadata(kwargs["metadata"])
# if no samples were found, don't attempt to render a
# samples/README.md.
if "samples" not in kwargs["metadata"] or not kwargs["metadata"]["samples"]:
self.excludes.append("samples/README.md")
t = templates.TemplateGroup(self._template_root / directory, self.excludes)
if "repository" in kwargs["metadata"] and "repo" in kwargs["metadata"]:
kwargs["metadata"]["repo"]["default_branch"] = _get_default_branch_name(
kwargs["metadata"]["repository"]
)
# TODO: migrate to python.py once old sample gen is deprecated
if directory == "python_samples":
t.env.globals["get_help"] = lambda filename: shell.run(
["python", filename, "--help"]
).stdout
result = t.render(**kwargs)
_tracked_paths.add(result)
return result
def py_samples(self, **kwargs) -> List[Path]:
"""
Handles generation of README.md templates for Python samples
- Determines whether generation is being done in a client library or in a samples
folder automatically
- Otherwise accepts manually set sample_project_dir through kwargs metadata
- Delegates generation of additional sample documents in alternate/overridden folders
through py_samples_override()
"""
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
# load common repo meta information (metadata that's not language specific).
self._load_generic_metadata(kwargs["metadata"])
# temporary exclusion prior to old templates being migrated out
self.excludes.extend(
[
"README.rst",
"auth_api_key.tmpl.rst",
"auth.tmpl.rst",
"install_deps.tmpl.rst",
"install_portaudio.tmpl.rst",
"noxfile.py.j2",
]
)
# ensure samples will generate
kwargs["metadata"]["samples"] = True
# determine if in client lib and set custom root sample dir if specified, else None
in_client_library = Path("samples").exists()
sample_project_dir = kwargs["metadata"]["repo"].get("sample_project_dir")
if sample_project_dir is None: # Not found in metadata
if in_client_library:
sample_project_dir = "samples"
else:
sample_project_dir = "."
elif not Path(sample_project_dir).exists():
raise Exception(f"'{sample_project_dir}' does not exist")
override_paths_to_samples: Dict[
str, List[str]
] = {} # Dict of format { override_path : sample(s) }
samples_dict = deepcopy(kwargs["metadata"]["repo"].get("samples"))
default_samples_dict = []  # samples which will be generated in sample_project_dir
# Iterate through samples to store override_paths_to_samples for all existing
# override paths
for sample_idx, sample in enumerate(samples_dict):
override_path = samples_dict[sample_idx].get("override_path")
if override_path is not None:
# add absolute path to metadata so `python foo.py --help` succeeds
if sample.get("file") is not None:
path = os.path.join(
sample_project_dir, override_path, sample.get("file")
)
sample["abs_path"] = Path(path).resolve()
cur_override_sample = override_paths_to_samples.get(override_path)
# Base case: No samples are yet planned to gen in this override dir
if cur_override_sample is None:
override_paths_to_samples[override_path] = [sample]
# Else: Sample docs will be generated in README merged with other
# sample doc(s) already planned to generate in this dir
else:
cur_override_sample.append(sample)
override_paths_to_samples[override_path] = cur_override_sample
# If override path none, will be generated in the default
# folder: sample_project_dir
else:
if sample.get("file") is not None:
path = os.path.join(sample_project_dir, sample.get("file"))
sample["abs_path"] = Path(path).resolve()
default_samples_dict.append(sample)
# List of paths to tempdirs which will be copied into sample folders
result = []
# a deep copy is required here so the original kwargs are not mutated
overridden_samples_kwargs = deepcopy(kwargs)
for override_path in override_paths_to_samples:
# Generate override sample docs
result.append(
self.py_samples_override(
root=sample_project_dir,
override_path=override_path,
override_samples=override_paths_to_samples[override_path],
**overridden_samples_kwargs,
)
)
kwargs["metadata"]["repo"]["samples"] = default_samples_dict
logger.debug(
f"Generating templates for samples directory '{sample_project_dir}'"
)
kwargs["subdir"] = sample_project_dir
# Generate default sample docs
result.append(self._generic_library("python_samples", **kwargs))
for path in result:
# .add() records the root of the paths and needs to be applied to each
_tracked_paths.add(path)
return result
def py_samples_override(
self, root, override_path, override_samples, **overridden_samples_kwargs
) -> Path:
"""
Handles additional generation of READMEs where "override_path"s
are set in one or more samples' metadata
"""
overridden_samples_kwargs["metadata"]["repo"][
"sample_project_dir"
] = override_path
# Set samples metadata to ONLY samples intended to generate
# under this directory (override_path)
overridden_samples_kwargs["metadata"]["repo"]["samples"] = override_samples
if root != ".":
override_path = Path(root) / override_path
logger.debug(f"Generating templates for override path '{override_path}'")
overridden_samples_kwargs["subdir"] = override_path
return self._generic_library("python_samples", **overridden_samples_kwargs)
def py_library(self, **kwargs) -> Path:
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
# rename variable to accommodate existing synth.py files
if "system_test_dependencies" in kwargs:
kwargs["system_test_local_dependencies"] = kwargs[
"system_test_dependencies"
]
logger.warning(
"Template argument 'system_test_dependencies' is deprecated."
"Use 'system_test_local_dependencies' or 'system_test_external_dependencies'"
"instead."
)
# Set default Python versions for noxfile.py
if "default_python_version" not in kwargs:
kwargs["default_python_version"] = "3.8"
if "unit_test_python_versions" not in kwargs:
kwargs["unit_test_python_versions"] = ["3.6", "3.7", "3.8", "3.9"]
if "microgenerator" not in kwargs:
kwargs["unit_test_python_versions"] = ["2.7"] + kwargs[
"unit_test_python_versions"
]
if "system_test_python_versions" not in kwargs:
kwargs["system_test_python_versions"] = ["3.8"]
if "microgenerator" not in kwargs:
kwargs["system_test_python_versions"] = ["2.7"] + kwargs[
"system_test_python_versions"
]
# If cov_level is not given, set it to None.
if "cov_level" not in kwargs:
kwargs["cov_level"] = None
# Don't add samples templates if there are no samples
if "samples" not in kwargs:
self.excludes += ["samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md"]
ret = self._generic_library("python_library", **kwargs)
# If split_system_tests is set to True, we disable the system
# test in the main presubmit build and create individual build
# configs for each Python version.
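# e.g. for system_test_python_versions == ["3.8"] this writes
# .kokoro/presubmit/system-3.8.cfg rendered with nox_session="system-3.8"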
if kwargs.get("split_system_tests", False):
template_root = self._template_root / "py_library_split_systests"
# copy the main presubmit config
shutil.copy2(
template_root / ".kokoro/presubmit/presubmit.cfg",
ret / ".kokoro/presubmit/presubmit.cfg",
)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(str(template_root)))
tmpl = env.get_template(".kokoro/presubmit/system.cfg")
for v in kwargs["system_test_python_versions"]:
nox_session = f"system-{v}"
dest = ret / f".kokoro/presubmit/system-{v}.cfg"
content = tmpl.render(nox_session=nox_session)
with open(dest, "w") as f:
f.write(content)
return ret
def java_library(self, **kwargs) -> Path:
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
return self._generic_library("java_library", **kwargs)
def node_library(self, **kwargs) -> Path:
# TODO: once we've migrated all Node.js repos to either having
# .repo-metadata.json, or excluding README.md, we can remove this.
if not os.path.exists("./.repo-metadata.json"):
self.excludes.append("README.md")
if "samples/README.md" not in self.excludes:
self.excludes.append("samples/README.md")
kwargs["metadata"] = node.template_metadata()
kwargs["publish_token"] = node.get_publish_token(kwargs["metadata"]["name"])
ignore_src_index = [
"yes" for f in self.excludes if fnmatch.fnmatch("src/index.ts", f)
]
# generate root-level `src/index.ts` to export multiple versions and their default clients
if (
"versions" in kwargs
and "default_version" in kwargs
and not ignore_src_index
):
node.generate_index_ts(
versions=kwargs["versions"], default_version=kwargs["default_version"]
)
return self._generic_library("node_library", **kwargs)
def php_library(self, **kwargs) -> Path:
return self._generic_library("php_library", **kwargs)
def ruby_library(self, **kwargs) -> Path:
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
return self._generic_library("ruby_library", **kwargs)
def render(self, template_name: str, **kwargs) -> Path:
template = self._templates.render(template_name, **kwargs)
_tracked_paths.add(template)
return template
def _load_generic_metadata(self, metadata: Dict):
"""
Loads template partials and, if not already present, repo metadata from .repo-metadata.json.
"""
metadata["partials"] = partials.load_partials()
# Loads repo metadata information from the default location if it
# hasn't already been set. Some callers may have already loaded repo
# metadata, so we don't need to do it again or overwrite it. Also, only
# set the "repo" key.
if "repo" not in metadata:
metadata["repo"] = _load_repo_metadata()
def decamelize(value: str):
""" parser to convert fooBar.js to Foo Bar. """
if not value:
return ""
str_decamelize = re.sub("^.", value[0].upper(), value) # apple -> Apple.
str_decamelize = re.sub(
"([A-Z]+)([A-Z])([a-z0-9])", r"\1 \2\3", str_decamelize
) # ACLBatman -> ACL Batman.
return re.sub("([a-z0-9])([A-Z])", r"\1 \2", str_decamelize) # FooBar -> Foo Bar.
def _load_repo_metadata(metadata_file: str = "./.repo-metadata.json") -> Dict:
"""Parse a metadata JSON file into a Dict.
Currently, the defined fields are:
* `name` - The service's API name
* `name_pretty` - The service's API title. This will be used for generating titles on READMEs
* `product_documentation` - The product documentation on cloud.google.com
* `client_documentation` - The client library reference documentation
* `issue_tracker` - The public issue tracker for the product
* `release_level` - The release level of the client library. One of: alpha, beta, ga, deprecated
* `language` - The repo language. One of dotnet, go, java, nodejs, php, python, ruby
* `repo` - The GitHub repo in the format {owner}/{repo}
* `distribution_name` - The language-idiomatic package/distribution name
* `api_id` - The API ID associated with the service. Fully qualified identifier used to
enable a service in the cloud platform (e.g. monitoring.googleapis.com)
* `requires_billing` - Whether or not the API requires billing to be configured on the
customer's account
Args:
metadata_file (str, optional): Path to the metadata json file
Returns:
A dictionary of metadata. This may not necessarily include all the defined fields above.
"""
if os.path.exists(metadata_file):
with open(metadata_file) as f:
return json.load(f)
return {}
def _get_default_branch_name(repository_name: str) -> str:
# This default should be switched to "main" once we've migrated
# the majority of our repositories:
return os.getenv("DEFAULT_BRANCH", "master")
|
[] |
[] |
[
"SYNTHTOOL_TEMPLATES",
"DEFAULT_BRANCH"
] |
[]
|
["SYNTHTOOL_TEMPLATES", "DEFAULT_BRANCH"]
|
python
| 2 | 0 | |
providers/ibm/ibm_is_public_gateway.go
|
// Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"fmt"
"log"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
"github.com/IBM/go-sdk-core/v4/core"
"github.com/IBM/vpc-go-sdk/vpcv1"
)
// PublicGatewayGenerator ...
type PublicGatewayGenerator struct {
IBMService
}
func (g PublicGatewayGenerator) createPublicGatewayResources(publicGatewayID, publicGatewayName string) terraformutils.Resource {
resources := terraformutils.NewSimpleResource(
publicGatewayID,
publicGatewayName,
"ibm_is_public_gateway",
"ibm",
[]string{})
return resources
}
// InitResources ...
func (g *PublicGatewayGenerator) InitResources() error {
var resoureGroup string
region := envFallBack([]string{"IC_REGION"}, "us-south")
apiKey := os.Getenv("IC_API_KEY")
if apiKey == "" {
log.Fatal("No API key set")
}
rg := g.Args["resource_group"]
if rg != nil {
resoureGroup = rg.(string)
}
vpcurl := fmt.Sprintf("https://%s.iaas.cloud.ibm.com/v1", region)
vpcoptions := &vpcv1.VpcV1Options{
URL: envFallBack([]string{"IBMCLOUD_IS_API_ENDPOINT"}, vpcurl),
Authenticator: &core.IamAuthenticator{
ApiKey: apiKey,
},
}
vpcclient, err := vpcv1.NewVpcV1(vpcoptions)
if err != nil {
return err
}
start := ""
var allrecs []vpcv1.PublicGateway
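// Page through all public gateways, following the pagination token until exhausted.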
for {
options := &vpcv1.ListPublicGatewaysOptions{}
if start != "" {
options.Start = &start
}
if resoureGroup != "" {
options.ResourceGroupID = &resoureGroup
}
pgs, response, err := vpcclient.ListPublicGateways(options)
if err != nil {
return fmt.Errorf("Error Fetching Public Gateways %s\n%s", err, response)
}
start = GetNext(pgs.Next)
allrecs = append(allrecs, pgs.PublicGateways...)
if start == "" {
break
}
}
for _, pg := range allrecs {
g.Resources = append(g.Resources, g.createPublicGatewayResources(*pg.ID, *pg.Name))
}
return nil
}
|
[
"\"IC_API_KEY\""
] |
[] |
[
"IC_API_KEY"
] |
[]
|
["IC_API_KEY"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"net/http"
"os"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
log "github.com/sirupsen/logrus"
"github.com/moonug/mikrotik-exporter/collector"
"github.com/moonug/mikrotik-exporter/config"
)
// A single device can be defined via CLI flags; multiple devices require a config file.
var (
address = flag.String("address", "", "address of the device to monitor")
configFile = flag.String("config-file", "", "config file to load")
device = flag.String("device", "", "single device to monitor")
insecure = flag.Bool("insecure", false, "skips verification of server certificate when using TLS (not recommended)")
logFormat = flag.String("log-format", "json", "logformat text or json (default json)")
logLevel = flag.String("log-level", "info", "log level")
metricsPath = flag.String("path", "/metrics", "path to answer requests on")
password = flag.String("password", "", "password for authentication for single device")
deviceport = flag.String("deviceport", "8728", "port for single device")
port = flag.String("port", ":9436", "port number to listen on")
timeout = flag.Duration("timeout", collector.DefaultTimeout, "timeout when connecting to devices")
tls = flag.Bool("tls", false, "use tls to connect to routers")
user = flag.String("user", "", "user for authentication with single device")
ver = flag.Bool("version", false, "print the version of the binary")
withBgp = flag.Bool("with-bgp", false, "retrieves BGP routing information")
withRoutes = flag.Bool("with-routes", false, "retrieves routing table information")
withDHCP = flag.Bool("with-dhcp", false, "retrieves DHCP server metrics")
withDHCPL = flag.Bool("with-dhcpl", false, "retrieves DHCP server lease metrics")
withDHCPv6 = flag.Bool("with-dhcpv6", false, "retrieves DHCPv6 server metrics")
withFirmware = flag.Bool("with-firmware", false, "retrieves firmware versions")
withHealth = flag.Bool("with-health", false, "retrieves board Health metrics")
withPOE = flag.Bool("with-poe", false, "retrieves PoE metrics")
withPools = flag.Bool("with-pools", false, "retrieves IP(v6) pool metrics")
withOptics = flag.Bool("with-optics", false, "retrieves optical diagnostic metrics")
withW60G = flag.Bool("with-w60g", false, "retrieves w60g interface metrics")
withWlanSTA = flag.Bool("with-wlansta", false, "retrieves connected wlan station metrics")
withCapsman = flag.Bool("with-capsman", false, "retrieves capsman station metrics")
withWlanIF = flag.Bool("with-wlanif", false, "retrieves wlan interface metrics")
withMonitor = flag.Bool("with-monitor", false, "retrieves ethernet interface monitor info")
withIpsec = flag.Bool("with-ipsec", false, "retrieves ipsec metrics")
withIpsecPeers = flag.Bool("with-ipsec-peers", false, "retrieves ipsec peers metrics")
withOSPFNeighbor = flag.Bool("with-ospf-neighbor", false, "retrieves ospf neighbor metrics")
withLTE = flag.Bool("with-lte", false, "retrieves lte metrics")
withNetwatch = flag.Bool("with-netwatch", false, "retrieves netwatch metrics")
withConntrack = flag.Bool("with-conntrack", false, "retrieves conntrack table metrics")
withBridgeHost = flag.Bool("with-bridge-host", false, "retrieves bridge host metrics")
cfg *config.Config
appVersion = "DEVELOPMENT"
shortSha = "0xDEADBEEF"
)
func init() {
prometheus.MustRegister(version.NewCollector("mikrotik_exporter"))
}
func main() {
flag.Parse()
if *ver {
fmt.Printf("\nVersion: %s\nShort SHA: %s\n\n", appVersion, shortSha)
os.Exit(0)
}
configureLog()
c, err := loadConfig()
if err != nil {
log.Errorf("Could not load config: %v", err)
os.Exit(3)
}
cfg = c
startServer()
}
func configureLog() {
ll, err := log.ParseLevel(*logLevel)
if err != nil {
panic(err)
}
log.SetLevel(ll)
if *logFormat == "text" {
log.SetFormatter(&log.TextFormatter{})
} else {
log.SetFormatter(&log.JSONFormatter{})
}
}
func loadConfig() (*config.Config, error) {
if *configFile != "" {
return loadConfigFromFile()
}
return loadConfigFromFlags()
}
func loadConfigFromFile() (*config.Config, error) {
b, err := ioutil.ReadFile(*configFile)
if err != nil {
return nil, err
}
return config.Load(bytes.NewReader(b))
}
func loadConfigFromFlags() (*config.Config, error) {
// Attempt to read credentials from env if not already defined
if *user == "" {
*user = os.Getenv("MIKROTIK_USER")
}
if *password == "" {
*password = os.Getenv("MIKROTIK_PASSWORD")
}
if *device == "" || *address == "" || *user == "" || *password == "" {
return nil, fmt.Errorf("missing required param for single device configuration")
}
return &config.Config{
Devices: []config.Device{
{
Name: *device,
Address: *address,
User: *user,
Password: *password,
Port: *deviceport,
},
},
}, nil
}
func startServer() {
h, err := createMetricsHandler()
if err != nil {
log.Fatal(err)
}
http.Handle(*metricsPath, h)
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("ok"))
})
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte(`<html>
<head><title>Mikrotik Exporter</title></head>
<body>
<h1>Mikrotik Exporter</h1>
<p><a href="` + *metricsPath + `">Metrics</a></p>
</body>
</html>`))
})
log.Info("Listening on ", *port)
log.Fatal(http.ListenAndServe(*port, nil))
}
func createMetricsHandler() (http.Handler, error) {
opts := collectorOptions()
nc, err := collector.NewCollector(cfg, opts...)
if err != nil {
return nil, err
}
registry := prometheus.NewRegistry()
err = registry.Register(nc)
if err != nil {
return nil, err
}
return promhttp.HandlerFor(registry,
promhttp.HandlerOpts{
ErrorLog: log.New(),
ErrorHandling: promhttp.ContinueOnError,
}), nil
}
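// collectorOptions enables a collector feature when either its CLI flag or the
// corresponding feature switch in the config file is set.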
func collectorOptions() []collector.Option {
opts := []collector.Option{}
if *withBgp || cfg.Features.BGP {
opts = append(opts, collector.WithBGP())
}
if *withRoutes || cfg.Features.Routes {
opts = append(opts, collector.WithRoutes())
}
if *withDHCP || cfg.Features.DHCP {
opts = append(opts, collector.WithDHCP())
}
if *withDHCPL || cfg.Features.DHCPL {
opts = append(opts, collector.WithDHCPL())
}
if *withDHCPv6 || cfg.Features.DHCPv6 {
opts = append(opts, collector.WithDHCPv6())
}
if *withFirmware || cfg.Features.Firmware {
opts = append(opts, collector.WithFirmware())
}
if *withHealth || cfg.Features.Health {
opts = append(opts, collector.WithHealth())
}
if *withPOE || cfg.Features.POE {
opts = append(opts, collector.WithPOE())
}
if *withPools || cfg.Features.Pools {
opts = append(opts, collector.WithPools())
}
if *withOptics || cfg.Features.Optics {
opts = append(opts, collector.WithOptics())
}
if *withW60G || cfg.Features.W60G {
opts = append(opts, collector.WithW60G())
}
if *withWlanSTA || cfg.Features.WlanSTA {
opts = append(opts, collector.WithWlanSTA())
}
if *withCapsman || cfg.Features.Capsman {
opts = append(opts, collector.WithCapsman())
}
if *withWlanIF || cfg.Features.WlanIF {
opts = append(opts, collector.WithWlanIF())
}
if *withMonitor || cfg.Features.Monitor {
opts = append(opts, collector.WithMonitor())
}
if *withIpsec || cfg.Features.Ipsec {
opts = append(opts, collector.WithIpsec())
}
if *withIpsecPeers || cfg.Features.IpsecPeers {
opts = append(opts, collector.WithIpsecPeers())
}
if *withOSPFNeighbor || cfg.Features.OSPFNeighbor {
opts = append(opts, collector.WithOSPFNeighbor())
}
if *withLTE || cfg.Features.LTE {
opts = append(opts, collector.WithLTE())
}
if *withNetwatch || cfg.Features.Netwatch {
opts = append(opts, collector.WithNetwatch())
}
if *withConntrack || cfg.Features.Conntrack {
opts = append(opts, collector.WithConntrack())
}
if *withBridgeHost || cfg.Features.BridgeHost {
opts = append(opts, collector.WithBridgeHost())
}
if *timeout != collector.DefaultTimeout {
opts = append(opts, collector.WithTimeout(*timeout))
}
if *tls {
opts = append(opts, collector.WithTLS(*insecure))
}
return opts
}
|
[
"\"MIKROTIK_USER\"",
"\"MIKROTIK_PASSWORD\""
] |
[] |
[
"MIKROTIK_PASSWORD",
"MIKROTIK_USER"
] |
[]
|
["MIKROTIK_PASSWORD", "MIKROTIK_USER"]
|
go
| 2 | 0 | |
misc/cgo/testcarchive/carchive_test.go
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package carchive_test
import (
"bufio"
"bytes"
"debug/elf"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"syscall"
"testing"
"time"
"unicode"
)
// Program to run.
var bin []string
// C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)).
var cc []string
// ".exe" on Windows.
var exeSuffix string
var GOOS, GOARCH, GOPATH string
var libgodir string
func TestMain(m *testing.M) {
flag.Parse()
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
fmt.Printf("SKIP - short mode and $GO_BUILDER_NAME not set\n")
os.Exit(0)
}
log.SetFlags(log.Lshortfile)
os.Exit(testMain(m))
}
func testMain(m *testing.M) int {
// We need a writable GOPATH in which to run the tests.
// Construct one in a temporary directory.
var err error
GOPATH, err = ioutil.TempDir("", "carchive_test")
if err != nil {
log.Panic(err)
}
defer os.RemoveAll(GOPATH)
os.Setenv("GOPATH", GOPATH)
// Copy testdata into GOPATH/src/testarchive, along with a go.mod file
// declaring the same path.
modRoot := filepath.Join(GOPATH, "src", "testcarchive")
if err := overlayDir(modRoot, "testdata"); err != nil {
log.Panic(err)
}
if err := os.Chdir(modRoot); err != nil {
log.Panic(err)
}
os.Setenv("PWD", modRoot)
if err := ioutil.WriteFile("go.mod", []byte("module testcarchive\n"), 0666); err != nil {
log.Panic(err)
}
GOOS = goEnv("GOOS")
GOARCH = goEnv("GOARCH")
bin = cmdToRun("./testp")
ccOut := goEnv("CC")
cc = []string{string(ccOut)}
out := goEnv("GOGCCFLAGS")
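// Split GOGCCFLAGS into individual compiler arguments. The hand-rolled scanner
// below honours single/double quotes and backslash escapes, since the flags can
// contain quoted paths with embedded spaces.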
quote := '\000'
start := 0
lastSpace := true
backslash := false
s := string(out)
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
cc = append(cc, s[start:i])
lastSpace = true
}
} else {
if lastSpace {
start = i
lastSpace = false
}
if quote == '\000' && !backslash && (c == '"' || c == '\'') {
quote = c
backslash = false
} else if !backslash && quote == c {
quote = '\000'
} else if (quote == '\000' || quote == '"') && !backslash && c == '\\' {
backslash = true
} else {
backslash = false
}
}
}
if !lastSpace {
cc = append(cc, s[start:])
}
if GOOS == "darwin" {
// For Darwin/ARM.
// TODO(crawshaw): can we do better?
cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...)
}
if GOOS == "aix" {
// -Wl,-bnoobjreorder is mandatory to keep the same layout
// in .text section.
cc = append(cc, "-Wl,-bnoobjreorder")
}
libbase := GOOS + "_" + GOARCH
if runtime.Compiler == "gccgo" {
libbase = "gccgo_" + libgodir + "_fPIC"
} else {
switch GOOS {
case "darwin":
if GOARCH == "arm" || GOARCH == "arm64" {
libbase += "_shared"
}
case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris", "illumos":
libbase += "_shared"
}
}
libgodir = filepath.Join(GOPATH, "pkg", libbase, "testcarchive")
cc = append(cc, "-I", libgodir)
if GOOS == "windows" {
exeSuffix = ".exe"
}
return m.Run()
}
func goEnv(key string) string {
out, err := exec.Command("go", "env", key).Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
fmt.Fprintf(os.Stderr, "%s", ee.Stderr)
}
log.Panicf("go env %s failed:\n%s\n", key, err)
}
return strings.TrimSpace(string(out))
}
func cmdToRun(name string) []string {
execScript := "go_" + goEnv("GOOS") + "_" + goEnv("GOARCH") + "_exec"
executor, err := exec.LookPath(execScript)
if err != nil {
return []string{name}
}
return []string{executor, name}
}
func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
t.Helper()
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
defer func() {
os.Remove(libgoa)
os.Remove(libgoh)
}()
ccArgs := append(cc, "-o", exe, "main.c")
if GOOS == "windows" {
ccArgs = append(ccArgs, "main_windows.c", libgoa, "-lntdll", "-lws2_32", "-lwinmm")
} else {
ccArgs = append(ccArgs, "main_unix.c", libgoa)
}
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
t.Log(ccArgs)
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
defer os.Remove(exe)
binArgs := append(cmdToRun(exe), "arg1", "arg2")
cmd = exec.Command(binArgs[0], binArgs[1:]...)
if runtime.Compiler == "gccgo" {
cmd.Env = append(os.Environ(), "GCCGO=1")
}
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, libgoh)
}
var badLineRegexp = regexp.MustCompile(`(?m)^#line [0-9]+ "/.*$`)
// checkLineComments checks that the export header generated by
// -buildmode=c-archive doesn't have any absolute paths in the #line
// comments. We don't want those paths because they are unhelpful for
// the user and make the files change based on details of the location
// of GOPATH.
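// For illustration only (hypothetical path): a header line such as
//	#line 12 "/tmp/gopath/src/testcarchive/libgo/libgo.go"
// would be rejected, whereas #line directives with relative paths are accepted.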
func checkLineComments(t *testing.T, hdrname string) {
hdr, err := ioutil.ReadFile(hdrname)
if err != nil {
if !os.IsNotExist(err) {
t.Error(err)
}
return
}
if line := badLineRegexp.Find(hdr); line != nil {
t.Errorf("bad #line directive with absolute path in %s: %q", hdrname, line)
}
}
func TestInstall(t *testing.T) {
defer os.RemoveAll(filepath.Join(GOPATH, "pkg"))
libgoa := "libgo.a"
if runtime.Compiler == "gccgo" {
libgoa = "liblibgo.a"
}
testInstall(t, "./testp1"+exeSuffix,
filepath.Join(libgodir, libgoa),
filepath.Join(libgodir, "libgo.h"),
"go", "install", "-i", "-buildmode=c-archive", "./libgo")
// Test building libgo other than installing it.
// Header files are now present.
testInstall(t, "./testp2"+exeSuffix, "libgo.a", "libgo.h",
"go", "build", "-buildmode=c-archive", filepath.Join(".", "libgo", "libgo.go"))
testInstall(t, "./testp3"+exeSuffix, "libgo.a", "libgo.h",
"go", "build", "-buildmode=c-archive", "-o", "libgo.a", "./libgo")
}
func TestEarlySignalHandler(t *testing.T) {
switch GOOS {
case "darwin":
switch GOARCH {
case "arm", "arm64":
t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
}
case "windows":
t.Skip("skipping signal test on Windows")
}
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
os.Remove("testp")
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo2.a", "./libgo2")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main2.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
if out, err := exec.Command(bin[0], bin[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
}
func TestSignalForwarding(t *testing.T) {
checkSignalForwardingTest(t)
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
os.Remove("testp")
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo2.a", "./libgo2")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
cmd = exec.Command(bin[0], append(bin[1:], "1")...)
out, err := cmd.CombinedOutput()
t.Logf("%s", out)
expectSignal(t, err, syscall.SIGSEGV)
// Test SIGPIPE forwarding
cmd = exec.Command(bin[0], append(bin[1:], "3")...)
out, err = cmd.CombinedOutput()
t.Logf("%s", out)
expectSignal(t, err, syscall.SIGPIPE)
}
func TestSignalForwardingExternal(t *testing.T) {
if GOOS == "freebsd" || GOOS == "aix" {
t.Skipf("skipping on %s/%s; signal always goes to the Go runtime", GOOS, GOARCH)
} else if GOOS == "darwin" && GOARCH == "amd64" {
t.Skipf("skipping on %s/%s: runtime does not permit SI_USER SIGSEGV", GOOS, GOARCH)
}
checkSignalForwardingTest(t)
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
os.Remove("testp")
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo2.a", "./libgo2")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
// We want to send the process a signal and see if it dies.
// Normally the signal goes to the C thread, the Go signal
// handler picks it up, sees that it is running in a C thread,
// and the program dies. Unfortunately, occasionally the
// signal is delivered to a Go thread, which winds up
// discarding it because it was sent by another program and
// there is no Go handler for it. To avoid this, run the
// program several times in the hopes that it will eventually
// fail.
const tries = 20
for i := 0; i < tries; i++ {
cmd = exec.Command(bin[0], append(bin[1:], "2")...)
stderr, err := cmd.StderrPipe()
if err != nil {
t.Fatal(err)
}
defer stderr.Close()
r := bufio.NewReader(stderr)
err = cmd.Start()
if err != nil {
t.Fatal(err)
}
// Wait for trigger to ensure that the process is started.
ok, err := r.ReadString('\n')
// Verify trigger.
if err != nil || ok != "OK\n" {
t.Fatalf("Did not receive OK signal")
}
// Give the program a chance to enter the sleep function.
time.Sleep(time.Millisecond)
cmd.Process.Signal(syscall.SIGSEGV)
err = cmd.Wait()
if err == nil {
continue
}
if expectSignal(t, err, syscall.SIGSEGV) {
return
}
}
t.Errorf("program succeeded unexpectedly %d times", tries)
}
// checkSignalForwardingTest calls t.Skip if the SignalForwarding test
// doesn't work on this platform.
func checkSignalForwardingTest(t *testing.T) {
switch GOOS {
case "darwin":
switch GOARCH {
case "arm", "arm64":
t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
}
case "windows":
t.Skip("skipping signal test on Windows")
}
}
// expectSignal checks that err, the exit status of a test program,
// shows a failure due to a specific signal. Returns whether we found
// the expected signal.
func expectSignal(t *testing.T, err error, sig syscall.Signal) bool {
if err == nil {
t.Error("test program succeeded unexpectedly")
} else if ee, ok := err.(*exec.ExitError); !ok {
t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
} else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
} else if !ws.Signaled() || ws.Signal() != sig {
t.Errorf("got %v; expected signal %v", ee, sig)
} else {
return true
}
return false
}
func TestOsSignal(t *testing.T) {
switch GOOS {
case "windows":
t.Skip("skipping signal test on Windows")
}
defer func() {
os.Remove("libgo3.a")
os.Remove("libgo3.h")
os.Remove("testp")
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo3.a", "./libgo3")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo3.h")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main3.c", "libgo3.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
if out, err := exec.Command(bin[0], bin[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
}
func TestSigaltstack(t *testing.T) {
switch GOOS {
case "windows":
t.Skip("skipping signal test on Windows")
}
defer func() {
os.Remove("libgo4.a")
os.Remove("libgo4.h")
os.Remove("testp")
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo4.a", "./libgo4")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo4.h")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main4.c", "libgo4.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
if out, err := exec.Command(bin[0], bin[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
}
const testar = `#!/usr/bin/env bash
while [[ $1 == -* ]] >/dev/null; do
shift
done
echo "testar" > $1
echo "testar" > PWD/testar.ran
`
func TestExtar(t *testing.T) {
switch GOOS {
case "windows":
t.Skip("skipping signal test on Windows")
}
if runtime.Compiler == "gccgo" {
t.Skip("skipping -extar test when using gccgo")
}
if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
t.Skip("shell scripts are not executable on iOS hosts")
}
defer func() {
os.Remove("libgo4.a")
os.Remove("libgo4.h")
os.Remove("testar")
os.Remove("testar.ran")
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
os.Remove("testar")
dir, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
s := strings.Replace(testar, "PWD", dir, 1)
if err := ioutil.WriteFile("testar", []byte(s), 0777); err != nil {
t.Fatal(err)
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-ldflags=-extar="+filepath.Join(dir, "testar"), "-o", "libgo4.a", "./libgo4")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo4.h")
if _, err := os.Stat("testar.ran"); err != nil {
if os.IsNotExist(err) {
t.Error("testar does not exist after go build")
} else {
t.Errorf("error checking testar: %v", err)
}
}
}
func TestPIE(t *testing.T) {
switch GOOS {
case "windows", "darwin", "plan9":
t.Skipf("skipping PIE test on %s", GOOS)
}
defer func() {
os.Remove("testp" + exeSuffix)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
cmd := exec.Command("go", "install", "-i", "-buildmode=c-archive", "./libgo")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
libgoa := "libgo.a"
if runtime.Compiler == "gccgo" {
libgoa = "liblibgo.a"
}
ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join(libgodir, libgoa))
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
binArgs := append(bin, "arg1", "arg2")
cmd = exec.Command(binArgs[0], binArgs[1:]...)
if runtime.Compiler == "gccgo" {
cmd.Env = append(os.Environ(), "GCCGO=1")
}
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
if GOOS != "aix" {
f, err := elf.Open("testp" + exeSuffix)
if err != nil {
t.Fatal("elf.Open failed: ", err)
}
defer f.Close()
if hasDynTag(t, f, elf.DT_TEXTREL) {
t.Errorf("%s has DT_TEXTREL flag", "testp"+exeSuffix)
}
}
}
func hasDynTag(t *testing.T, f *elf.File, tag elf.DynTag) bool {
ds := f.SectionByType(elf.SHT_DYNAMIC)
if ds == nil {
t.Error("no SHT_DYNAMIC section")
return false
}
d, err := ds.Data()
if err != nil {
t.Errorf("can't read SHT_DYNAMIC contents: %v", err)
return false
}
for len(d) > 0 {
var t elf.DynTag
switch f.Class {
case elf.ELFCLASS32:
t = elf.DynTag(f.ByteOrder.Uint32(d[:4]))
d = d[8:]
case elf.ELFCLASS64:
t = elf.DynTag(f.ByteOrder.Uint64(d[:8]))
d = d[16:]
}
if t == tag {
return true
}
}
return false
}
func TestSIGPROF(t *testing.T) {
switch GOOS {
case "windows", "plan9":
t.Skipf("skipping SIGPROF test on %s", GOOS)
case "darwin":
t.Skipf("skipping SIGPROF test on %s; see https://golang.org/issue/19320", GOOS)
}
t.Parallel()
defer func() {
os.Remove("testp6" + exeSuffix)
os.Remove("libgo6.a")
os.Remove("libgo6.h")
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo6.a", "./libgo6")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo6.h")
ccArgs := append(cc, "-o", "testp6"+exeSuffix, "main6.c", "libgo6.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
argv := cmdToRun("./testp6")
cmd = exec.Command(argv[0], argv[1:]...)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
}
// TestCompileWithoutShared tests that if we compile code without the
// -shared option, we can put it into an archive. When we use the go
// tool with -buildmode=c-archive, it passes -shared to the compiler,
// so we override that. The go tool doesn't work this way, but Bazel
// will likely do it in the future. And it ought to work. This test
// was added because at one time it did not work on PPC GNU/Linux.
func TestCompileWithoutShared(t *testing.T) {
// For simplicity, reuse the signal forwarding test.
checkSignalForwardingTest(t)
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
}()
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-gcflags=-shared=false", "-o", "libgo2.a", "./libgo2")
t.Log(cmd.Args)
out, err := cmd.CombinedOutput()
t.Logf("%s", out)
if err != nil {
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
exe := "./testnoshared" + exeSuffix
// In some cases, -no-pie is needed here, but not accepted everywhere. First try
// if -no-pie is accepted. See #22126.
ccArgs := append(cc, "-o", exe, "-no-pie", "main5.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
t.Log(ccArgs)
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
// If -no-pie is not recognized, try -nopie in case the compiler is clang.
if err != nil && bytes.Contains(out, []byte("unknown")) && !strings.Contains(cc[0], "gcc") {
ccArgs = append(cc, "-o", exe, "-nopie", "main5.c", "libgo2.a")
t.Log(ccArgs)
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
}
// Don't use either -no-pie or -nopie
if err != nil && bytes.Contains(out, []byte("unrecognized")) {
ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a")
t.Log(ccArgs)
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
}
t.Logf("%s", out)
if err != nil {
t.Fatal(err)
}
defer os.Remove(exe)
binArgs := append(cmdToRun(exe), "3")
t.Log(binArgs)
out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput()
t.Logf("%s", out)
expectSignal(t, err, syscall.SIGPIPE)
}
// Test that installing a second time recreates the header files.
func TestCachedInstall(t *testing.T) {
defer os.RemoveAll(filepath.Join(GOPATH, "pkg"))
h1 := filepath.Join(libgodir, "libgo.h")
h2 := filepath.Join(libgodir, "p.h")
buildcmd := []string{"go", "install", "-i", "-buildmode=c-archive", "./libgo"}
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
if _, err := os.Stat(h1); err != nil {
t.Errorf("libgo.h not installed: %v", err)
}
if _, err := os.Stat(h2); err != nil {
t.Errorf("p.h not installed: %v", err)
}
if err := os.Remove(h1); err != nil {
t.Fatal(err)
}
if err := os.Remove(h2); err != nil {
t.Fatal(err)
}
cmd = exec.Command(buildcmd[0], buildcmd[1:]...)
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
if _, err := os.Stat(h1); err != nil {
t.Errorf("libgo.h not installed in second run: %v", err)
}
if _, err := os.Stat(h2); err != nil {
t.Errorf("p.h not installed in second run: %v", err)
}
}
|
[
"\"GO_BUILDER_NAME\""
] |
[] |
[
"GO_BUILDER_NAME"
] |
[]
|
["GO_BUILDER_NAME"]
|
go
| 1 | 0 | |
AdapAug/train.py
|
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))
import itertools
import json
import logging
import math
import os
from collections import OrderedDict
import torch
from torch.distributions import Categorical
from torch import nn, optim
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
from torchvision import transforms
from tqdm import tqdm
from theconf import Config as C, ConfigArgumentParser
from AdapAug.common import get_logger, EMA, add_filehandler
from AdapAug.data import get_dataloaders, Augmentation, CutoutDefault
from AdapAug.lr_scheduler import adjust_learning_rate_resnet
from AdapAug.metrics import accuracy, Accumulator, CrossEntropyLabelSmooth, Tracker
from AdapAug.networks import get_model, num_class
from AdapAug.tf_port.rmsprop import RMSpropTF
from AdapAug.aug_mixup import CrossEntropyMixUpLabelSmooth, mixup
from warmup_scheduler import GradualWarmupScheduler
import random, copy, numpy as np
logger = get_logger('Fast AutoAugment')
logger.setLevel(logging.INFO)
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
def run_epoch(model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None, is_master=True, ema=None, wd=0.0, tqdm_disabled=False, \
data_parallel=False, trace=False, batch_multiplier=1, get_trace=[]):
if data_parallel:
model = DataParallel(model).cuda()
if verbose:
loader = tqdm(loader, disable=tqdm_disabled)
loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))
params_without_bn = [params for name, params in model.named_parameters() if not ('_bn' in name or '.bn' in name)]
loss_ema = None
metrics = Accumulator()
if trace or batch_multiplier > 1:
tracker = Tracker()
cnt = 0
total_steps = len(loader)
steps = 0
for data, label in loader:
steps += 1
if isinstance(data, list):
data, clean_data, log_prob, policy = data
if batch_multiplier > 1:
log_prob = torch.cat([ log_prob[:,m] for m in range(batch_multiplier) ]) # [batch, M] -> [batch*M]
policy = torch.cat([ policy[:,m] for m in range(batch_multiplier) ]) # [batch, M, n_subpolicy, n_op, 3] -> [batch*M, n_subpolicy, n_op, 3]
clean_label = label.detach()
if batch_multiplier > 1:
data = torch.cat([ data[:,m] for m in range(batch_multiplier) ])
label = label.repeat(batch_multiplier)
data, label = data.cuda(), label.cuda()
if C.get().conf.get('mixup', 0.0) <= 0.0 or optimizer is None:
preds = model(data)
loss = loss_fn(preds, label)
else: # mixup
data, targets, shuffled_targets, lam = mixup(data, label, C.get()['mixup'])
preds = model(data)
loss = loss_fn(preds, targets, shuffled_targets, lam)
del shuffled_targets, lam
if 'clean_loss' in get_trace or 'clean_logits' in get_trace:
with torch.no_grad():
clean_logits = model(clean_data.cuda())
if 'clean_loss' in get_trace:
clean_loss = loss_fn(clean_logits, clean_label.cuda()).cpu().detach()
if trace or batch_multiplier > 1:
_loss = loss.cpu().detach()
loss = loss.mean()
if optimizer:
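# explicit L2 penalty over non-BN parameters; the optimizers below are created
# with weight_decay=0.0, so weight decay is applied here instead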
loss += wd * (1. / 2.) * sum([torch.sum(p ** 2) for p in params_without_bn])
loss.backward()
grad_clip = C.get()['optimizer'].get('clip', 5.0)
if grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
if ema is not None:
ema(model, (epoch - 1) * total_steps + steps)
top1, top5 = accuracy(preds, label, (1, 5))
metrics.add_dict({
'loss': loss.item() * len(data),
'top1': top1.item() * len(data),
'top5': top5.item() * len(data),
})
cnt += len(data)
if trace:
tracker.add_dict({
'cnt': len(data),
'clean_data': (clean_data.cpu().detach(), clean_label.cpu()),
'log_probs': log_prob.cpu().detach(),
'policy': policy.cpu().detach(),
'loss': _loss,
'acc': top1.item(),
})
del log_prob, policy, _loss, clean_data, clean_label
if 'clean_loss' in get_trace:
tracker.add('clean_loss', clean_loss)
del clean_loss
if 'logits' in get_trace:
tracker.add('logits', preds.cpu().detach())
if 'clean_logits' in get_trace:
tracker.add('clean_logits', clean_logits.cpu().detach())
elif batch_multiplier > 1:
tracker.add_dict({
'cnt': len(data),
'loss': _loss,
# 'acc': top1.item(),
})
del _loss
if loss_ema:
loss_ema = loss_ema * 0.9 + loss.item() * 0.1
else:
loss_ema = loss.item()
if verbose:
postfix = metrics / cnt
if optimizer:
postfix['lr'] = optimizer.param_groups[0]['lr']
postfix['loss_ema'] = loss_ema
loader.set_postfix(postfix)
if scheduler is not None:
scheduler.step(epoch - 1 + float(steps) / total_steps)
del preds, loss, top1, top5, data, label
if tqdm_disabled and verbose:
if optimizer:
logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics / cnt, optimizer.param_groups[0]['lr'])
else:
logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics / cnt)
metrics /= cnt
if optimizer:
metrics.metrics['lr'] = optimizer.param_groups[0]['lr']
if verbose:
for key, value in metrics.items():
writer.add_scalar(key, value, epoch)
if trace or batch_multiplier > 1:
return tracker, metrics
return metrics
def train_and_eval(tag, dataloaders, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False, local_rank=-1, evaluation_interval=5, reduced=False, gr_assign=None, gr_dist=None, data_parallel=False):
total_batch = C.get()["batch"]
if test_ratio == 0. and 'test_dataset' in C.get().conf:
dataset = C.get()['test_dataset']
else:
dataset = C.get()["dataset"]
if dataloaders:
trainsampler, trainloader, validloader, testloader_ = dataloaders
else:
if gr_dist is not None:
m = Categorical(gr_dist)
gr_ids = m.sample().numpy()
else:
gr_ids = None
trainsampler, trainloader, validloader, testloader_ = get_dataloaders(dataset, C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0), gr_assign=gr_assign, gr_ids=gr_ids)
if local_rank >= 0:
dist.init_process_group(backend='nccl', init_method='env://', world_size=int(os.environ['WORLD_SIZE']))
device = torch.device('cuda', local_rank)
torch.cuda.set_device(device)
C.get()['lr'] *= dist.get_world_size()
logger.info(f'local batch={C.get()["batch"]} world_size={dist.get_world_size()} ----> total batch={C.get()["batch"] * dist.get_world_size()}')
total_batch = C.get()["batch"] * dist.get_world_size()
is_master = local_rank < 0 or dist.get_rank() == 0
if is_master:
add_filehandler(logger, save_path + '.log')
if not reporter:
reporter = lambda **kwargs: 0
max_epoch = C.get()['epoch']
# create a model & an optimizer
model = get_model(C.get()['model'], num_class(dataset), local_rank=local_rank)
model_ema = get_model(C.get()['model'], num_class(dataset), local_rank=-1)
model_ema.eval()
criterion_ce = criterion = CrossEntropyLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0))
if C.get().conf.get('mixup', 0.0) > 0.0:
criterion = CrossEntropyMixUpLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0))
if C.get()['optimizer']['type'] == 'sgd':
optimizer = optim.SGD(
model.parameters(),
lr=C.get()['lr'],
momentum=C.get()['optimizer'].get('momentum', 0.9),
weight_decay=0.0,
nesterov=C.get()['optimizer'].get('nesterov', True)
)
elif C.get()['optimizer']['type'] == 'rmsprop':
optimizer = RMSpropTF(
model.parameters(),
lr=C.get()['lr'],
weight_decay=0.0,
alpha=0.9, momentum=0.9,
eps=0.001
)
else:
raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])
lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
if lr_scheduler_type == 'cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=C.get()['epoch'], eta_min=0.)
elif lr_scheduler_type == 'resnet':
scheduler = adjust_learning_rate_resnet(optimizer)
elif lr_scheduler_type == 'efficientnet':
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 0.97 ** int((x + C.get()['lr_schedule']['warmup']['epoch']) / 2.4))
else:
raise ValueError('invalid lr_scheduler=%s' % lr_scheduler_type)
if C.get()['lr_schedule'].get('warmup', None) and C.get()['lr_schedule']['warmup']['epoch'] > 0:
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
after_scheduler=scheduler
)
if not tag or not is_master:
from AdapAug.metrics import SummaryWriterDummy as SummaryWriter
logger.warning('tag not provided, no tensorboard log.')
else:
from tensorboardX import SummaryWriter
writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test']]
if C.get()['optimizer']['ema'] > 0.0 and is_master:
# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4?u=ildoonet
ema = EMA(C.get()['optimizer']['ema'])
else:
ema = None
result = OrderedDict()
epoch_start = 1
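# resume logic: if a checkpoint exists at save_path, restore model/optimizer/EMA state and continue from the stored epoch; a checkpoint that already reached the target epoch switches to evaluation-only mode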
if save_path != 'test.pth': # and is_master: --> every rank must load the checkpoint itself (its contents cannot be broadcast)
if save_path and os.path.exists(save_path):
logger.info('%s file found. loading...' % save_path)
data = torch.load(save_path)
key = 'model' if 'model' in data else 'state_dict'
if 'epoch' not in data:
model.load_state_dict(data)
else:
logger.info('checkpoint epoch@%d' % data['epoch'])
if not isinstance(model, (DataParallel, DistributedDataParallel)):
model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
else:
model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
logger.info('optimizer.load_state_dict+')
if 'optimizer' in data:
optimizer.load_state_dict(data['optimizer'])
if data['epoch'] < C.get()['epoch']:
epoch_start = data['epoch']
else:
only_eval = True
if ema is not None:
ema.shadow = data.get('ema', {}) if isinstance(data.get('ema', {}), dict) else data['ema'].state_dict()
del data
else:
logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
if only_eval:
logger.warning('model checkpoint not found. only-evaluation mode is off.')
only_eval = False
if local_rank >= 0:
for name, x in model.state_dict().items():
dist.broadcast(x, 0)
logger.info(f'multinode init. local_rank={dist.get_rank()} is_master={is_master}')
torch.cuda.synchronize()
tqdm_disabled = bool(os.environ.get('TASK_NAME', '')) and local_rank != 0 # KakaoBrain Environment
if only_eval:
logger.info('evaluation only+')
model.eval()
rs = dict()
with torch.no_grad():
rs['train'] = run_epoch(model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0], is_master=is_master, data_parallel=data_parallel)
rs['valid'] = run_epoch(model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1], is_master=is_master, data_parallel=data_parallel)
rs['test'] = run_epoch(model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2], is_master=is_master, data_parallel=data_parallel)
if ema is not None and len(ema) > 0:
model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})
rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=0, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)
rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=0, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)
for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
if setname not in rs:
continue
result['%s_%s' % (key, setname)] = rs[setname][key]
result['epoch'] = 0
return result
# train loop
best_top1 = 0
for epoch in range(epoch_start, max_epoch + 1):
if local_rank >= 0:
trainsampler.set_epoch(epoch)
model.train()
rs = dict()
rs['train'] = run_epoch(model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=is_master, scheduler=scheduler, ema=ema, wd=C.get()['optimizer']['decay'], tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
model.eval()
if math.isnan(rs['train']['loss']):
raise Exception('train loss is NaN.')
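# periodically copy the EMA shadow weights into the live model and broadcast them so every distributed rank stays in sync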
if ema is not None and C.get()['optimizer']['ema_interval'] > 0 and epoch % C.get()['optimizer']['ema_interval'] == 0:
logger.info(f'ema synced+ rank={dist.get_rank()}')
if ema is not None:
model.load_state_dict(ema.state_dict())
for name, x in model.state_dict().items():
# print(name)
dist.broadcast(x, 0)
torch.cuda.synchronize()
logger.info(f'ema synced- rank={dist.get_rank()}')
if is_master and (epoch % evaluation_interval == 0 or epoch == max_epoch):
with torch.no_grad():
rs['valid'] = run_epoch(model, validloader, criterion_ce, None, desc_default='valid', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
rs['test'] = run_epoch(model, testloader_, criterion_ce, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
if ema is not None:
model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})
rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)
rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)
logger.info(
f'epoch={epoch} '
f'[train] loss={rs["train"]["loss"]:.4f} top1={rs["train"]["top1"]:.4f} '
f'[valid] loss={rs["valid"]["loss"]:.4f} top1={rs["valid"]["top1"]:.4f} '
f'[test] loss={rs["test"]["loss"]:.4f} top1={rs["test"]["top1"]:.4f} '
)
if metric == 'last' or rs[metric]['top1'] > best_top1:
if metric != 'last':
best_top1 = rs[metric]['top1']
for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
result['%s_%s' % (key, setname)] = rs[setname][key]
result['epoch'] = epoch
writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)
reporter(
loss_valid=rs['valid']['loss'], top1_valid=rs['valid']['top1'],
loss_test=rs['test']['loss'], top1_test=rs['test']['top1']
)
# save checkpoint
if is_master and save_path and epoch_start != max_epoch:
logger.info('save model@%d to %s, err=%.4f' % (epoch, save_path, 1 - best_top1))
torch.save({
'epoch': epoch,
'log': {
'train': rs['train'].get_dict(),
'valid': rs['valid'].get_dict(),
'test': rs['test'].get_dict(),
},
'optimizer': optimizer.state_dict(),
'model': model.state_dict(),
'ema': ema.state_dict() if ema is not None else None,
}, save_path)
if gr_dist is not None:
gr_ids = m.sample().numpy()
trainsampler, trainloader, validloader, testloader_ = get_dataloaders(dataset, C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0), gr_assign=gr_assign, gr_ids=gr_ids)
del model
# result['top1_test'] = best_top1
return result
if __name__ == '__main__':
parser = ConfigArgumentParser(conflict_handler='resolve')
parser.add_argument('--tag', type=str, default='')
parser.add_argument('--dataroot', type=str, default='/data/private/pretrainedmodels', help='torchvision data folder')
parser.add_argument('--save', type=str, default='test.pth')
parser.add_argument('--cv-ratio', type=float, default=0.0)
parser.add_argument('--cv', type=int, default=0)
parser.add_argument('--local_rank', type=int, default=-1)
parser.add_argument('--evaluation-interval', type=int, default=5)
parser.add_argument('--only-eval', action='store_true')
args = parser.parse_args()
assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'
if not args.only_eval:
if args.save:
logger.info('checkpoint will be saved at %s' % args.save)
else:
logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')
import time
t = time.time()
result = train_and_eval(args.tag, None, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, local_rank=args.local_rank, metric='test', evaluation_interval=args.evaluation_interval)
elapsed = time.time() - t
logger.info('done.')
logger.info('model: %s' % C.get()['model'])
logger.info('augmentation: %s' % C.get()['aug'])
logger.info('\n' + json.dumps(result, indent=4))
logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
logger.info(args.save)
|
[] |
[] |
[
"TASK_NAME",
"WORLD_SIZE"
] |
[]
|
["TASK_NAME", "WORLD_SIZE"]
|
python
| 2 | 0 | |
pgtype/testutil/testutil.go
|
package testutil
import (
"context"
"database/sql"
"fmt"
"os"
"reflect"
"testing"
"github.com/nuqz/pgx"
"github.com/nuqz/pgx/pgtype"
_ "github.com/nuqz/pgx/stdlib"
_ "github.com/lib/pq"
)
func MustConnectDatabaseSQL(t testing.TB, driverName string) *sql.DB {
var sqlDriverName string
switch driverName {
case "github.com/lib/pq":
sqlDriverName = "postgres"
case "github.com/nuqz/pgx/stdlib":
sqlDriverName = "pgx"
default:
t.Fatalf("Unknown driver %v", driverName)
}
db, err := sql.Open(sqlDriverName, os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
t.Fatal(err)
}
return db
}
func MustConnectPgx(t testing.TB) *pgx.Conn {
config, err := pgx.ParseConnectionString(os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
t.Fatal(err)
}
conn, err := pgx.Connect(config)
if err != nil {
t.Fatal(err)
}
return conn
}
func MustClose(t testing.TB, conn interface {
Close() error
}) {
err := conn.Close()
if err != nil {
t.Fatal(err)
}
}
type forceTextEncoder struct {
e pgtype.TextEncoder
}
func (f forceTextEncoder) EncodeText(ci *pgtype.ConnInfo, buf []byte) ([]byte, error) {
return f.e.EncodeText(ci, buf)
}
type forceBinaryEncoder struct {
e pgtype.BinaryEncoder
}
func (f forceBinaryEncoder) EncodeBinary(ci *pgtype.ConnInfo, buf []byte) ([]byte, error) {
return f.e.EncodeBinary(ci, buf)
}
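// ForceEncoder wraps a value so that only the encoder matching the requested wire format
// (text or binary) is exposed, letting the tests exercise each format path independently.
// It returns nil when the value does not implement the corresponding encoder interface.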
func ForceEncoder(e interface{}, formatCode int16) interface{} {
switch formatCode {
case pgx.TextFormatCode:
if e, ok := e.(pgtype.TextEncoder); ok {
return forceTextEncoder{e: e}
}
case pgx.BinaryFormatCode:
if e, ok := e.(pgtype.BinaryEncoder); ok {
return forceBinaryEncoder{e: e}
}
}
return nil
}
func TestSuccessfulTranscode(t testing.TB, pgTypeName string, values []interface{}) {
TestSuccessfulTranscodeEqFunc(t, pgTypeName, values, func(a, b interface{}) bool {
return reflect.DeepEqual(a, b)
})
}
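// TestSuccessfulTranscodeEqFunc round-trips each value through native pgx (extended and
// simple protocol) and through database/sql with both the pq and pgx stdlib drivers,
// comparing results with the supplied equality function.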
func TestSuccessfulTranscodeEqFunc(t testing.TB, pgTypeName string, values []interface{}, eqFunc func(a, b interface{}) bool) {
TestPgxSuccessfulTranscodeEqFunc(t, pgTypeName, values, eqFunc)
TestPgxSimpleProtocolSuccessfulTranscodeEqFunc(t, pgTypeName, values, eqFunc)
for _, driverName := range []string{"github.com/lib/pq", "github.com/nuqz/pgx/stdlib"} {
TestDatabaseSQLSuccessfulTranscodeEqFunc(t, driverName, pgTypeName, values, eqFunc)
}
}
func TestPgxSuccessfulTranscodeEqFunc(t testing.TB, pgTypeName string, values []interface{}, eqFunc func(a, b interface{}) bool) {
conn := MustConnectPgx(t)
defer MustClose(t, conn)
ps, err := conn.Prepare("test", fmt.Sprintf("select $1::%s", pgTypeName))
if err != nil {
t.Fatal(err)
}
formats := []struct {
name string
formatCode int16
}{
{name: "TextFormat", formatCode: pgx.TextFormatCode},
{name: "BinaryFormat", formatCode: pgx.BinaryFormatCode},
}
for i, v := range values {
for _, fc := range formats {
ps.FieldDescriptions[0].FormatCode = fc.formatCode
vEncoder := ForceEncoder(v, fc.formatCode)
if vEncoder == nil {
t.Logf("Skipping: %#v does not implement %v", v, fc.name)
continue
}
// Dereference value if it is a pointer
derefV := v
refVal := reflect.ValueOf(v)
if refVal.Kind() == reflect.Ptr {
derefV = refVal.Elem().Interface()
}
result := reflect.New(reflect.TypeOf(derefV))
err := conn.QueryRow("test", ForceEncoder(v, fc.formatCode)).Scan(result.Interface())
if err != nil {
t.Errorf("%v %d: %v", fc.name, i, err)
}
if !eqFunc(result.Elem().Interface(), derefV) {
t.Errorf("%v %d: expected %v, got %v", fc.name, i, derefV, result.Elem().Interface())
}
}
}
}
func TestPgxSimpleProtocolSuccessfulTranscodeEqFunc(t testing.TB, pgTypeName string, values []interface{}, eqFunc func(a, b interface{}) bool) {
conn := MustConnectPgx(t)
defer MustClose(t, conn)
for i, v := range values {
// Dereference value if it is a pointer
derefV := v
refVal := reflect.ValueOf(v)
if refVal.Kind() == reflect.Ptr {
derefV = refVal.Elem().Interface()
}
result := reflect.New(reflect.TypeOf(derefV))
err := conn.QueryRowEx(
context.Background(),
fmt.Sprintf("select ($1)::%s", pgTypeName),
&pgx.QueryExOptions{SimpleProtocol: true},
v,
).Scan(result.Interface())
if err != nil {
t.Errorf("Simple protocol %d: %v", i, err)
}
if !eqFunc(result.Elem().Interface(), derefV) {
t.Errorf("Simple protocol %d: expected %v, got %v", i, derefV, result.Elem().Interface())
}
}
}
func TestDatabaseSQLSuccessfulTranscodeEqFunc(t testing.TB, driverName, pgTypeName string, values []interface{}, eqFunc func(a, b interface{}) bool) {
conn := MustConnectDatabaseSQL(t, driverName)
defer MustClose(t, conn)
ps, err := conn.Prepare(fmt.Sprintf("select $1::%s", pgTypeName))
if err != nil {
t.Fatal(err)
}
for i, v := range values {
// Dereference value if it is a pointer
derefV := v
refVal := reflect.ValueOf(v)
if refVal.Kind() == reflect.Ptr {
derefV = refVal.Elem().Interface()
}
result := reflect.New(reflect.TypeOf(derefV))
err := ps.QueryRow(v).Scan(result.Interface())
if err != nil {
t.Errorf("%v %d: %v", driverName, i, err)
}
if !eqFunc(result.Elem().Interface(), derefV) {
t.Errorf("%v %d: expected %v, got %v", driverName, i, derefV, result.Elem().Interface())
}
}
}
type NormalizeTest struct {
SQL string
Value interface{}
}
func TestSuccessfulNormalize(t testing.TB, tests []NormalizeTest) {
TestSuccessfulNormalizeEqFunc(t, tests, func(a, b interface{}) bool {
return reflect.DeepEqual(a, b)
})
}
func TestSuccessfulNormalizeEqFunc(t testing.TB, tests []NormalizeTest, eqFunc func(a, b interface{}) bool) {
TestPgxSuccessfulNormalizeEqFunc(t, tests, eqFunc)
for _, driverName := range []string{"github.com/lib/pq", "github.com/nuqz/pgx/stdlib"} {
TestDatabaseSQLSuccessfulNormalizeEqFunc(t, driverName, tests, eqFunc)
}
}
func TestPgxSuccessfulNormalizeEqFunc(t testing.TB, tests []NormalizeTest, eqFunc func(a, b interface{}) bool) {
conn := MustConnectPgx(t)
defer MustClose(t, conn)
formats := []struct {
name string
formatCode int16
}{
{name: "TextFormat", formatCode: pgx.TextFormatCode},
{name: "BinaryFormat", formatCode: pgx.BinaryFormatCode},
}
for i, tt := range tests {
for _, fc := range formats {
psName := fmt.Sprintf("test%d", i)
ps, err := conn.Prepare(psName, tt.SQL)
if err != nil {
t.Fatal(err)
}
ps.FieldDescriptions[0].FormatCode = fc.formatCode
if ForceEncoder(tt.Value, fc.formatCode) == nil {
t.Logf("Skipping: %#v does not implement %v", tt.Value, fc.name)
continue
}
// Dereference value if it is a pointer
derefV := tt.Value
refVal := reflect.ValueOf(tt.Value)
if refVal.Kind() == reflect.Ptr {
derefV = refVal.Elem().Interface()
}
result := reflect.New(reflect.TypeOf(derefV))
err = conn.QueryRow(psName).Scan(result.Interface())
if err != nil {
t.Errorf("%v %d: %v", fc.name, i, err)
}
if !eqFunc(result.Elem().Interface(), derefV) {
t.Errorf("%v %d: expected %v, got %v", fc.name, i, derefV, result.Elem().Interface())
}
}
}
}
func TestDatabaseSQLSuccessfulNormalizeEqFunc(t testing.TB, driverName string, tests []NormalizeTest, eqFunc func(a, b interface{}) bool) {
conn := MustConnectDatabaseSQL(t, driverName)
defer MustClose(t, conn)
for i, tt := range tests {
ps, err := conn.Prepare(tt.SQL)
if err != nil {
t.Errorf("%d. %v", i, err)
continue
}
// Dereference value if it is a pointer
derefV := tt.Value
refVal := reflect.ValueOf(tt.Value)
if refVal.Kind() == reflect.Ptr {
derefV = refVal.Elem().Interface()
}
result := reflect.New(reflect.TypeOf(derefV))
err = ps.QueryRow().Scan(result.Interface())
if err != nil {
t.Errorf("%v %d: %v", driverName, i, err)
}
if !eqFunc(result.Elem().Interface(), derefV) {
t.Errorf("%v %d: expected %v, got %v", driverName, i, derefV, result.Elem().Interface())
}
}
}
|
[
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\""
] |
[] |
[
"PGX_TEST_DATABASE"
] |
[]
|
["PGX_TEST_DATABASE"]
|
go
| 1 | 0 | |
setupext/platform.py
|
# -*- coding: utf-8 -*-
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import setupext
import os
import sys
import sysconfig
import distutils.log
# This handles all of the work to build our platform-specific extension options.
def Platform(include_dirs=None, sources=None, platform=sys.platform):
if include_dirs is None:
include_dirs = []
if sources is None:
sources = []
platform_specific = {
'include_dirs': include_dirs,
'sources': setupext.utils.find_sources(sources),
}
fallback_jni = os.path.join('native', 'jni_include')
# try to locate JNI headers from JAVA_HOME first (if set), then fall back to the bundled copies
java_home = os.getenv('JAVA_HOME', '')
found_jni = False
if os.path.exists(java_home):
platform_specific['include_dirs'] += [os.path.join(java_home, 'include')]
# check if jni.h can be found
for d in platform_specific['include_dirs']:
if os.path.exists(os.path.join(str(d), 'jni.h')):
distutils.log.info("Found native jni.h at %s", d)
found_jni = True
break
if not found_jni:
distutils.log.warn('Falling back to bundled JNI headers, since your'
' JAVA_HOME "%s" does not provide jni.h', java_home)
if not found_jni:
platform_specific['include_dirs'] += [fallback_jni]
platform_specific['extra_link_args'] = []
distutils.log.info("Configure platform to", platform)
static = True
if platform == 'win32':
distutils.log.info("Add windows settings")
# platform_specific['libraries'] = ['Advapi32']
platform_specific['define_macros'] = [('WIN32', 1)]
if sys.version_info >= (3,):
platform_specific['extra_compile_args'] = [
'/Zi', '/EHsc', '/std:c++14']
else:
platform_specific['extra_compile_args'] = ['/Zi', '/EHsc']
# platform_specific['extra_link_args'] = ['/DEBUG']
jni_md_platform = 'win32'
elif platform == 'darwin':
distutils.log.info("Add darwin settings")
platform_specific['libraries'] = ['dl']
platform_specific['define_macros'] = [('MACOSX', 1)]
platform_specific['extra_compile_args'] = ['-g0', '-std=c++11', '-O2']
jni_md_platform = 'darwin'
elif platform.startswith('linux'):
distutils.log.info("Add linux settings")
platform_specific['libraries'] = ['dl']
platform_specific['extra_compile_args'] = ['-g0', '-std=c++11', '-O2']
jni_md_platform = 'linux'
elif platform.startswith('aix7'):
distutils.log.info("Add aix settings")
platform_specific['libraries'] = ['dl']
platform_specific['extra_compile_args'] = ['-g3', '-std=c++11', '-O2']
jni_md_platform = 'aix7'
elif platform.startswith('freebsd'):
distutils.log.info("Add freebsd settings")
jni_md_platform = 'freebsd'
elif platform.startswith('android'):
distutils.log.info("Add android settings")
platform_specific['libraries'] = ['dl', 'c++_shared', 'SDL2']
platform_specific['extra_compile_args'] = ['-g0', '-std=c++11', '-fexceptions', '-frtti', '-O2']
print("PLATFORM_SPECIFIC:", platform_specific)
jni_md_platform = 'linux'
static = False
elif platform == 'zos':
distutils.log.info("Add zos settings")
jni_md_platform = 'zos'
else:
jni_md_platform = None
distutils.log.warn("Your platform '%s' is not being handled explicitly."
" It may work or not!", platform)
# This code is used to include the Python library in the build when starting Python from
# within Java. It will be used in the future, but is not currently required.
# if static and sysconfig.get_config_var('BLDLIBRARY') is not None:
# platform_specific['extra_link_args'].append(sysconfig.get_config_var('BLDLIBRARY'))
if found_jni:
distutils.log.info("Add JNI directory %s" % os.path.join(java_home, 'include', jni_md_platform))
platform_specific['include_dirs'] += \
[os.path.join(java_home, 'include', jni_md_platform)]
return platform_specific
# include this stolen from FindJNI.cmake
"""
FIND_PATH(JAVA_INCLUDE_PATH2 jni_md.h
${JAVA_INCLUDE_PATH}
${JAVA_INCLUDE_PATH}/win32
${JAVA_INCLUDE_PATH}/linux
${JAVA_INCLUDE_PATH}/freebsd
${JAVA_INCLUDE_PATH}/solaris
${JAVA_INCLUDE_PATH}/hp-ux
${JAVA_INCLUDE_PATH}/alpha
)"""
|
[] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
python
| 1 | 0 | |
src/xSACdb/settings/common.py
|
"""All settings common to debug and production"""
import os
from sys import path
# Staging tag: when DEBUG is False and this is True, some unsafe behaviour is allowed.
STAGING = False
FAKER_LOCALE = 'en_GB'
RANDOM_SEED = 'The quick brown fox jumped over the lazy ocean diver'
# Make HTTPResponse do unicode
DEFAULT_CHARSET = 'utf-8'
ADMIN_MEDIA_PREFIX = ''
# Define project paths
PROJECT_PATH = os.path.join(os.path.dirname(__file__), '../../..')
SRC_PATH = os.path.join(PROJECT_PATH, 'src')
ASSETS_PATH = os.path.join(PROJECT_PATH, 'assets')
DIST_PATH = os.path.join(PROJECT_PATH, 'dist')
TMP_PATH = os.path.join(PROJECT_PATH, 'tmp')
CONF_PATH = os.path.join(PROJECT_PATH, 'conf')
# Add config dir to path
path.append(CONF_PATH)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
LOGIN_URL = '/accounts/login/'
LOGIN_EXEMPT_URLS = (
# r'^media/', # allow any URL under /media/* This has facebook avatars, so NO!
r'^static/', # allow any URL under /static/*
r'^facebook/', # allow any URL under /facebook/*
r'^accounts/',
r'^hijack/', # have their own protection
r'^health/', # Needs to be publicly accessible
r'^favicon.ico$',
r'^manifest.json$',
r'^inspect.json', # Uses an API key
)
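# These patterns are presumably consumed by xSACdb.middleware.LoginRequiredMiddleware (listed in MIDDLEWARE below); matching requests bypass the login requirement.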
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Media files (css, images etc) for development server
STATIC_DOC_ROOT = os.path.join(DIST_PATH, 'static')
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(DIST_PATH, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(CONF_PATH, 'static'),
os.path.join(DIST_PATH, 'webpack'),
ASSETS_PATH,
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
# Caching for Django Whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# HACK: WN doesn't seem to pick up dj compressor files as forever-cacheable
# TODO: Is this needed anymore?
WHITENOISE_MAX_AGE = 315360000
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': False, # set to true in production.py
'BUNDLE_DIR_NAME': '/', # must end with slash
'STATS_FILE': os.path.join(DIST_PATH, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': 20,
'IGNORE': [r'.+\.hot-update.js', r'.+\.map']
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
os.path.join(CONF_PATH, 'templates'),
os.path.join(SRC_PATH, 'templates_global'),
],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'xSACdb.context_processors.xsd_vars',
],
},
},
]
MIDDLEWARE = (
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'xSACdb.middleware.LoginRequiredMiddleware',
'xSACdb.middleware.NewbieProfileFormRedirectMiddleware',
)
ROOT_URLCONF = 'xSACdb.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'xSACdb.wsgi.application'
AUTHENTICATION_BACKENDS = (
# 'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
FIXTURE_DIRS = (
os.path.join(SRC_PATH, 'xSACdb', 'fixtures'),
)
INSTALLED_APPS = (
'redis_cache', # https://github.com/sebleier/django-redis-cache
'django_rq', # https://github.com/ui/django-rq
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.humanize',
'xsd_auth.apps.AuthConfig',
'xsd_frontend.apps.FrontendConfig',
'xsd_members.apps.MembersConfig',
'xsd_training.apps.TrainingConfig',
'xsd_trips.apps.TripsConfig',
'xsd_sites.apps.SitesConfig',
'xsd_kit',
'xsd_about',
'xsd_help',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'bootstrap3',
'widget_tweaks',
'geoposition',
'reversion', # https://github.com/etianen/django-reversion
'reversion_compare', # https://github.com/jedie/django-reversion-compare
# Must be after apps creating activities
'actstream', # https://github.com/justquick/django-activity-stream
'hijack',
'compat',
'health_check',
# 'health_check_celery',
'health_check.db',
'health_check.cache',
'health_check.storage',
'webpack_loader',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
}
}
AUTH_USER_MODEL = 'xsd_auth.User'
USER_MODEL = AUTH_USER_MODEL
AUTH_PROFILE_MODEL = 'xsd_members.MemberProfile'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/accounts/login'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_USER_DISPLAY = 'xsd_auth.utils.get_user_display'
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
# FIXME: Deprecated - use Django's AUTH_PASSWORD_VALIDATORS instead. (NEED DJ >=1.9)
ACCOUNT_PASSWORD_MIN_LENGTH = 8
SOCIALACCOUNT_FORMS = {
'signup': 'xsd_auth.forms.SignupForm'
}
TEST_FIXTURES = [
os.path.join(TMP_PATH, 'bsac_data.yaml'),
'groups',
'socialapp-test',
]
HIJACK_NOTIFY_USER = True
HIJACK_DISPLAY_ADMIN_BUTTON = False
ACTSTREAM_SETTINGS = {
# 'MANAGER': 'myapp.managers.MyActionManager',
'FETCH_RELATIONS': True,
'USE_JSONFIELD': True,
}
PAGINATE_BY = 20
# Browser config
BROWSER_THEME_COLOUR = "#171f26"
# Inspect API
INSPECT_API_KEY_HASH = "033be85008caa4f26e04df5da463ee92218e4ff3bbf6565b4cee51abb0b0e973"
INSPECT_API_KEY_SALT = "EMQ2b6iDt96N"
if os.environ.get('XSACDB_XMLTESTRUNNER'):
TEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'
TEST_OUTPUT_FILE_NAME = 'junit-django.xml'
BASE_DIR = 'D:/Workspace/xSACdb/src/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[] |
[] |
[
"XSACDB_XMLTESTRUNNER"
] |
[]
|
["XSACDB_XMLTESTRUNNER"]
|
python
| 1 | 0 | |
tests/test_phockup.py
|
#!/usr/bin/env python3
import logging
import os
import shutil
import sys
from datetime import datetime
import pytest
from src.dependency import check_dependencies
from src.exif import Exif
from src.phockup import Phockup
os.chdir(os.path.dirname(__file__))
def test_check_dependencies(mocker):
mocker.patch('shutil.which', return_value='exiftool')
mocker.patch('sys.exit')
check_dependencies()
assert not sys.exit.called
def test_check_dependencies_missing(mocker):
mocker.patch('shutil.which', return_value=None)
mocker.patch('sys.exit')
with pytest.raises(Exception, match="Exiftool is not installed. \
Visit http://www.sno.phy.queensu.ca/~phil/exiftool/"):
check_dependencies()
def test_exception_if_missing_input_directory(mocker):
mocker.patch('os.makedirs')
mocker.patch('sys.exit')
with pytest.raises(RuntimeError, match="Input directory 'in' does not exist"):
Phockup('in', 'out')
def test_exception_if_input_not_directory(mocker):
mocker.patch('os.makedirs')
mocker.patch('sys.exit')
with pytest.raises(RuntimeError, match="Input directory 'input/exif.jpg' is not a directory"):
Phockup('input/exif.jpg', 'out')
def test_removing_trailing_slash_for_input_output(mocker):
mocker.patch('os.makedirs')
mocker.patch('sys.exit')
mocker.patch.object(Phockup, 'check_directories')
if sys.platform == 'win32':
phockup = Phockup('in\\', 'out\\')
else:
phockup = Phockup('in/', 'out/')
assert phockup.input_dir == 'in'
assert phockup.output_dir == 'out'
def test_exception_for_no_write_access_when_creating_output_dir(mocker):
mocker.patch.object(Phockup, 'walk_directory')
if sys.platform == 'win32':
protected_dir = f"{os.getenv('WINDIR')}/phockup"
else:
protected_dir = '/root/phockup'
with pytest.raises(OSError, match="Cannot create output.*"):
Phockup('input', protected_dir)
def test_walking_directory():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output')
validate_copy_operation()
shutil.rmtree('output', ignore_errors=True)
def test_dry_run():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output', dry_run=True)
assert not os.path.isdir('output')
dir1 = 'output/2017/01/01'
dir2 = 'output/2017/10/06'
dir3 = 'output/unknown'
dir4 = 'output/2018/01/01/'
assert not os.path.isdir(dir1)
assert not os.path.isdir(dir2)
assert not os.path.isdir(dir3)
assert not os.path.isdir(dir4)
def test_progress():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output', progress=True)
dir1 = 'output/2017/01/01'
dir2 = 'output/2017/10/06'
dir3 = 'output/unknown'
dir4 = 'output/2018/01/01/'
assert os.path.isdir(dir1)
assert os.path.isdir(dir2)
assert os.path.isdir(dir3)
assert os.path.isdir(dir4)
assert len([name for name in os.listdir(dir1) if
os.path.isfile(os.path.join(dir1, name))]) == 3
assert len([name for name in os.listdir(dir2) if
os.path.isfile(os.path.join(dir2, name))]) == 1
assert len([name for name in os.listdir(dir3) if
os.path.isfile(os.path.join(dir3, name))]) == 1
assert len([name for name in os.listdir(dir4) if
os.path.isfile(os.path.join(dir4, name))]) == 1
shutil.rmtree('output', ignore_errors=True)
def test_get_file_type(mocker):
mocker.patch.object(Phockup, 'check_directories')
assert Phockup('in', '.').get_file_type("image/jpeg")
assert Phockup('in', '.').get_file_type("video/mp4")
assert not Phockup('in', '.').get_file_type("foo/bar")
def test_get_file_name(mocker):
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
date = {
"date": datetime(2017, 1, 1, 1, 1, 1),
"subseconds": "20"
}
assert Phockup('in', 'out').get_file_name("Bar/Foo.jpg", date) == \
"20170101-01010120.jpg"
def test_get_file_name_is_original_on_exception(mocker):
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
assert Phockup('in', 'out').get_file_name("Bar/Foo.jpg", None) == "Foo.jpg"
def test_process_file_with_filename_date(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
mocker.patch.object(Exif, 'data')
Exif.data.return_value = {
"MIMEType": "image/jpeg"
}
Phockup('input', 'output').process_file("input/date_20170101_010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
shutil.rmtree('output', ignore_errors=True)
def test_process_link_to_file_with_filename_date(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output').process_file(
"input/link_to_date_20170101_010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
shutil.rmtree('output', ignore_errors=True)
def test_process_broken_link(mocker, caplog):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
with caplog.at_level(logging.WARNING):
Phockup('input', 'output').process_file("input/not_a_file.jpg")
assert 'skipped, no such file or directory' in caplog.text
shutil.rmtree('output', ignore_errors=True)
def test_process_broken_link_move(mocker, caplog):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
phockup = Phockup('input', 'output', move=True)
phockup.process_file("input/not_a_file.jpg")
with caplog.at_level(logging.WARNING):
Phockup('input', 'output').process_file("input/not_a_file.jpg")
assert 'skipped, no such file or directory' in caplog.text
shutil.rmtree('output', ignore_errors=True)
def test_process_image_exif_date(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output').process_file("input/exif.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output').process_file("input/xmp.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg.xmp")
shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp_noext(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output').process_file("input/xmp_noext.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp_ext_and_noext(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output').process_file("input/xmp_ext.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg.xmp")
shutil.rmtree('output', ignore_errors=True)
def test_process_image_unknown(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
mocker.patch.object(Exif, 'data')
Exif.data.return_value = {
"MIMEType": "image/jpeg"
}
Phockup('input', 'output').process_file("input/UNKNOWN.jpg")
assert os.path.isfile("output/unknown/unknown.jpg")
shutil.rmtree('output', ignore_errors=True)
def test_process_other(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output').process_file("input/other.txt")
assert os.path.isfile("output/unknown/other.txt")
shutil.rmtree('output', ignore_errors=True)
def test_process_move(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
mocker.patch.object(Exif, 'data')
Exif.data.return_value = {
"MIMEType": "image/jpeg"
}
phockup = Phockup('input', 'output', move=True)
open("input/tmp_20170101_010101.jpg", "w").close()
open("input/tmp_20170101_010101.xmp", "w").close()
phockup.process_file("input/tmp_20170101_010101.jpg")
phockup.process_file("input/tmp_20170101_010101.xmp")
assert not os.path.isfile("input/tmp_20170101_010101.jpg")
assert not os.path.isfile("input/tmp_20170101_010101.xmp")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
shutil.rmtree('output', ignore_errors=True)
def test_process_link(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
mocker.patch.object(Exif, 'data')
Exif.data.return_value = {
"MIMEType": "image/jpeg"
}
phockup = Phockup('input', 'output', link=True)
open("input/tmp_20170101_010101.jpg", "w").close()
open("input/tmp_20170101_010101.xmp", "w").close()
phockup.process_file("input/tmp_20170101_010101.jpg")
phockup.process_file("input/tmp_20170101_010101.xmp")
assert os.path.isfile("input/tmp_20170101_010101.jpg")
assert os.path.isfile("input/tmp_20170101_010101.xmp")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
shutil.rmtree('output', ignore_errors=True)
os.remove("input/tmp_20170101_010101.jpg")
os.remove("input/tmp_20170101_010101.xmp")
def test_process_exists_same(mocker, caplog):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
phockup = Phockup('input', 'output')
phockup.process_file("input/exif.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
with caplog.at_level(logging.INFO):
phockup.process_file("input/exif.jpg")
assert 'skipped, duplicated file' in caplog.text
shutil.rmtree('output', ignore_errors=True)
def test_process_same_date_different_files_rename(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
phockup = Phockup('input', 'output')
phockup.process_file("input/exif.jpg")
mocker.patch.object(Exif, 'data')
Exif.data.return_value = {
"MIMEType": "image/jpeg",
"CreateDate": "2017:01:01 01:01:01"
}
phockup.process_file("input/date_20170101_010101.jpg")
assert os.path.isfile("output/2017/01/01/20170101-010101-2.jpg")
shutil.rmtree('output', ignore_errors=True)
def test_process_skip_xmp(mocker):
# No exception raised implies the standalone XMP file was skipped
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
phockup = Phockup('input', 'output')
phockup.process_file("skip.xmp")
def test_process_skip_ignored_file():
shutil.rmtree('output', ignore_errors=True)
shutil.rmtree('input_ignored', ignore_errors=True)
os.mkdir('input_ignored')
open("input_ignored/.DS_Store", "w").close()
Phockup('input_ignored', 'output')
assert not os.path.isfile("output/unknown/.DS_Store")
shutil.rmtree('output', ignore_errors=True)
shutil.rmtree('input_ignored', ignore_errors=True)
def test_keep_original_filenames(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output', original_filenames=True).process_file(
"input/exif.jpg")
assert os.path.isfile("output/2017/01/01/exif.jpg")
assert not os.path.isfile("output/2017/01/01/20170101-010101.jpg")
shutil.rmtree('output', ignore_errors=True)
def test_keep_original_filenames_and_filenames_case(mocker):
shutil.rmtree('output', ignore_errors=True)
mocker.patch.object(Phockup, 'check_directories')
mocker.patch.object(Phockup, 'walk_directory')
Phockup('input', 'output', original_filenames=True).process_file(
"input/UNKNOWN.jpg")
assert os.path.isfile("output/2017/10/06/UNKNOWN.jpg")
assert 'unknown.jpg' not in os.listdir("output/2017/10/06")
shutil.rmtree('output', ignore_errors=True)
def test_maxdepth_zero():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output', maxdepth=0)
dir1 = 'output/2017/01/01'
dir2 = 'output/2017/10/06'
dir3 = 'output/unknown'
assert os.path.isdir(dir1)
assert os.path.isdir(dir2)
assert os.path.isdir(dir3)
assert len([name for name in os.listdir(dir1) if
os.path.isfile(os.path.join(dir1, name))]) == 3
assert len([name for name in os.listdir(dir2) if
os.path.isfile(os.path.join(dir2, name))]) == 1
assert len([name for name in os.listdir(dir3) if
os.path.isfile(os.path.join(dir3, name))]) == 1
shutil.rmtree('output', ignore_errors=True)
def test_maxdepth_one():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output', maxdepth=1)
validate_copy_operation()
shutil.rmtree('output', ignore_errors=True)
def test_maxconcurrency_none():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output', max_concurrency=0)
validate_copy_operation()
shutil.rmtree('output', ignore_errors=True)
def test_maxconcurrency_five():
shutil.rmtree('output', ignore_errors=True)
Phockup('input', 'output', max_concurrency=5)
validate_copy_operation()
shutil.rmtree('output', ignore_errors=True)
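# Shared assertion helper for the copy tests above: verifies that the dated fixtures land in their expected date directories and that the undated file ends up under output/unknown.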
def validate_copy_operation():
dir1 = 'output/2017/01/01'
dir2 = 'output/2017/10/06'
dir3 = 'output/unknown'
dir4 = 'output/2018/01/01/'
assert os.path.isdir(dir1)
assert os.path.isdir(dir2)
assert os.path.isdir(dir3)
assert os.path.isdir(dir4)
assert len([name for name in os.listdir(dir1) if
os.path.isfile(os.path.join(dir1, name))]) == 3
assert len([name for name in os.listdir(dir2) if
os.path.isfile(os.path.join(dir2, name))]) == 1
assert len([name for name in os.listdir(dir3) if
os.path.isfile(os.path.join(dir3, name))]) == 1
assert len([name for name in os.listdir(dir4) if
os.path.isfile(os.path.join(dir4, name))]) == 1
|
[] |
[] |
[
"WINDIR"
] |
[]
|
["WINDIR"]
|
python
| 1 | 0 |