filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
profiles_project/settings.py | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qzvk%vwkxut!srui*&lhb6j^mx)@i(fs*ijz3+_h!!^aq^hcja'
# SECURITY WARNING: don't run with debug turned on in production!
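# DEBUG is read from the environment and defaults to enabled ('1') when the variable is unset.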
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
#DEBUG = True
ALLOWED_HOSTS = [
'ec2-18-190-26-30.us-east-2.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
SAW/proof/SHA512/SHA512-384-check-entrypoint.go | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package main
import (
utility "aws-lc-verification/proof/common"
"log"
"os"
"sync"
)
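// sha_process_limit caps the number of SAW verification processes that run concurrently.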
const sha_process_limit int = 15
func main() {
log.Printf("Started SHA512-384 check.")
// When 'SHA512_384_SELECTCHECK' is undefined, quickcheck is executed.
env_var := os.Getenv("SHA512_384_SELECTCHECK")
if len(env_var) == 0 {
utility.RunSawScript("verify-SHA512-384-quickcheck.saw")
return
}
// When 'SHA512_384_SELECTCHECK' is defined, formal verification is executed for all `len` values of a given 'num'.
// Due to memory usage (each select_check takes 8GB of memory) and the container size limit (the largest container has 145GB of memory),
// not all nums are used for formal verification; only the nums below are selected.
target_nums := []int{0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 127}
// Generate saw scripts based on above verification template and target num ranges.
var wg sync.WaitGroup
process_count := 0
for _, num := range target_nums {
wg.Add(1)
saw_template := "verify-SHA512-384-selectcheck-template.txt"
placeholder_name := "TARGET_NUM_PLACEHOLDER"
go utility.CreateAndRunSawScript(saw_template, placeholder_name, num, &wg)
utility.Wait(&process_count, sha_process_limit, &wg)
}
wg.Wait()
log.Printf("Completed SHA512-384 check.")
}
| [
"\"SHA512_384_SELECTCHECK\""
]
| []
| [
"SHA512_384_SELECTCHECK"
]
| [] | ["SHA512_384_SELECTCHECK"] | go | 1 | 0 | |
tests/test_zdo2021.py | import pytest
import os
import skimage.io
import glob
import numpy as np
import sklearn.metrics
import matplotlib.pyplot as plt
import json
from pathlib import Path
from skimage.draw import polygon
import zdo2021.main
# cd ZDO2021
# python -m pytest
def test_run_random():
vdd = zdo2021.main.VarroaDetector()
# Set the 'VARROA_DATA_PATH' environment variable in your operating system to the path of the dataset.
# If it is not set, the test dataset tests/test_dataset is used
dataset_path = os.getenv('VARROA_DATA_PATH_', default=Path(__file__).parent / 'test_dataset/')
# print(f'dataset_path = {dataset_path}')
files = glob.glob(f'{dataset_path}/images/*.jpg')
cislo_obrazku = np.random.randint(0, len(files))
filename = files[cislo_obrazku]
im = skimage.io.imread(filename)
imgs = np.expand_dims(im, axis=0)
# print(f"imgs.shape={imgs.shape}")
prediction = vdd.predict(imgs)
assert prediction.shape[0] == imgs.shape[0]
# This will run everywhere except on GitHub
if not os.getenv('CI'):
ann_pth = Path(dataset_path)/"annotations/instances_default.json"
assert ann_pth.exists()
# gt_ann = json.loads(str(ann_pth))
with open(ann_pth, 'r') as infile:
gt_ann = json.load(infile)
ground_true_mask = prepare_ground_true_mask(gt_ann, filename)
plt.imshow(prediction[0], cmap='gray')
plt.contour(ground_true_mask)
plt.show()
f1 = f1score(ground_true_mask, prediction)
print(f"f1score={f1}")
# assert f1 > 0.55
def test_run_all():
vdd = zdo2021.main.VarroaDetector()
# Set the 'VARROA_DATA_PATH' environment variable in your operating system to the path of the dataset.
# If it is not set, the test dataset tests/test_dataset is used
dataset_path = os.getenv('VARROA_DATA_PATH_', default=Path(__file__).parent / 'test_dataset/')
# dataset_path = Path(r"H:\biology\orig\zdo_varroa_detection_coco_001")
# print(f'dataset_path = {dataset_path}')
files = glob.glob(f'{dataset_path}/images/*.jpg')
f1s = []
for filename in files:
im = skimage.io.imread(filename)
imgs = np.expand_dims(im, axis=0)
# print(f"imgs.shape={imgs.shape}")
prediction = vdd.predict(imgs)
ann_pth = Path(dataset_path)/"annotations/instances_default.json"
assert ann_pth.exists()
# gt_ann = json.loads(str(ann_pth))
with open(ann_pth, 'r') as infile:
gt_ann = json.load(infile)
ground_true_mask = prepare_ground_true_mask(gt_ann, filename)
# plt.imshow(prediction[0])
# plt.contour(ground_true_mask)
# plt.show()
f1i = f1score(ground_true_mask, prediction)
# assert f1i > 0.55
f1s.append(f1i)
f1 = np.mean(f1s)
print(f"f1score={f1}")
print(f1s)
# assert f1 > 0.55
def f1score(ground_true_mask:np.ndarray, prediction:np.ndarray):
"""
Measure f1 score for one image
:param ground_true_mask:
:param prediction:
:return:
"""
f1 = sklearn.metrics.f1_score(ground_true_mask.astype(bool).flatten(), prediction.astype(bool).flatten())
return f1
def prepare_ground_true_mask(gt_ann, filename):
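"""Build a boolean ground-truth mask for one image from its COCO-style polygon annotations."""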
name = None
for ann_im in gt_ann['images']:
if ann_im["file_name"] == Path(filename).name:
# mask = np.zeros([], dtype=bool)
M = np.zeros((ann_im["width"], ann_im["height"]), dtype=bool)
image_id = ann_im["id"]
for ann in gt_ann['annotations']:
if ann["image_id"] == image_id:
S = ann['segmentation']
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[0:N:2]), np.array(s[1:N:2])) # (y, x)
M[rr, cc] = True
return M | []
| []
| [
"CI",
"VARROA_DATA_PATH_"
]
| [] | ["CI", "VARROA_DATA_PATH_"] | python | 2 | 0 | |
covidframe/config/__init__.py | import os
from dotenv import load_dotenv
load_dotenv()
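# Base paths come from environment variables (optionally loaded from a .env file), defaulting to the current directory.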
paths = {
"datasets_base_path": os.getenv("DATASETS_BASE_PATH", "."),
"output_folder": os.getenv("OUTPUT_FOLDER", ".")
}
categories = [
"pneumonia",
"normal",
"covid-19"
]
databases = [
{
"type": "metadata",
"source": "github",
"folder_name": "covid-chestxray-dataset",
"image_relative_path": "images",
"type_own": {
"metadata_file": "metadata.csv",
"selection": [{
"column": "view",
"values": ["PA",
"AP",
"AP Supine",
"AP semi erect",
"AP erect"]
}],
"column_category": "finding",
"column_file_names": "filename",
"column_id": "patientid",
"mappings": {
'Pneumonia/Viral/COVID-19': "covid-19",
'Pneumonia': "pneumonia",
'Pneumonia/Viral/SARS': "pneumonia",
'Pneumonia/Bacterial/Streptococcus': "pneumonia",
'No Finding': "normal",
'Pneumonia/Bacterial/Chlamydophila': "pneumonia",
'Pneumonia/Bacterial/E.Coli': "pneumonia",
'Pneumonia/Bacterial/Klebsiella': "pneumonia",
'Pneumonia/Bacterial/Legionella': "pneumonia",
'Pneumonia/Viral/Varicella': "pneumonia",
'Pneumonia/Bacterial': "pneumonia",
'Pneumonia/Bacterial/Mycoplasma': "pneumonia",
'Pneumonia/Viral/Influenza': "pneumonia",
'Tuberculosis': "pneumonia",
'Pneumonia/Viral/Influenza/H1N1': "pneumonia",
'Pneumonia/Aspiration': "pneumonia",
'Pneumonia/Bacterial/Nocardia': "pneumonia",
'Pneumonia/Viral/MERS-CoV': "pneumonia",
'Pneumonia/Bacterial/Staphylococcus/MRSA': "pneumonia",
}},
},
{
"type": "metadata",
"source": "github",
"folder_name": "Actualmed-COVID-chestxray-dataset",
"image_relative_path": "images",
"type_own": {
"metadata_file": "metadata.csv",
"column_category": "finding",
"column_file_names": "imagename",
"column_id": "patientid",
"mappings": {
"No finding": "normal",
"COVID-19": "covid-19",
"Pneumonia": "pneumonia"
}
}},
{
"type": "metadata",
"source": "github",
"folder_name": "Figure1-COVID-chestxray-dataset",
"image_relative_path": "images",
"type_own": {
"metadata_file": "metadata.csv",
"file_encoding": "ISO-8859-1",
"column_category": "finding",
"column_file_names": "patientid",
"column_id": "patientid",
"include_suffix": False,
"mappings": {
"No finding": "normal",
"COVID-19": "covid-19",
"Pneumonia": "pneumonia"
}
}
},
{
"type": "folders",
"source": "kaggle",
"folder_name": "Covid19-dataset",
"image_relative_path": "",
"type_own": {
"sub_dirs": ["test",
"train"],
"mappings": {
"Covid": "covid-19",
"Normal": "normal",
"Viral Pneumonia": "pneumonia"
},
},
},
{
"type": "folders-metadata",
"source": "kaggle",
"folder_name": "COVID-19_Radiography_Dataset",
"image_relative_path": "",
"type_own": {
"sub_dirs": [],
"mappings": {
"COVID": "covid-19",
"Normal": "normal",
"Viral Pneumonia": "pneumonia",
"Lung_Opacity": "pneumonia"
},
"metadata_files": {
"COVID": "COVID.metadata.xlsx",
"Normal": "Normal.metadata.xlsx",
"Viral Pneumonia": "Viral Pneumonia.metadata.xlsx",
"Lung_Opacity": "Lung_Opacity.metadata.xlsx"
},
}
},
]
database_types = [
"metadata",
"folders",
"folders-metadata"
]
image_extensions = [
".png", ".jpg", ".jpeg", ".tiff", ".bmp", '.PNG', '.JPG', '.JPEG'
]
cnn = [
{
"name": "COVID-Net",
"mappings": {
"covid-19": "COVID-19",
"normal": "normal",
"pneumonia": "pneumonia",
},
}
]
| []
| []
| [
"OUTPUT_FOLDER",
"DATASETS_BASE_PATH"
]
| [] | ["OUTPUT_FOLDER", "DATASETS_BASE_PATH"] | python | 2 | 0 | |
framework/main.py | import os
import signal
import pprint as pp
import time
from urllib.parse import quote
from requests.exceptions import ConnectionError
from argparse import ArgumentParser
from rlockertools.resourcelocker import ResourceLocker
import sys
def init_argparser():
"""
Initialization of argument parse library with it's arguments
Args:
None
Returns:
object: Parsed arguments - returned from parser.parse_args()
"""
parser = ArgumentParser()
parser.add_argument(
"--server-url",
help="The URL of the Resource Locker Server",
required=True,
action="store",
)
parser.add_argument(
"--token",
help="Token of the user that creates API calls",
required=True,
action="store",
)
parser.add_argument(
"--release", help="Use this argument to release a resource", action="store_true"
)
parser.add_argument(
"--lock", help="Use this argument to lock a resource", action="store_true"
)
parser.add_argument(
"--resume-on-connection-error", help="Use this argument in case you don't want to break queue execution"
" in the middle of waiting for queue status being FINISHED", action="store_true"
)
parser.add_argument(
"--signoff",
help="Use this when lock=True, locking a resource requires signoff",
action="store",
)
parser.add_argument(
"--priority",
help="Use this when lock=True, specify the level of priority the resource should be locked",
action="store",
)
parser.add_argument(
"--search-string",
help="Use this when lock=True, specify the lable or the name of the lockable resource",
action="store",
)
parser.add_argument(
"--link",
help="Use this when lock=True, specify the link of the CI/CD pipeline that locks the resource",
action="store",
)
parser.add_argument(
"--interval",
help="Use this when lock=True, how many seconds to wait between each call"
" while checking for a free resource",
type=int,
action="store",
)
parser.add_argument(
"--attempts",
help="Use this when lock=True, how many times to create an API call"
" that will check for a free resource ",
type=int,
action="store",
)
return parser.parse_args()
def run(args):
"""
Function of the actions against the Resource Locker endpoint
Args:
args (object): Parsed arguments - returned from parser.parse_args()
Returns:
None
"""
try:
# Instantiate the connection to the Resource Locker:
inst = ResourceLocker(instance_url=args.server_url, token=args.token)
if args.release:
resource_to_release = inst.get_lockable_resources(signoff=args.signoff)
if resource_to_release:
release_attempt = inst.release(resource_to_release[0])
print(release_attempt.text)
else:
print(f"There is no resource: {args.signoff} locked, ignoring!")
if args.lock:
new_queue = inst.find_resource(
search_string=args.search_string,
signoff=args.signoff,
priority=int(args.priority),
link=quote(args.link, safe="") if args.link else None,
)
# We should verify that the resource has been locked by checking
# if the queue is finished.
# timeout is -> attempts * interval
abort_action = inst.abort_queue
abort_action_args = {
"queue_id": new_queue.json().get("id"),
"abort_msg": "Queue has been aborted in the middle of a CI/CD Pipeline \n"
"or during manual execution.",
}
def signal_handler(sig, frame):
abort_action(**abort_action_args)
sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
verify_lock = inst.wait_until_finished(
queue_id=new_queue.json().get("id"),
interval=args.interval,
attempts=args.attempts,
silent=False,
abort_on_timeout=True,
resume_on_connection_error=args.resume_on_connection_error,
)
# If it will return any object, it means the condition is achieved:
if verify_lock:
print("Resource Locked Successfully! Info: \n")
# Pretty-print the JSON response so it is easier to read:
pp.pprint(verify_lock)
except (ConnectionError) as e:
print(
"Connection Error! \n"
"Error is: \n"
f"{str(e)}"
)
if args.resume_on_connection_error:
print(f"You chose to continue on connection errors, will try again in {args.interval} seconds!")
time.sleep(args.interval)
run(args)
else:
print("You chose to NOT continue on connection errors. \n"
"To prevent this, you can run next time with --resume-on-connection-error! \n"
"Exiting ... ")
except Exception as e:
print("An unexpected error occured!")
raise
def main():
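# Request unbuffered Python output; the setting is inherited by any child processes through the environment.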
os.environ["PYTHONUNBUFFERED"] = "1"
args = init_argparser()
run(args)
| []
| []
| [
"PYTHONUNBUFFERED"
]
| [] | ["PYTHONUNBUFFERED"] | python | 1 | 0 | |
aws-golang-dynamo-stream-to-elasticsearch/cmd/aws-golang-dynamo-stream-to-elasticsearch/main.go | package main
import (
"fmt"
"os"
"strings"
"github.com/olivere/elastic"
"github.com/serverless/examples/aws-golang-dynamo-stream-to-elasticsearch/dstream"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
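// Package-level AWS session, DynamoDB client and Elasticsearch wrapper are reused across warm Lambda invocations.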
var awsSession = session.Must(session.NewSession(&aws.Config{}))
var dynamoSvc = dynamodb.New(awsSession)
var esclient = new(dstream.Elasticsearch)
func handler(e events.DynamoDBEvent) error {
var item map[string]events.DynamoDBAttributeValue
fmt.Println("Beginning ES Sync")
for _, v := range e.Records {
switch v.EventName {
case "INSERT":
fallthrough
case "MODIFY":
tableName := strings.Split(v.EventSourceArn, "/")[1]
item = v.Change.NewImage
details, err := (&dstream.DynamoDetails{
DynamoDBAPI: dynamoSvc,
}).Get(tableName)
if err != nil {
return err
}
svc, err := elastic.NewClient(
elastic.SetSniff(false),
elastic.SetURL(fmt.Sprintf("https://%s", os.Getenv("ELASTICSEARCH_URL"))),
)
if err != nil {
return err
}
esclient.Client = svc
resp, err := esclient.Update(details, item)
if err != nil {
return err
}
fmt.Println(resp.Result)
default:
}
}
return nil
}
func main() {
lambda.Start(handler)
}
| [
"\"ELASTICSEARCH_URL\""
]
| []
| [
"ELASTICSEARCH_URL"
]
| [] | ["ELASTICSEARCH_URL"] | go | 1 | 0 | |
wal/decoder.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"bufio"
"encoding/binary"
"hash"
"io"
"sync"
"etcd/pkg/crc"
"etcd/pkg/pbutil"
"etcd/raft/raftpb"
"etcd/wal/walpb"
)
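// minSectorSize is the smallest disk sector size assumed when checking for torn writes.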
const minSectorSize = 512
// frameSizeBytes is frame size in bytes, including record size and padding size.
const frameSizeBytes = 8
type decoder struct {
mu sync.Mutex
brs []*bufio.Reader
// lastValidOff file offset following the last valid decoded record
lastValidOff int64
crc hash.Hash32
}
func newDecoder(r ...io.Reader) *decoder {
readers := make([]*bufio.Reader, len(r))
for i := range r {
readers[i] = bufio.NewReader(r[i])
}
return &decoder{
brs: readers,
crc: crc.New(0, crcTable),
}
}
func (d *decoder) decode(rec *walpb.Record) error {
rec.Reset()
d.mu.Lock()
defer d.mu.Unlock()
return d.decodeRecord(rec)
}
func (d *decoder) decodeRecord(rec *walpb.Record) error {
if len(d.brs) == 0 {
return io.EOF
}
l, err := readInt64(d.brs[0])
if err == io.EOF || (err == nil && l == 0) {
// hit end of file or preallocated space
d.brs = d.brs[1:]
if len(d.brs) == 0 {
return io.EOF
}
d.lastValidOff = 0
return d.decodeRecord(rec)
}
if err != nil {
return err
}
recBytes, padBytes := decodeFrameSize(l)
data := make([]byte, recBytes+padBytes)
if _, err = io.ReadFull(d.brs[0], data); err != nil {
// ReadFull returns io.EOF only if no bytes were read
// the decoder should treat this as an ErrUnexpectedEOF instead.
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
if err := rec.Unmarshal(data[:recBytes]); err != nil {
if d.isTornEntry(data) {
return io.ErrUnexpectedEOF
}
return err
}
// skip crc checking if the record type is crcType
if rec.Type != crcType {
d.crc.Write(rec.Data)
if err := rec.Validate(d.crc.Sum32()); err != nil {
if d.isTornEntry(data) {
return io.ErrUnexpectedEOF
}
return err
}
}
// record decoded as valid; point last valid offset to end of record
d.lastValidOff += frameSizeBytes + recBytes + padBytes
return nil
}
func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
// the record size is stored in the lower 56 bits of the 64-bit length
recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
// non-zero padding is indicated by set MSb / a negative length
if lenField < 0 {
// padding is stored in lower 3 bits of length MSB
padBytes = int64((uint64(lenField) >> 56) & 0x7)
}
return recBytes, padBytes
}
// isTornEntry determines whether the last entry of the WAL was partially written
// and corrupted because of a torn write.
func (d *decoder) isTornEntry(data []byte) bool {
if len(d.brs) != 1 {
return false
}
fileOff := d.lastValidOff + frameSizeBytes
curOff := 0
chunks := [][]byte{}
// split data on sector boundaries
for curOff < len(data) {
chunkLen := int(minSectorSize - (fileOff % minSectorSize))
if chunkLen > len(data)-curOff {
chunkLen = len(data) - curOff
}
chunks = append(chunks, data[curOff:curOff+chunkLen])
fileOff += int64(chunkLen)
curOff += chunkLen
}
// if any data for a sector chunk is all 0, it's a torn write
for _, sect := range chunks {
isZero := true
for _, v := range sect {
if v != 0 {
isZero = false
break
}
}
if isZero {
return true
}
}
return false
}
func (d *decoder) updateCRC(prevCrc uint32) {
d.crc = crc.New(prevCrc, crcTable)
}
func (d *decoder) lastCRC() uint32 {
return d.crc.Sum32()
}
func (d *decoder) lastOffset() int64 { return d.lastValidOff }
func mustUnmarshalEntry(d []byte) raftpb.Entry {
var e raftpb.Entry
pbutil.MustUnmarshal(&e, d)
return e
}
func mustUnmarshalState(d []byte) raftpb.HardState {
var s raftpb.HardState
pbutil.MustUnmarshal(&s, d)
return s
}
func readInt64(r io.Reader) (int64, error) {
var n int64
err := binary.Read(r, binary.LittleEndian, &n)
return n, err
}
| []
| []
| []
| [] | [] | go | null | null | null |
vendor/code.cloudfoundry.org/cli/command/v2/update_security_group_command.go | package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
"code.cloudfoundry.org/cli/command/flag"
)
type UpdateSecurityGroupCommand struct {
RequiredArgs flag.SecurityGroupArgs `positional-args:"yes"`
usage interface{} `usage:"CF_NAME update-security-group SECURITY_GROUP PATH_TO_JSON_RULES_FILE\n\n The provided path can be an absolute or relative path to a file.\n It should have a single array with JSON objects inside describing the rules.\n\n Valid json file example:\n [\n {\n \"protocol\": \"tcp\",\n \"destination\": \"10.0.11.0/24\",\n \"ports\": \"80,443\",\n \"description\": \"Allow http and https traffic from ZoneA\"\n }\n ]\n\nTIP: Changes will not apply to existing running applications until they are restarted."`
relatedCommands interface{} `related_commands:"restage, security-groups"`
}
func (_ UpdateSecurityGroupCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
func (_ UpdateSecurityGroupCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
| [
"\"CF_TRACE\""
]
| []
| [
"CF_TRACE"
]
| [] | ["CF_TRACE"] | go | 1 | 0 | |
appengine/endpoints/sign.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package endpoints provides the functions used to receive requests
// and serve data via imaging.
package endpoints
import (
"context"
"crypto"
"crypto/rsa"
"crypto/x509"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
"time"
"github.com/google/fresnel/models"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
"cloud.google.com/go/storage"
"github.com/patrickmn/go-cache"
"gopkg.in/yaml.v2"
)
var (
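// c is an in-memory cache with a 1 hour default expiration and a 90 minute cleanup interval.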
c = cache.New(1*time.Hour, 90*time.Minute)
macRegEx = "([^0-9,a-f,A-F,:])"
bucketFileFinder = bucketFileHandle
)
// SignRequestHandler implements http.Handler for signed URL requests.
type SignRequestHandler struct{}
func (SignRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
errResp := `{"Status":"%s","ErrorCode":%d}`
ctx := appengine.NewContext(r)
w.Header().Set("Content-Type", "application/json")
resp := signResponse(ctx, r)
if resp.ErrorCode != models.StatusSuccess {
w.WriteHeader(http.StatusInternalServerError)
}
jsonResponse, err := json.Marshal(resp)
if err != nil {
es := fmt.Sprintf("json.Marshall(%#v): %v", resp, err)
log.Errorf(ctx, es)
http.Error(w, fmt.Sprintf(errResp, err, models.StatusJSONError), http.StatusInternalServerError)
return
}
if _, err = w.Write(jsonResponse); err != nil {
log.Errorf(ctx, fmt.Sprintf("failed to write response to client: %s", err))
return
}
log.Infof(ctx, "successfully returned response %#v to client", resp)
return
}
// signResponse processes a signed URL request and provides a valid response to the client.
func signResponse(ctx context.Context, r *http.Request) models.SignResponse {
bucket := os.Getenv("BUCKET")
if bucket == "" {
log.Errorf(ctx, "BUCKET environment variable not set for %v", ctx)
return models.SignResponse{Status: "BUCKET environment variable not set", ErrorCode: models.StatusConfigError}
}
d := os.Getenv("SIGNED_URL_DURATION")
if d == "" {
log.Errorf(ctx, "SIGNED_URL_DURATION environment variable not set for %v", ctx)
return models.SignResponse{Status: "SIGNED_URL_DURATION environment variable not set", ErrorCode: models.StatusConfigError}
}
duration, err := time.ParseDuration(d)
if err != nil {
log.Errorf(ctx, "SIGNED_URL_DURATION was %q, which is not a valid time duration.", d)
return models.SignResponse{Status: "SIGNED_URL_DURATION environment variable not set", ErrorCode: models.StatusConfigError}
}
resp, req := ProcessSignRequest(ctx, r, bucket, duration)
if resp.ErrorCode != models.StatusSuccess {
log.Warningf(ctx, "could not process SignRequest %v", resp)
}
if resp.ErrorCode == models.StatusSuccess {
log.Infof(ctx, "successfully processed SignRequest for seed issued to %#v at:%#v Response: %q", req.Seed.Username, req.Seed.Issued, resp.SignedURL)
}
return resp
}
// ProcessSignRequest takes a models.SignRequest that is provided by a client,
// validates and processes it. A response is always provided using models.SignResponse.
func ProcessSignRequest(ctx context.Context, r *http.Request, bucket string, duration time.Duration) (models.SignResponse, models.SignRequest) {
req, code, err := unmarshalSignRequest(r)
if err != nil {
log.Errorf(ctx, "unmarshalSignRequest called with: %#v, returned error: %s", r, err)
return models.SignResponse{
Status: err.Error(),
ErrorCode: code,
}, req
}
if err := validSignRequest(ctx, req); err != nil {
return models.SignResponse{
Status: err.Error(),
ErrorCode: models.StatusSignError,
}, req
}
url, err := signedURL(ctx, bucket, req.Path, duration)
if err != nil {
return models.SignResponse{
Status: err.Error(),
ErrorCode: models.StatusSignError,
}, req
}
return models.SignResponse{
Status: "Success",
ErrorCode: models.StatusSuccess,
SignedURL: url,
}, req
}
// unmarshalSignRequest takes an incoming request, returning a models.SignRequest and
// and a models.StatusCode code representing whether it was read successfully.
func unmarshalSignRequest(r *http.Request) (models.SignRequest, models.StatusCode, error) {
var signRequest models.SignRequest
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return models.SignRequest{},
models.StatusReqUnreadable,
errors.New("unable to read HTTP request body")
}
if len(body) == 0 {
return models.SignRequest{},
models.StatusJSONError,
errors.New("empty HTTP JSON request body")
}
if err = json.Unmarshal(body, &signRequest); err != nil {
return models.SignRequest{},
models.StatusJSONError,
fmt.Errorf("unable to unmarshal JSON request, error: %v", err)
}
return signRequest,
models.StatusSuccess,
nil
}
func validSignRequest(ctx context.Context, sr models.SignRequest) error {
for _, mac := range sr.Mac {
m := strings.Replace(mac, ":", "", -1)
// A valid Mac is neither shorter nor longer than 12 characters.
if len(m) < 12 {
return fmt.Errorf("%s is too short(%d) to be a Mac address", m, len(m))
}
if len(m) > 12 {
return fmt.Errorf("%s is too long(%d) to be a Mac address", m, len(m))
}
// A valid Mac address can only contain hexadecimal characters and colons.
matched, err := regexp.MatchString(macRegEx, mac)
if err != nil {
return fmt.Errorf("regexp.MatchString(%s): %v", mac, err)
}
if matched {
return fmt.Errorf("%s is not a valid mac address", mac)
}
}
hashCheck := os.Getenv("VERIFY_SIGN_HASH")
if hashCheck != "true" {
log.Infof(ctx, "VERIFY_SIGN_HASH is not set to true, hash validation will be logged but not enforced")
}
err := validSignHash(ctx, sr.Hash)
if err != nil {
log.Warningf(ctx, "failed to validate sign request hash: %v", err)
}
if err != nil && hashCheck == "true" {
return fmt.Errorf("validSignHash: %v", err)
}
// insert hash into seed to validate signature
sr.Seed.Hash = sr.Hash
if err := validSeed(ctx, sr.Seed, sr.Signature); err != nil {
return fmt.Errorf("validSeed: %v", err)
}
if len(sr.Path) < 1 {
return errors.New("sign request path cannot be empty")
}
return nil
}
// validSignHash takes the current context and the hash submitted with the sign
// request and determines if the submitted hash is in a list of acceptable hashes
// which is stored in a cloud bucket.
func validSignHash(ctx context.Context, requestHash []byte) error {
b := os.Getenv("BUCKET")
if b == "" {
return fmt.Errorf("BUCKET environment variable not set for %v", ctx)
}
acceptedHashes, err := populateAllowlist(ctx)
if err != nil {
return fmt.Errorf("cache.Get(acceptedHashes): %v", err)
}
log.Infof(ctx, "retrieved acceptable hashes: %#v", acceptedHashes)
h := hex.EncodeToString(requestHash)
if _, ok := acceptedHashes[h]; ok {
log.Infof(ctx, "%v passed validation", h)
return nil
}
return fmt.Errorf("submitted hash %v not in accepted hash list", hex.EncodeToString(requestHash))
}
// validSeed takes a seed and its signature, verifies the seed contents and
// optionally the signature. Verification attempts to use the current set
// of appengine.PublicCertificates first, and can fall back to those included
// in the seed. If the requested validation fails, an error is returned.
func validSeed(ctx context.Context, seed models.Seed, sig []byte) error {
// Return immediately if seed verification is disabled.
enabled := os.Getenv("VERIFY_SEED")
if enabled != "true" {
log.Infof(ctx, "VERIFY_SEED=%s or not set, skipping seed verification.", enabled)
return nil
}
// Check that the username is present
if len(seed.Username) < 3 {
return fmt.Errorf("the username %q is invalid or empty", seed.Username)
}
// Check that the seed is not expired or invalid.
validityPeriod := os.Getenv("SEED_VALIDITY_DURATION")
if validityPeriod == "" {
return errors.New("SEED_VALIDITY_DURATION environment variable is not present")
}
d, err := time.ParseDuration(validityPeriod)
if err != nil {
return fmt.Errorf("time.parseDuration(%s): %v", validityPeriod, err)
}
expires := seed.Issued.Add(d)
now := time.Now()
if seed.Issued.After(now) {
return fmt.Errorf("seed issued in the future %s", seed.Issued)
}
if expires.Before(now) {
return fmt.Errorf("seed expired on %s, current date is %s", expires, now)
}
// Skip signature verification if it is not enabled.
sigCheck := os.Getenv("VERIFY_SEED_SIGNATURE")
if sigCheck != "true" {
log.Infof(ctx, "VERIFY_SEED_SIGNATURE=%s or not set, skipping seed signature check", sigCheck)
return nil
}
if err := validSeedSignature(ctx, seed, sig); err != nil {
return fmt.Errorf("validSeedSignature: %v", err)
}
return nil
}
func validSeedSignature(ctx context.Context, seed models.Seed, sig []byte) error {
// Check the seed signature using the App Identity.
// https://cloud.google.com/appengine/docs/standard/go/appidentity/
certs, err := appengine.PublicCertificates(ctx)
if err != nil {
return fmt.Errorf("appengine.PublicCertificates(%+v): %v", ctx, err)
}
enableFallback := os.Getenv("VERIFY_SEED_SIGNATURE_FALLBACK")
if enableFallback == "true" {
log.Infof(ctx, "VERIFY_SEED_SIGNATURE_FALLBACK=%s, adding certificates from seed for fallback verification", enableFallback)
certs = append(certs, seed.Certs...)
}
log.Infof(ctx, "attempting signature verification using %d certs", len(certs))
for _, cert := range certs {
block, _ := pem.Decode(cert.Data)
if block == nil {
log.Infof(ctx, "pem.Decode returned an empty block for data %q.", cert.Data)
continue
}
x509Cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
log.Infof(ctx, "x509.ParseCertificate(%s): %v.", block.Bytes, err)
continue
}
pubkey, ok := x509Cert.PublicKey.(*rsa.PublicKey)
if !ok {
log.Infof(ctx, "certificate '%v' issued by '%v' is does not contain an RSA public key.", x509Cert.Subject, x509Cert.Issuer)
continue
}
jsonSeed, err := json.Marshal(seed)
if err != nil {
log.Warningf(ctx, "failed to marshal seed for signature verification: %v", err)
continue
}
seedHash := crypto.SHA256
h := seedHash.New()
h.Write(jsonSeed)
hashed := h.Sum(nil)
if err := rsa.VerifyPKCS1v15(pubkey, seedHash, hashed, sig); err != nil {
log.Infof(ctx, "unable to verify seed %#v with signature %q using certificate '%#v'", seed, sig, x509Cert.Subject)
continue
}
log.Infof(ctx, "successfully verified signature using certificate '%#v'", x509Cert.Subject)
return nil
}
return fmt.Errorf("unable to verify signature for seed issued on '%v' to %s", seed.Issued, seed.Username)
}
// signedURL takes a bucket name and relative file path, and returns an
// equivalent signed URL using the appengine built-in service account.
// https://cloud.google.com/appengine/docs/standard/go/appidentity/
func signedURL(ctx context.Context, bucket, file string, duration time.Duration) (string, error) {
sa, err := appengine.ServiceAccount(ctx)
if err != nil {
return "", fmt.Errorf("appengine.ServiceAccount: %v", err)
}
return storage.SignedURL(bucket, file, &storage.SignedURLOptions{
GoogleAccessID: sa,
SignBytes: func(b []byte) ([]byte, error) {
_, sig, err := appengine.SignBytes(ctx, b)
return sig, err
},
Method: "GET",
Expires: time.Now().Add(time.Minute * duration),
})
}
// getAllowlist returns a map of hashes and whether they are acceptable.
func getAllowlist(ctx context.Context, b string, f string) (map[string]bool, error) {
log.Infof(ctx, "reading acceptable hashes from cloud bucket")
h, err := bucketFileFinder(ctx, b, f)
if err != nil {
return nil, fmt.Errorf("bucketFileFinder(%s, %s): %v", b, f, err)
}
y, err := ioutil.ReadAll(h)
if err != nil {
return nil, fmt.Errorf("reading allowlist contents: %v", err)
}
var wls []string
if err := yaml.Unmarshal(y, &wls); err != nil {
return nil, fmt.Errorf("failed parsing allowlist: %v", err)
}
mwl := make(map[string]bool)
for _, e := range wls {
mwl[strings.ToLower(e)] = true
}
return mwl, nil
}
func bucketFileHandle(ctx context.Context, b string, f string) (io.Reader, error) {
client, err := storage.NewClient(ctx)
if err != nil {
return nil, fmt.Errorf("failed to create cloud storage client: %v", err)
}
bh := client.Bucket(b)
fh := bh.Object(f)
return fh.NewReader(ctx)
}
| [
"\"BUCKET\"",
"\"SIGNED_URL_DURATION\"",
"\"VERIFY_SIGN_HASH\"",
"\"BUCKET\"",
"\"VERIFY_SEED\"",
"\"SEED_VALIDITY_DURATION\"",
"\"VERIFY_SEED_SIGNATURE\"",
"\"VERIFY_SEED_SIGNATURE_FALLBACK\""
]
| []
| [
"SEED_VALIDITY_DURATION",
"SIGNED_URL_DURATION",
"BUCKET",
"VERIFY_SEED",
"VERIFY_SEED_SIGNATURE_FALLBACK",
"VERIFY_SEED_SIGNATURE",
"VERIFY_SIGN_HASH"
]
| [] | ["SEED_VALIDITY_DURATION", "SIGNED_URL_DURATION", "BUCKET", "VERIFY_SEED", "VERIFY_SEED_SIGNATURE_FALLBACK", "VERIFY_SEED_SIGNATURE", "VERIFY_SIGN_HASH"] | go | 7 | 0 | |
frappe/commands/utils.py | # -*- coding: utf-8 -*-
import json
import os
import subprocess
import sys
from distutils.spawn import find_executable
import click
import frappe
from frappe.commands import get_site, pass_context
from frappe.exceptions import SiteNotSpecifiedError
from frappe.utils import get_bench_path, update_progress_bar, cint
@click.command('build')
@click.option('--app', help='Build assets for app')
@click.option('--hard-link', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--make-copy', is_flag=True, default=False, help='[DEPRECATED] Copy the files instead of symlinking')
@click.option('--restore', is_flag=True, default=False, help='[DEPRECATED] Copy the files instead of symlinking with force')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', is_flag=True, default=False, help='Force build assets instead of downloading available')
def build(app=None, hard_link=False, make_copy=False, restore=False, verbose=False, force=False):
"Minify + concatenate JS and CSS files, build translations"
frappe.init('')
# don't minify in developer_mode for faster builds
no_compress = frappe.local.conf.developer_mode or False
# dont try downloading assets if force used, app specified or running via CI
if not (force or app or os.environ.get('CI')):
# skip building frappe if assets exist remotely
skip_frappe = frappe.build.download_frappe_assets(verbose=verbose)
else:
skip_frappe = False
if make_copy or restore:
hard_link = make_copy or restore
click.secho(
"bench build: --make-copy and --restore options are deprecated in favour of --hard-link",
fg="yellow",
)
frappe.build.bundle(
skip_frappe=skip_frappe,
no_compress=no_compress,
hard_link=hard_link,
verbose=verbose,
app=app,
)
@click.command('watch')
def watch():
"Watch and concatenate JS and CSS files as and when they change"
import frappe.build
frappe.init('')
frappe.build.watch(True)
@click.command('clear-cache')
@pass_context
def clear_cache(context):
"Clear cache, doctype cache and defaults"
import frappe.sessions
import frappe.website.render
from frappe.desk.notifications import clear_notifications
for site in context.sites:
try:
frappe.connect(site)
frappe.clear_cache()
clear_notifications()
frappe.website.render.clear_cache()
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
"Clear website cache"
import frappe.website.render
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.website.render.clear_cache()
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('destroy-all-sessions')
@click.option('--reason')
@pass_context
def destroy_all_sessions(context, reason=None):
"Clear sessions of all users (logs them out)"
import frappe.sessions
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.sessions.clear_all_sessions(reason)
frappe.db.commit()
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('show-config')
@click.option("--format", "-f", type=click.Choice(["text", "json"]), default="text")
@pass_context
def show_config(context, format):
"Print configuration file to STDOUT in speified format"
if not context.sites:
raise SiteNotSpecifiedError
sites_config = {}
sites_path = os.getcwd()
from frappe.utils.commands import render_table
def transform_config(config, prefix=None):
prefix = f"{prefix}." if prefix else ""
site_config = []
for conf, value in config.items():
if isinstance(value, dict):
site_config += transform_config(value, prefix=f"{prefix}{conf}")
else:
log_value = json.dumps(value) if isinstance(value, list) else value
site_config += [[f"{prefix}{conf}", log_value]]
return site_config
for site in context.sites:
frappe.init(site)
if len(context.sites) != 1 and format == "text":
if context.sites.index(site) != 0:
click.echo()
click.secho(f"Site {site}", fg="yellow")
configuration = frappe.get_site_config(sites_path=sites_path, site_path=site)
if format == "text":
data = transform_config(configuration)
data.insert(0, ['Config','Value'])
render_table(data)
if format == "json":
sites_config[site] = configuration
frappe.destroy()
if format == "json":
click.echo(frappe.as_json(sites_config))
@click.command('reset-perms')
@pass_context
def reset_perms(context):
"Reset permissions for all doctypes"
from frappe.permissions import reset_perms
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
for d in frappe.db.sql_list("""select name from `tabDocType`
where istable=0 and custom=0"""):
frappe.clear_cache(doctype=d)
reset_perms(d)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('execute')
@click.argument('method')
@click.option('--args')
@click.option('--kwargs')
@click.option('--profile', is_flag=True, default=False)
@pass_context
def execute(context, method, args=None, kwargs=None, profile=False):
"Execute a function"
for site in context.sites:
ret = ""
try:
frappe.init(site=site)
frappe.connect()
if args:
try:
args = eval(args)
except NameError:
args = [args]
else:
args = ()
if kwargs:
kwargs = eval(kwargs)
else:
kwargs = {}
if profile:
import cProfile
pr = cProfile.Profile()
pr.enable()
try:
ret = frappe.get_attr(method)(*args, **kwargs)
except Exception:
ret = frappe.safe_eval(method + "(*args, **kwargs)", eval_globals=globals(), eval_locals=locals())
if profile:
import pstats
from six import StringIO
pr.disable()
s = StringIO()
pstats.Stats(pr, stream=s).sort_stats('cumulative').print_stats(.5)
print(s.getvalue())
if frappe.db:
frappe.db.commit()
finally:
frappe.destroy()
if ret:
from frappe.utils.response import json_handler
print(json.dumps(ret, default=json_handler))
if not context.sites:
raise SiteNotSpecifiedError
@click.command('add-to-email-queue')
@click.argument('email-path')
@pass_context
def add_to_email_queue(context, email_path):
"Add an email to the Email Queue"
site = get_site(context)
if os.path.isdir(email_path):
with frappe.init_site(site):
frappe.connect()
for email in os.listdir(email_path):
with open(os.path.join(email_path, email)) as email_data:
kwargs = json.load(email_data)
kwargs['delayed'] = True
frappe.sendmail(**kwargs)
frappe.db.commit()
@click.command('export-doc')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def export_doc(context, doctype, docname):
"Export a single document to csv"
import frappe.modules
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.modules.export_doc(doctype, docname)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('export-json')
@click.argument('doctype')
@click.argument('path')
@click.option('--name', help='Export only one document')
@pass_context
def export_json(context, doctype, path, name=None):
"Export doclist as json to the given path, use '-' as name for Singles."
from frappe.core.doctype.data_import.data_import import export_json
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
export_json(doctype, path, name=name)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('export-csv')
@click.argument('doctype')
@click.argument('path')
@pass_context
def export_csv(context, doctype, path):
"Export data import template with data for DocType"
from frappe.core.doctype.data_import.data_import import export_csv
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
export_csv(doctype, path)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('export-fixtures')
@click.option('--app', default=None, help='Export fixtures of a specific app')
@pass_context
def export_fixtures(context, app=None):
"Export fixtures"
from frappe.utils.fixtures import export_fixtures
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
export_fixtures(app=app)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('import-doc')
@click.argument('path')
@pass_context
def import_doc(context, path, force=False):
"Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported"
from frappe.core.doctype.data_import.data_import import import_doc
if not os.path.exists(path):
path = os.path.join('..', path)
if not os.path.exists(path):
print('Invalid path {0}'.format(path))
sys.exit(1)
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
import_doc(path)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('import-csv')
@click.argument('path')
@click.option('--only-insert', default=False, is_flag=True, help='Do not overwrite existing records')
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--ignore-encoding-errors', default=False, is_flag=True, help='Ignore encoding errors while converting to unicode')
@click.option('--no-email', default=True, is_flag=True, help='Send email if applicable')
@pass_context
def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False, no_email=True):
"Import CSV using data import"
from frappe.core.doctype.data_import_legacy import importer
from frappe.utils.csvutils import read_csv_content
site = get_site(context)
if not os.path.exists(path):
path = os.path.join('..', path)
if not os.path.exists(path):
print('Invalid path {0}'.format(path))
sys.exit(1)
with open(path, 'r') as csvfile:
content = read_csv_content(csvfile.read())
frappe.init(site=site)
frappe.connect()
try:
importer.upload(content, submit_after_import=submit_after_import, no_email=no_email,
ignore_encoding_errors=ignore_encoding_errors, overwrite=not only_insert,
via_console=True)
frappe.db.commit()
except Exception:
print(frappe.get_traceback())
frappe.destroy()
@click.command('data-import')
@click.option('--file', 'file_path', type=click.Path(), required=True, help="Path to import file (.csv, .xlsx)")
@click.option('--doctype', type=str, required=True)
@click.option('--type', 'import_type', type=click.Choice(['Insert', 'Update'], case_sensitive=False), default='Insert', help="Insert New Records or Update Existing Records")
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--mute-emails', default=True, is_flag=True, help='Mute emails during import')
@pass_context
def data_import(context, file_path, doctype, import_type=None, submit_after_import=False, mute_emails=True):
"Import documents in bulk from CSV or XLSX using data import"
from frappe.core.doctype.data_import.data_import import import_file
site = get_site(context)
frappe.init(site=site)
frappe.connect()
import_file(doctype, file_path, import_type, submit_after_import, console=True)
frappe.destroy()
@click.command('bulk-rename')
@click.argument('doctype')
@click.argument('path')
@pass_context
def bulk_rename(context, doctype, path):
"Rename multiple records via CSV file"
from frappe.model.rename_doc import bulk_rename
from frappe.utils.csvutils import read_csv_content
site = get_site(context)
with open(path, 'r') as csvfile:
rows = read_csv_content(csvfile.read())
frappe.init(site=site)
frappe.connect()
bulk_rename(doctype, rows, via_console = True)
frappe.destroy()
@click.command('mariadb')
@pass_context
def mariadb(context):
"""
Enter into mariadb console for a given site.
"""
import os
site = get_site(context)
if not site:
raise SiteNotSpecifiedError
frappe.init(site=site)
# This is assuming you're within the bench instance.
mysql = find_executable('mysql')
os.execv(mysql, [
mysql,
'-u', frappe.conf.db_name,
'-p'+frappe.conf.db_password,
frappe.conf.db_name,
'-h', frappe.conf.db_host or "localhost",
'--pager=less -SFX',
'--safe-updates',
"-A"])
@click.command('postgres')
@pass_context
def postgres(context):
"""
Enter into postgres console for a given site.
"""
site = get_site(context)
frappe.init(site=site)
# This is assuming you're within the bench instance.
psql = find_executable('psql')
subprocess.run([ psql, '-d', frappe.conf.db_name])
@click.command('jupyter')
@pass_context
def jupyter(context):
installed_packages = (r.split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'], encoding='utf8'))
if 'jupyter' not in installed_packages:
subprocess.check_output([sys.executable, '-m', 'pip', 'install', 'jupyter'])
site = get_site(context)
frappe.init(site=site)
jupyter_notebooks_path = os.path.abspath(frappe.get_site_path('jupyter_notebooks'))
sites_path = os.path.abspath(frappe.get_site_path('..'))
try:
os.stat(jupyter_notebooks_path)
except OSError:
print('Creating folder to keep jupyter notebooks at {}'.format(jupyter_notebooks_path))
os.mkdir(jupyter_notebooks_path)
bin_path = os.path.abspath('../env/bin')
print('''
Starting Jupyter notebook
Run the following in your first cell to connect notebook to frappe
```
import frappe
frappe.init(site='{site}', sites_path='{sites_path}')
frappe.connect()
frappe.local.lang = frappe.db.get_default('lang')
frappe.db.connect()
```
'''.format(site=site, sites_path=sites_path))
os.execv('{0}/jupyter'.format(bin_path), [
'{0}/jupyter'.format(bin_path),
'notebook',
jupyter_notebooks_path,
])
@click.command('console')
@pass_context
def console(context):
"Start ipython console for a site"
import warnings
site = get_site(context)
frappe.init(site=site)
frappe.connect()
frappe.local.lang = frappe.db.get_default("lang")
import IPython
all_apps = frappe.get_installed_apps()
failed_to_import = []
for app in all_apps:
try:
locals()[app] = __import__(app)
except ModuleNotFoundError:
failed_to_import.append(app)
all_apps.remove(app)
print("Apps in this namespace:\n{}".format(", ".join(all_apps)))
if failed_to_import:
print("\nFailed to import:\n{}".format(", ".join(failed_to_import)))
warnings.simplefilter('ignore')
IPython.embed(display_banner="", header="", colors="neutral")
@click.command('run-tests')
@click.option('--app', help="For App")
@click.option('--doctype', help="For DocType")
@click.option('--doctype-list-path', help="Path to .txt file for list of doctypes. Example erpnext/tests/server/agriculture.txt")
@click.option('--test', multiple=True, help="Specific test")
@click.option('--ui-tests', is_flag=True, default=False, help="Run UI Tests")
@click.option('--module', help="Run tests in a module")
@click.option('--profile', is_flag=True, default=False)
@click.option('--coverage', is_flag=True, default=False)
@click.option('--skip-test-records', is_flag=True, default=False, help="Don't create test records")
@click.option('--skip-before-tests', is_flag=True, default=False, help="Don't run before tests hook")
@click.option('--junit-xml-output', help="Destination file path for junit xml report")
@click.option('--failfast', is_flag=True, default=False, help="Stop the test run on the first error or failure")
@pass_context
def run_tests(context, app=None, module=None, doctype=None, test=(), profile=False,
coverage=False, junit_xml_output=False, ui_tests = False, doctype_list_path=None,
skip_test_records=False, skip_before_tests=False, failfast=False):
"Run tests"
import frappe.test_runner
tests = test
site = get_site(context)
allow_tests = frappe.get_conf(site).allow_tests
if not (allow_tests or os.environ.get('CI')):
click.secho('Testing is disabled for the site!', bold=True)
click.secho('You can enable tests by entering following command:')
click.secho('bench --site {0} set-config allow_tests true'.format(site), fg='green')
return
frappe.init(site=site)
frappe.flags.skip_before_tests = skip_before_tests
frappe.flags.skip_test_records = skip_test_records
if coverage:
from coverage import Coverage
from frappe.coverage import STANDARD_INCLUSIONS, STANDARD_EXCLUSIONS, FRAPPE_EXCLUSIONS
# Generate coverage report only for app that is being tested
source_path = os.path.join(get_bench_path(), 'apps', app or 'frappe')
omit = STANDARD_EXCLUSIONS[:]
if not app or app == 'frappe':
omit.extend(FRAPPE_EXCLUSIONS)
cov = Coverage(source=[source_path], omit=omit, include=STANDARD_INCLUSIONS)
cov.start()
ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests,
force=context.force, profile=profile, junit_xml_output=junit_xml_output,
ui_tests=ui_tests, doctype_list_path=doctype_list_path, failfast=failfast)
if coverage:
cov.stop()
cov.save()
if len(ret.failures) == 0 and len(ret.errors) == 0:
ret = 0
if os.environ.get('CI'):
sys.exit(ret)
@click.command('run-parallel-tests')
@click.option('--app', help="For App", default='frappe')
@click.option('--build-number', help="Build number", default=1)
@click.option('--total-builds', help="Total number of builds", default=1)
@click.option('--with-coverage', is_flag=True, help="Build coverage file")
@click.option('--use-orchestrator', is_flag=True, help="Use orchestrator to run parallel tests")
@pass_context
def run_parallel_tests(context, app, build_number, total_builds, with_coverage=False, use_orchestrator=False):
site = get_site(context)
if use_orchestrator:
from frappe.parallel_test_runner import ParallelTestWithOrchestrator
ParallelTestWithOrchestrator(app, site=site, with_coverage=with_coverage)
else:
from frappe.parallel_test_runner import ParallelTestRunner
ParallelTestRunner(app, site=site, build_number=build_number, total_builds=total_builds, with_coverage=with_coverage)
@click.command('run-ui-tests')
@click.argument('app')
@click.option('--headless', is_flag=True, help="Run UI Test in headless mode")
@click.option('--parallel', is_flag=True, help="Run UI Test in parallel mode")
@click.option('--ci-build-id')
@pass_context
def run_ui_tests(context, app, headless=False, parallel=True, ci_build_id=None):
"Run UI tests"
site = get_site(context)
app_base_path = os.path.abspath(os.path.join(frappe.get_app_path(app), '..'))
site_url = frappe.utils.get_site_url(site)
admin_password = frappe.get_conf(site).admin_password
# override baseUrl using env variable
site_env = f'CYPRESS_baseUrl={site_url}'
password_env = f'CYPRESS_adminPassword={admin_password}' if admin_password else ''
os.chdir(app_base_path)
node_bin = subprocess.getoutput("npm bin")
cypress_path = f"{node_bin}/cypress"
plugin_path = f"{node_bin}/../cypress-file-upload"
testing_library_path = f"{node_bin}/../@testing-library"
# check if cypress in path...if not, install it.
if not (
os.path.exists(cypress_path)
and os.path.exists(plugin_path)
and os.path.exists(testing_library_path)
and cint(subprocess.getoutput("npm view cypress version")[:1]) >= 6
):
# install cypress
click.secho("Installing Cypress...", fg="yellow")
frappe.commands.popen("yarn add cypress@^6 cypress-file-upload@^5 @testing-library/cypress@^8 --no-lockfile")
# run for headless mode
run_or_open = 'run --browser chrome --record' if headless else 'open'
command = '{site_env} {password_env} {cypress} {run_or_open}'
formatted_command = command.format(site_env=site_env, password_env=password_env, cypress=cypress_path, run_or_open=run_or_open)
if parallel:
formatted_command += ' --parallel'
if ci_build_id:
formatted_command += f' --ci-build-id {ci_build_id}'
click.secho("Running Cypress...", fg="yellow")
frappe.commands.popen(formatted_command, cwd=app_base_path, raise_err=True)
@click.command('serve')
@click.option('--port', default=8000)
@click.option('--profile', is_flag=True, default=False)
@click.option('--noreload', "no_reload", is_flag=True, default=False)
@click.option('--nothreading', "no_threading", is_flag=True, default=False)
@pass_context
def serve(context, port=None, profile=False, no_reload=False, no_threading=False, sites_path='.', site=None):
"Start development web server"
import frappe.app
if not context.sites:
site = None
else:
site = context.sites[0]
frappe.app.serve(port=port, profile=profile, no_reload=no_reload, no_threading=no_threading, site=site, sites_path='.')
@click.command('request')
@click.option('--args', help='arguments like `?cmd=test&key=value` or `/api/request/method?..`')
@click.option('--path', help='path to request JSON')
@pass_context
def request(context, args=None, path=None):
"Run a request as an admin"
import frappe.handler
import frappe.api
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
if args:
if "?" in args:
frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")])
else:
frappe.local.form_dict = frappe._dict()
if args.startswith("/api/method"):
frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1]
elif path:
with open(os.path.join('..', path), 'r') as f:
args = json.loads(f.read())
frappe.local.form_dict = frappe._dict(args)
frappe.handler.execute_cmd(frappe.form_dict.cmd)
print(frappe.response)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('make-app')
@click.argument('destination')
@click.argument('app_name')
def make_app(destination, app_name):
"Creates a boilerplate app"
from frappe.utils.boilerplate import make_boilerplate
make_boilerplate(destination, app_name)
@click.command('set-config')
@click.argument('key')
@click.argument('value')
@click.option('-g', '--global', 'global_', is_flag=True, default=False, help='Set value in bench config')
@click.option('-p', '--parse', is_flag=True, default=False, help='Evaluate as Python Object')
@click.option('--as-dict', is_flag=True, default=False, help='Legacy: Evaluate as Python Object')
@pass_context
def set_config(context, key, value, global_=False, parse=False, as_dict=False):
"Insert/Update a value in site_config.json"
from frappe.installer import update_site_config
if as_dict:
from frappe.utils.commands import warn
warn("--as-dict will be deprecated in v14. Use --parse instead", category=PendingDeprecationWarning)
parse = as_dict
if parse:
import ast
value = ast.literal_eval(value)
if global_:
sites_path = os.getcwd()
common_site_config_path = os.path.join(sites_path, 'common_site_config.json')
update_site_config(key, value, validate=False, site_config_path=common_site_config_path)
else:
for site in context.sites:
frappe.init(site=site)
update_site_config(key, value, validate=False)
frappe.destroy()
@click.command("version")
@click.option("-f", "--format", "output",
type=click.Choice(["plain", "table", "json", "legacy"]), help="Output format", default="legacy")
def get_version(output):
"""Show the versions of all the installed apps."""
from git import Repo
from frappe.utils.commands import render_table
from frappe.utils.change_log import get_app_branch
frappe.init("")
data = []
for app in sorted(frappe.get_all_apps()):
module = frappe.get_module(app)
app_hooks = frappe.get_module(app + ".hooks")
repo = Repo(frappe.get_app_path(app, ".."))
app_info = frappe._dict()
app_info.app = app
app_info.branch = get_app_branch(app)
app_info.commit = repo.head.object.hexsha[:7]
app_info.version = getattr(app_hooks, f"{app_info.branch}_version", None) or module.__version__
data.append(app_info)
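	# Render the collected rows in the requested format: the dict below maps each
	# --format choice to a small renderer and the selected one is invoked immediately.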
{
"legacy": lambda: [
click.echo(f"{app_info.app} {app_info.version}")
for app_info in data
],
"plain": lambda: [
click.echo(f"{app_info.app} {app_info.version} {app_info.branch} ({app_info.commit})")
for app_info in data
],
"table": lambda: render_table(
[["App", "Version", "Branch", "Commit"]] +
[
[app_info.app, app_info.version, app_info.branch, app_info.commit]
for app_info in data
]
),
"json": lambda: click.echo(json.dumps(data, indent=4)),
}[output]()
@click.command('rebuild-global-search')
@click.option('--static-pages', is_flag=True, default=False, help='Rebuild global search for static pages')
@pass_context
def rebuild_global_search(context, static_pages=False):
	'''Rebuild the global search index for all doctypes in the current site (and, with --static-pages, for website routes)'''
from frappe.utils.global_search import (get_doctypes_with_global_search, rebuild_for_doctype,
get_routes_to_index, add_route_to_global_search, sync_global_search)
for site in context.sites:
try:
frappe.init(site)
frappe.connect()
if static_pages:
routes = get_routes_to_index()
for i, route in enumerate(routes):
add_route_to_global_search(route)
frappe.local.request = None
update_progress_bar('Rebuilding Global Search', i, len(routes))
sync_global_search()
else:
doctypes = get_doctypes_with_global_search()
for i, doctype in enumerate(doctypes):
rebuild_for_doctype(doctype)
update_progress_bar('Rebuilding Global Search', i, len(doctypes))
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
commands = [
build,
clear_cache,
clear_website_cache,
jupyter,
console,
destroy_all_sessions,
execute,
export_csv,
export_doc,
export_fixtures,
export_json,
get_version,
import_csv,
data_import,
import_doc,
make_app,
mariadb,
postgres,
request,
reset_perms,
run_tests,
run_ui_tests,
serve,
set_config,
show_config,
watch,
bulk_rename,
add_to_email_queue,
rebuild_global_search,
run_parallel_tests
]
| []
| []
| [
"CI"
]
| [] | ["CI"] | python | 1 | 0 | |
functions/validate_training_job_tags/app.py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os
def validate_tags(tagNames, trainingJobTags):
print('Validating the Existance of {} in {}'.format(tagNames, trainingJobTags))
passedValidation = []
failedValidation = []
# Validate Tags Exist by enumerating Training Job Tags, Matching to Configured Tag Names, and Looking for Values
# Note: Could be extended to perform additional validatation on Tag Value (ie: regex)
for tag in tagNames:
if tag['Value'] in trainingJobTags:
print('Tag {} Exists in Training Job Tags'.format(tag['Value']))
passedValidation.append(tag)
else:
print('Tag {} Does Not Exist in Training Job Tags'.format(tag['Value']))
failedValidation.append(tag)
if len(failedValidation) > 0:
print('The Folllowing Tags Failed Validation: {}'.format(failedValidation))
return 'FAILED'
else:
return 'PASSED'
def lambda_handler(event, context):
print(event)
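    # Expected event shape (assumed, as passed in by the calling workflow):
    # {"trainingJobName": "...", "trainingJobTags": ["<tag value>", ...]}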
trainingJobName = event['trainingJobName']
trainingJobTags = event['trainingJobTags']
# Map Tag Names to CF Parameters
tagNames = [
{
'Name': 'TAG_MODEL_DB_SYNC',
'Value': os.environ['TAG_MODEL_DB_SYNC'],
},
{
'Name': 'TAG_MODEL_DB_PROJECT_NAME',
'Value': os.environ['TAG_MODEL_DB_PROJECT_NAME'],
},
{
'Name': 'TAG_MODEL_DB_PROJECT_USER',
'Value': os.environ['TAG_MODEL_DB_PROJECT_USER'],
},
{
'Name': 'TAG_MODEL_DB_PROJECT_DESC',
'Value': os.environ['TAG_MODEL_DB_PROJECT_DESC'],
},
{
'Name': 'TAG_MODEL_DB_MODEL_NAME',
'Value': os.environ['TAG_MODEL_DB_MODEL_NAME'],
},
{
'Name': 'TAG_MODEL_DB_MODEL_TYPE',
'Value': os.environ['TAG_MODEL_DB_MODEL_TYPE'],
}
]
try:
# Perform Validation
validationResult = validate_tags(tagNames, trainingJobTags)
return {
'trainingJobName': trainingJobName,
'tagNames': tagNames,
'trainingJobTags': trainingJobTags,
'trainingJobTagValidation': validationResult
}
except Exception as e:
message = 'Error validating Training Job Tags: {}'.format(e)
        raise Exception(message)
| []
| []
| [
"TAG_MODEL_DB_PROJECT_USER",
"TAG_MODEL_DB_PROJECT_DESC",
"TAG_MODEL_DB_MODEL_TYPE",
"TAG_MODEL_DB_MODEL_NAME",
"TAG_MODEL_DB_SYNC",
"TAG_MODEL_DB_PROJECT_NAME"
]
| [] | ["TAG_MODEL_DB_PROJECT_USER", "TAG_MODEL_DB_PROJECT_DESC", "TAG_MODEL_DB_MODEL_TYPE", "TAG_MODEL_DB_MODEL_NAME", "TAG_MODEL_DB_SYNC", "TAG_MODEL_DB_PROJECT_NAME"] | python | 6 | 0 | |
freezer/scheduler/scheduler_job.py | """
Copyright 2015 Hewlett-Packard
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
import os
import subprocess
import tempfile
import time
from freezer.utils import utils
from oslo_config import cfg
from oslo_log import log
from six.moves import configparser
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class StopState(object):
@staticmethod
def stop(job, doc):
job.job_doc = doc
job.event = Job.NO_EVENT
job.job_doc_status = Job.STOP_STATUS
job.scheduler.update_job(job.id, job.job_doc)
return Job.NO_EVENT
@staticmethod
def abort(job, doc):
return StopState.stop(job, doc)
@staticmethod
def start(job, doc):
job.job_doc = doc
job.event = Job.NO_EVENT
job.job_doc_status = Job.STOP_STATUS
job.schedule()
job.scheduler.update_job(job.id, job.job_doc)
return Job.NO_EVENT
@staticmethod
def remove(job):
job.unschedule()
job.job_doc_status = Job.REMOVED_STATUS
return Job.NO_EVENT
class ScheduledState(object):
@staticmethod
def stop(job, doc):
job.unschedule()
job.scheduler.update_job(job.id, job.job_doc)
return Job.STOP_EVENT
@staticmethod
def abort(job, doc):
return StopState.stop(job, doc)
@staticmethod
def start(job, doc):
job.event = Job.NO_EVENT
job.scheduler.update_job(job.id, job.job_doc)
return Job.NO_EVENT
@staticmethod
def remove(job):
job.unschedule()
job.job_doc_status = Job.REMOVED_STATUS
return Job.NO_EVENT
class RunningState(object):
@staticmethod
def stop(job, doc):
job.event = Job.STOP_EVENT
return Job.NO_EVENT
@staticmethod
def abort(job, doc):
job.event = Job.ABORT_EVENT
job.scheduler.update_job(job.id, job.job_doc)
return Job.ABORTED_RESULT
@staticmethod
def start(job, doc):
job.event = Job.NO_EVENT
job.scheduler.update_job(job.id, job.job_doc)
return Job.NO_EVENT
@staticmethod
def remove(job):
job.event = Job.REMOVE_EVENT
return Job.NO_EVENT
class Job(object):
NO_EVENT = ''
STOP_EVENT = 'stop'
START_EVENT = 'start'
ABORT_EVENT = 'abort'
REMOVE_EVENT = 'remove'
STOP_STATUS = 'stop'
SCHEDULED_STATUS = 'scheduled'
RUNNING_STATUS = 'running'
REMOVED_STATUS = 'removed'
COMPLETED_STATUS = 'completed'
FAIL_RESULT = 'fail'
SUCCESS_RESULT = 'success'
ABORTED_RESULT = 'aborted'
TIME_NULL = -1
@staticmethod
def create(scheduler, executable, job_doc):
job = Job(scheduler, executable, job_doc)
if job.job_doc_status in ['running', 'scheduled']:
LOG.warning('Resetting {0} status from job {1}'
.format(job.job_doc_status, job.id))
if job.job_doc_status == 'stop' and not job.event:
LOG.info('Job {0} was stopped.'.format(job.id))
job.event = Job.STOP_EVENT
elif not job.event:
LOG.info('Autostart Job {0}'.format(job.id))
job.event = Job.START_EVENT
return job
def __init__(self, scheduler, executable, job_doc):
self.scheduler = scheduler
self.executable = executable
self.job_doc = job_doc
self.process = None
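        # state machine pointer: one of StopState / ScheduledState / RunningState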
self.state = StopState
def remove(self):
with self.scheduler.lock:
# delegate to state object
LOG.info('REMOVE job {0}'.format(self.id))
self.state.remove(self)
@property
def id(self):
return self.job_doc['job_id']
@property
def session_id(self):
return self.job_doc.get('session_id', '')
@session_id.setter
def session_id(self, value):
self.job_doc['session_id'] = value
@property
def session_tag(self):
return self.job_doc.get('session_tag', 0)
@session_tag.setter
def session_tag(self, value):
self.job_doc['session_tag'] = value
@property
def event(self):
return self.job_doc['job_schedule'].get('event', '')
@event.setter
def event(self, value):
self.job_doc['job_schedule']['event'] = value
@property
def job_doc_status(self):
return self.job_doc['job_schedule'].get('status', '')
@job_doc_status.setter
def job_doc_status(self, value):
self.job_doc['job_schedule']['status'] = value
@property
def result(self):
return self.job_doc['job_schedule'].get('result', '')
@result.setter
def result(self, value):
self.job_doc['job_schedule']['result'] = value
def can_be_removed(self):
return self.job_doc_status == Job.REMOVED_STATUS
@staticmethod
def save_action_to_file(action, f):
parser = configparser.ConfigParser()
parser.add_section('action')
for action_k, action_v in action.items():
parser.set('action', action_k, action_v)
parser.write(f)
f.seek(0)
@property
def schedule_date(self):
return self.job_doc['job_schedule'].get('schedule_date', '')
@property
def schedule_interval(self):
return self.job_doc['job_schedule'].get('schedule_interval', '')
@property
def schedule_start_date(self):
return self.job_doc['job_schedule'].get('schedule_start_date', '')
@property
def schedule_end_date(self):
return self.job_doc['job_schedule'].get('schedule_end_date', '')
@property
def schedule_cron_fields(self):
cron_fields = ['year', 'month', 'day', 'week', 'day_of_week',
'hour', 'minute', 'second']
cron_schedule = {}
for cron in self.job_doc['job_schedule'].keys():
if cron.startswith('schedule_'):
cron_key = cron.split('_', 1)[1]
cron_schedule.update({
cron_key: self.job_doc['job_schedule'][cron]})
return {key: value
for key, value in cron_schedule.items()
if key in cron_fields}
@property
def scheduled(self):
return self.scheduler.is_scheduled(self.id)
def get_schedule_args(self):
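        # Build the trigger kwargs handed straight to the scheduler's add_job():
        # a one-shot 'date' trigger, an 'interval' trigger, or a 'cron' trigger,
        # depending on what the job document specifies.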
def get_start_date(date):
# start_date format "%Y-%m-%dT%H:%M:%S"
now = datetime.datetime.now()
start_date = now + datetime.timedelta(0, 2, 0)
if (utils.date_to_timestamp(date) >
utils.date_to_timestamp(now.isoformat().split('.')[0])):
start_date = datetime.datetime.strptime(
date, "%Y-%m-%dT%H:%M:%S")
return start_date
def get_end_date(start, end):
            # start/end format "%Y-%m-%dT%H:%M:%S"
end_date = datetime.datetime.strptime(end, "%Y-%m-%dT%H:%M:%S")
if (utils.date_to_timestamp(start) > utils.date_to_timestamp(end)):
end_date = None
return end_date
kwargs_date = {}
if self.schedule_start_date:
kwargs_date.update({
'start_date': get_start_date(self.schedule_start_date)
})
if self.schedule_end_date:
end_date = get_end_date(self.schedule_start_date,
self.schedule_end_date)
kwargs_date.update({
'end_date': end_date
})
if self.schedule_date:
return {'trigger': 'date',
'run_date': self.schedule_date}
elif self.schedule_interval:
kwargs = {'trigger': 'interval'}
kwargs.update(kwargs_date)
if self.schedule_interval == 'continuous':
kwargs.update({'seconds': 1})
else:
val, unit = self.schedule_interval.split(' ')
kwargs.update({unit: int(val)})
return kwargs
elif self.schedule_cron_fields:
kwargs = {'trigger': 'cron'}
kwargs.update(kwargs_date)
cron_fields = self.schedule_cron_fields
kwargs.update(cron_fields)
return kwargs
else:
# no scheduling information, schedule to start within a few seconds
return {'trigger': 'date',
'run_date': datetime.datetime.now() +
datetime.timedelta(0, 2, 0)}
def process_event(self, job_doc):
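        # Drain pending events under the scheduler lock, delegating each one to
        # the current state object until no follow-up event is produced.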
with self.scheduler.lock:
next_event = job_doc['job_schedule'].get('event', '')
while next_event:
if next_event == Job.STOP_EVENT:
if isinstance(self.state(), StopState):
LOG.info('JOB {0} event: STOP'.format(self.id))
next_event = self.state.stop(self, job_doc)
elif next_event == Job.START_EVENT:
LOG.info('JOB {0} event: START'.format(self.id))
next_event = self.state.start(self, job_doc)
elif next_event == Job.ABORT_EVENT:
LOG.info('JOB {0} event: ABORT'.format(self.id))
next_event = self.state.abort(self, job_doc)
elif next_event == Job.ABORTED_RESULT:
LOG.info('JOB {0} aborted.'.format(self.id))
break
def upload_metadata(self, metadata_string):
try:
metadata = json.loads(metadata_string)
if metadata:
metadata['job_id'] = self.id
self.scheduler.upload_metadata(metadata)
LOG.info("Job {0}, freezer action metadata uploaded"
.format(self.id))
except Exception as e:
LOG.error('metrics upload error: {0}'.format(e))
def execute_job_action(self, job_action):
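        # Run a single freezer action in a freezer-agent subprocess, retrying up
        # to max_retries times with max_retries_interval seconds between attempts.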
max_tries = (job_action.get('max_retries', 0) + 1)
tries = max_tries
freezer_action = job_action.get('freezer_action', {})
max_retries_interval = job_action.get('max_retries_interval', 60)
action_name = freezer_action.get('action', '')
while tries:
with tempfile.NamedTemporaryFile(delete=False) as config_file:
self.save_action_to_file(freezer_action, config_file)
config_file_name = config_file.name
freezer_command = '{0} --metadata-out - --config {1}'.\
format(self.executable, config_file.name)
self.process = subprocess.Popen(freezer_command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ.copy())
# store the pid for this process in the api
try:
self.job_doc['job_schedule']['current_pid'] = \
self.process.pid
self.scheduler.update_job(self.job_doc['job_id'],
self.job_doc)
except Exception as error:
LOG.error("Error saving the process id {}".format(error))
output, error = self.process.communicate()
# ensure the tempfile gets deleted
utils.delete_file(config_file_name)
if error:
LOG.error("Freezer client error: {0}".format(error))
elif output:
self.upload_metadata(output)
if self.process.returncode == -15:
# This means the job action was aborted by the scheduler
LOG.warning('Freezer-agent was killed by the scheduler. '
'Cleanup should be done manually: container, '
'mountpoint and lvm snapshots.')
return Job.ABORTED_RESULT
elif self.process.returncode:
# ERROR
tries -= 1
if tries:
LOG.warning('Job {0} failed {1} action,'
' retrying in {2} seconds'
.format(self.id, action_name,
max_retries_interval))
time.sleep(max_retries_interval)
else:
# SUCCESS
LOG.info('Job {0} action {1}'
' returned success exit code'.
format(self.id, action_name))
return Job.SUCCESS_RESULT
LOG.error('Job {0} action {1} failed after {2} tries'
.format(self.id, action_name, max_tries))
return Job.FAIL_RESULT
def contains_exec(self):
jobs = self.job_doc.get('job_actions')
for job in jobs:
freezer_action = job.get('freezer_action')
action = freezer_action.get('action')
if action == 'exec':
return True
return False
def update_job_schedule_doc(self, **kwargs):
"""
Updates the job_schedule section of the job doc with the
provided keyword args. No checks about accepted key/values
are being made here since they may vary in the future.
:param kwargs: keyword args to add
:return: None
"""
job_schedule = self.job_doc['job_schedule']
job_schedule.update(kwargs)
def execute(self):
result = Job.SUCCESS_RESULT
with self.scheduler.lock:
LOG.info('job {0} running'.format(self.id))
self.state = RunningState
self.update_job_schedule_doc(status=Job.RUNNING_STATUS,
result="",
time_started=int(time.time()),
time_ended=Job.TIME_NULL)
self.scheduler.update_job_schedule(
self.id,
self.job_doc['job_schedule'])
self.start_session()
        # if the job contains an exec action and the scheduler was started with
        # --disable-exec, job execution should fail
if self.contains_exec() and CONF.disable_exec:
LOG.info("Job {0} failed because it contains exec action "
"and exec actions are disabled by scheduler"
.format(self.id))
self.result = Job.FAIL_RESULT
self.finish()
return
for job_action in self.job_doc.get('job_actions', []):
if job_action.get('mandatory', False) or\
(result == Job.SUCCESS_RESULT):
action_result = self.execute_job_action(job_action)
if action_result == Job.FAIL_RESULT:
result = Job.FAIL_RESULT
if action_result == Job.ABORTED_RESULT:
result = Job.ABORTED_RESULT
else:
freezer_action = job_action.get('freezer_action', {})
action_name = freezer_action.get('action', '')
LOG.warning("skipping {0} action".
format(action_name))
self.result = result
self.finish()
def finish(self):
self.update_job_schedule_doc(time_ended=int(time.time()))
self.end_session(self.result)
with self.scheduler.lock:
if self.event == Job.REMOVE_EVENT:
self.unschedule()
self.job_doc_status = Job.REMOVED_STATUS
return
if not self.scheduled:
self.job_doc_status = Job.COMPLETED_STATUS
self.state = StopState
self.scheduler.update_job(self.id, self.job_doc)
return
if self.event in [Job.STOP_EVENT, Job.ABORT_EVENT]:
self.unschedule()
self.job_doc_status = Job.COMPLETED_STATUS
self.scheduler.update_job(self.id, self.job_doc)
else:
self.job_doc_status = Job.SCHEDULED_STATUS
self.state = ScheduledState
self.scheduler.update_job_schedule(
self.id,
self.job_doc['job_schedule'])
def start_session(self):
if not self.session_id:
return
retry = 5
while retry:
try:
resp = self.scheduler.start_session(self.session_id,
self.id,
self.session_tag)
if resp['result'] == 'success':
self.session_tag = resp['session_tag']
return
except Exception as e:
LOG.error('Error while starting session {0}. {1}'.
format(self.session_id, e))
LOG.warning('Retrying to start session {0}'.
format(self.session_id))
retry -= 1
LOG.error('Unable to start session {0}'.format(self.session_id))
def end_session(self, result):
if not self.session_id:
return
retry = 5
while retry:
try:
resp = self.scheduler.end_session(self.session_id,
self.id,
self.session_tag,
result)
if resp['result'] == 'success':
return
except Exception as e:
LOG.error('Error while ending session {0}. {1}'.
format(self.session_id, e))
LOG.warning('Retrying to end session {0}'.
format(self.session_id))
retry -= 1
LOG.error('Unable to end session {0}'.format(self.session_id))
def schedule(self):
try:
kwargs = self.get_schedule_args()
self.scheduler.add_job(self.execute, id=self.id,
executor='threadpool',
misfire_grace_time=3600, **kwargs)
except Exception as e:
LOG.error("Unable to schedule job {0}: {1}".
format(self.id, e))
LOG.info('scheduler job with parameters {0}'.format(kwargs))
if self.scheduled:
self.job_doc_status = Job.SCHEDULED_STATUS
self.state = ScheduledState
else:
# job not scheduled or already started and waiting for lock
self.job_doc_status = Job.COMPLETED_STATUS
self.state = StopState
def unschedule(self):
try:
            # jobs that are already executing are not present in the apscheduler list
self.scheduler.remove_job(job_id=self.id)
except Exception:
pass
self.event = Job.NO_EVENT
self.job_doc_status = Job.STOP_STATUS
self.state = StopState
def terminate(self):
if self.process:
self.process.terminate()
def kill(self):
if self.process:
self.process.kill()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/app/app.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
"k8s.io/klog"
"sigs.k8s.io/kubetest2/pkg/exec"
"sigs.k8s.io/kubetest2/pkg/metadata"
"sigs.k8s.io/kubetest2/pkg/types"
)
// Main implements the kubetest2 deployer binary entrypoint
// Each deployer binary should invoke this, in addition to loading deployers
func Main(deployerName string, newDeployer types.NewDeployer) {
// see cmd.go for the rest of the CLI boilerplate
if err := Run(deployerName, newDeployer); err != nil {
		// only print the error if it's not an IncorrectUsage (which we've
		// already output along with usage)
if _, isUsage := err.(types.IncorrectUsage); !isUsage {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
}
os.Exit(1)
}
}
// RealMain contains nearly all of the application logic / control flow
// beyond the command line boilerplate
func RealMain(opts types.Options, d types.Deployer, tester types.Tester) (result error) {
/*
Now for the core kubetest2 logic:
- build
- cluster up
- test
- cluster down
Throughout this, collecting metadata and writing it out on exit
*/
// TODO(bentheelder): signal handling & timeout
klog.Infof("RunDir for this run: %q", opts.RunDir())
// ensure the run dir
if err := os.MkdirAll(opts.RunDir(), os.ModePerm); err != nil {
return err
}
if err := writeVersionToMetadataJSON(opts, d); err != nil {
return err
}
// setup junit writer
junitRunner, err := os.Create(
filepath.Join(opts.RunDir(), "junit_runner.xml"),
)
if err != nil {
return errors.Wrap(err, "could not create runner output")
}
writer := metadata.NewWriter("kubetest2", junitRunner)
// defer writing out the metadata on exit
// NOTE: defer is LIFO, so this should actually be the finish time
defer func() {
// TODO(bentheelder): instead of keeping the first error, consider
// a multi-error type
if err := writer.Finish(); err != nil && result == nil {
result = err
}
if err := junitRunner.Sync(); err != nil && result == nil {
result = err
}
if err := junitRunner.Close(); err != nil && result == nil {
result = err
}
}()
klog.Infof("ID for this run: %q", opts.RunID())
// build if specified
if opts.ShouldBuild() {
if err := writer.WrapStep("Build", d.Build); err != nil {
// we do not continue to up / test etc. if build fails
return err
}
}
// ensure tearing down the cluster happens last, even if up or test fails.
defer func() {
if opts.ShouldDown() {
// TODO(bentheelder): instead of keeping the first error, consider
// a multi-error type
if err := writer.WrapStep("Down", d.Down); err != nil && result == nil {
result = err
}
}
}()
// up a cluster
if opts.ShouldUp() {
// TODO(bentheelder): this should write out to JUnit
if err := writer.WrapStep("Up", d.Up); err != nil {
// we do not continue to test if build fails
return err
}
}
// and finally test, if a test was specified
if opts.ShouldTest() {
test := exec.Command(tester.TesterPath, tester.TesterArgs...)
exec.InheritOutput(test)
envsForTester := os.Environ()
		// We expose both ARTIFACTS and KUBETEST2_RUN_DIR so we can be more granular about caching vs output in future.
// also add run_dir to $PATH for locally built binaries
updatedPath := opts.RunDir() + string(filepath.ListSeparator) + os.Getenv("PATH")
envsForTester = append(envsForTester, fmt.Sprintf("%s=%s", "PATH", updatedPath))
envsForTester = append(envsForTester, fmt.Sprintf("%s=%s", "ARTIFACTS", opts.RunDir()))
envsForTester = append(envsForTester, fmt.Sprintf("%s=%s", "KUBETEST2_RUN_DIR", opts.RunDir()))
envsForTester = append(envsForTester, fmt.Sprintf("%s=%s", "KUBETEST2_RUN_ID", opts.RunID()))
		// If the deployer provides a kubeconfig, pass it to the tester;
		// otherwise assume it is handled out of band by default methods like
		// ~/.kube/config
if dWithKubeconfig, ok := d.(types.DeployerWithKubeconfig); ok {
if kconfig, err := dWithKubeconfig.Kubeconfig(); err == nil {
envsForTester = append(envsForTester, fmt.Sprintf("%s=%s", "KUBECONFIG", kconfig))
}
}
test.SetEnv(envsForTester...)
var testErr error
if !opts.SkipTestJUnitReport() {
testErr = writer.WrapStep("Test", test.Run)
} else {
testErr = test.Run()
}
if dWithPostTester, ok := d.(types.DeployerWithPostTester); ok {
if err := dWithPostTester.PostTest(testErr); err != nil {
return err
}
}
if testErr != nil {
return testErr
}
}
return nil
}
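// writeVersionToMetadataJSON records the kubetest2 version (taken from the
// KUBETEST2_VERSION environment variable) and, when the deployer implements
// DeployerWithVersion, the deployer version into <run-dir>/metadata.json.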
func writeVersionToMetadataJSON(opts types.Options, d types.Deployer) error {
// setup the json metadata writer
metadataJSON, err := os.Create(
filepath.Join(opts.RunDir(), "metadata.json"),
)
if err != nil {
return err
}
meta, err2 := metadata.NewCustomJSON(nil)
if err2 != nil {
return err2
}
if err := meta.Add("kubetest-version", os.Getenv("KUBETEST2_VERSION")); err != nil {
return err
}
if dWithVersion, ok := d.(types.DeployerWithVersion); ok {
if err := meta.Add("deployer-version", dWithVersion.Version()); err != nil {
return err
}
}
if err := meta.Write(metadataJSON); err != nil {
return err
}
if err := metadataJSON.Sync(); err != nil {
return err
}
if err := metadataJSON.Close(); err != nil {
return err
}
return nil
}
| [
"\"PATH\"",
"\"KUBETEST2_VERSION\""
]
| []
| [
"KUBETEST2_VERSION",
"PATH"
]
| [] | ["KUBETEST2_VERSION", "PATH"] | go | 2 | 0 | |
cmd/clone.go | // Package cmd encapsulates the logic for all cli commands
package cmd
import (
"bufio"
"fmt"
"log"
"os"
"os/exec"
"strconv"
"strings"
"github.com/gabrie30/ghorg/colorlog"
"github.com/gabrie30/ghorg/configs"
"github.com/gabrie30/ghorg/scm"
"github.com/korovkin/limiter"
"github.com/spf13/cobra"
)
var (
protocol string
path string
parentFolder string
branch string
token string
cloneType string
scmType string
bitbucketUsername string
namespace string
color string
baseURL string
concurrency string
outputDir string
topics string
skipArchived bool
skipForks bool
backup bool
args []string
cloneErrors []string
cloneInfos []string
targetCloneSource string
matchPrefix string
)
func init() {
rootCmd.PersistentFlags().StringVarP(&color, "color", "", "", "GHORG_COLOR - toggles colorful output on/off (default on)")
rootCmd.AddCommand(cloneCmd)
cloneCmd.Flags().StringVar(&protocol, "protocol", "", "GHORG_CLONE_PROTOCOL - protocol to clone with, ssh or https, (default https)")
cloneCmd.Flags().StringVarP(&path, "path", "p", "", "GHORG_ABSOLUTE_PATH_TO_CLONE_TO - absolute path the ghorg_* directory will be created. Must end with / (default $HOME/Desktop/ghorg)")
cloneCmd.Flags().StringVarP(&branch, "branch", "b", "", "GHORG_BRANCH - branch left checked out for each repo cloned (default master)")
cloneCmd.Flags().StringVarP(&token, "token", "t", "", "GHORG_GITHUB_TOKEN/GHORG_GITLAB_TOKEN/GHORG_GITEA_TOKEN/GHORG_BITBUCKET_APP_PASSWORD - scm token to clone with")
cloneCmd.Flags().StringVarP(&bitbucketUsername, "bitbucket-username", "", "", "GHORG_BITBUCKET_USERNAME - bitbucket only: username associated with the app password")
cloneCmd.Flags().StringVarP(&scmType, "scm", "s", "", "GHORG_SCM_TYPE - type of scm used, github, gitlab, gitea or bitbucket (default github)")
cloneCmd.Flags().StringVarP(&cloneType, "clone-type", "c", "", "GHORG_CLONE_TYPE - clone target type, user or org (default org)")
cloneCmd.Flags().BoolVar(&skipArchived, "skip-archived", false, "GHORG_SKIP_ARCHIVED - skips archived repos, github/gitlab/gitea only")
cloneCmd.Flags().BoolVar(&skipForks, "skip-forks", false, "GHORG_SKIP_FORKS - skips repo if its a fork, github/gitlab/gitea only")
cloneCmd.Flags().BoolVar(&skipArchived, "preserve-dir", false, "GHORG_PRESERVE_DIRECTORY_STRUCTURE - clones repos in a directory structure that matches gitlab namespaces eg company/unit/subunit/app would clone into */unit/subunit/app, gitlab only")
cloneCmd.Flags().BoolVar(&backup, "backup", false, "GHORG_BACKUP - backup mode, clone as mirror, no working copy (ignores branch parameter)")
	cloneCmd.Flags().StringVarP(&baseURL, "base-url", "", "", "GHORG_SCM_BASE_URL - change SCM base url, for self-hosted instances (currently gitlab, gitea and github; use format https://git.mydomain.com/api/v3)")
cloneCmd.Flags().StringVarP(&concurrency, "concurrency", "", "", "GHORG_CONCURRENCY - max goroutines to spin up while cloning (default 25)")
cloneCmd.Flags().StringVarP(&topics, "topics", "", "", "GHORG_TOPICS - comma separated list of github/gitea topics to filter for")
cloneCmd.Flags().StringVarP(&outputDir, "output-dir", "", "", "GHORG_OUTPUT_DIR - name of directory repos will be cloned into, will force underscores (default {org/repo being cloned})")
cloneCmd.Flags().StringVarP(&matchPrefix, "match-prefix", "", "", "GHORG_MATCH_PREFIX - only clone repos with matching prefix, can be a comma separated list (default \"\")")
}
var cloneCmd = &cobra.Command{
Use: "clone",
Short: "Clone user or org repos from GitHub, GitLab, Gitea or Bitbucket",
	Long: `Clone user or org repos from GitHub, GitLab, Gitea or Bitbucket. See $HOME/ghorg/conf.yaml for defaults; it's likely you will need to update some of these values or use the flags to overwrite them. Values are set first by a default value, then based off what is set in $HOME/ghorg/conf.yaml, finally the cli flags, which have the highest level of precedence.`,
Run: cloneFunc,
}
func cloneFunc(cmd *cobra.Command, argz []string) {
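	// Flag values take precedence over conf.yaml defaults: every flag that was
	// explicitly set is copied into its matching GHORG_* environment variable
	// here, so the rest of the clone logic only reads os.Getenv.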
if cmd.Flags().Changed("color") {
colorToggle := cmd.Flag("color").Value.String()
if colorToggle == "on" {
os.Setenv("GHORG_COLOR", colorToggle)
} else {
os.Setenv("GHORG_COLOR", "off")
}
}
if cmd.Flags().Changed("path") {
absolutePath := ensureTrailingSlash(cmd.Flag("path").Value.String())
os.Setenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO", absolutePath)
}
if cmd.Flags().Changed("protocol") {
protocol := cmd.Flag("protocol").Value.String()
os.Setenv("GHORG_CLONE_PROTOCOL", protocol)
}
if cmd.Flags().Changed("branch") {
os.Setenv("GHORG_BRANCH", cmd.Flag("branch").Value.String())
}
if cmd.Flags().Changed("bitbucket-username") {
os.Setenv("GHORG_BITBUCKET_USERNAME", cmd.Flag("bitbucket-username").Value.String())
}
if cmd.Flags().Changed("clone-type") {
cloneType := strings.ToLower(cmd.Flag("clone-type").Value.String())
os.Setenv("GHORG_CLONE_TYPE", cloneType)
}
if cmd.Flags().Changed("scm") {
scmType := strings.ToLower(cmd.Flag("scm").Value.String())
os.Setenv("GHORG_SCM_TYPE", scmType)
}
if cmd.Flags().Changed("base-url") {
url := cmd.Flag("base-url").Value.String()
os.Setenv("GHORG_SCM_BASE_URL", url)
}
if cmd.Flags().Changed("concurrency") {
g := cmd.Flag("concurrency").Value.String()
os.Setenv("GHORG_CONCURRENCY", g)
}
if cmd.Flags().Changed("topics") {
topics := cmd.Flag("topics").Value.String()
os.Setenv("GHORG_TOPICS", topics)
}
if cmd.Flags().Changed("match-prefix") {
prefix := cmd.Flag("match-prefix").Value.String()
os.Setenv("GHORG_MATCH_PREFIX", prefix)
}
if cmd.Flags().Changed("skip-archived") {
os.Setenv("GHORG_SKIP_ARCHIVED", "true")
}
if cmd.Flags().Changed("skip-forks") {
os.Setenv("GHORG_SKIP_FORKS", "true")
}
if cmd.Flags().Changed("preserve-dir") {
os.Setenv("GHORG_PRESERVE_DIRECTORY_STRUCTURE", "true")
}
if cmd.Flags().Changed("backup") {
os.Setenv("GHORG_BACKUP", "true")
}
if cmd.Flags().Changed("output-dir") {
d := cmd.Flag("output-dir").Value.String()
os.Setenv("GHORG_OUTPUT_DIR", d)
}
if len(argz) < 1 {
if os.Getenv("GHORG_SCM_TYPE") == "github" && os.Getenv("GHORG_CLONE_TYPE") == "user" {
argz = append(argz, "")
} else {
colorlog.PrintError("You must provide an org or user to clone")
os.Exit(1)
}
}
configs.GetOrSetToken()
if cmd.Flags().Changed("token") {
if os.Getenv("GHORG_SCM_TYPE") == "github" {
os.Setenv("GHORG_GITHUB_TOKEN", cmd.Flag("token").Value.String())
} else if os.Getenv("GHORG_SCM_TYPE") == "gitlab" {
os.Setenv("GHORG_GITLAB_TOKEN", cmd.Flag("token").Value.String())
} else if os.Getenv("GHORG_SCM_TYPE") == "bitbucket" {
os.Setenv("GHORG_BITBUCKET_APP_PASSWORD", cmd.Flag("token").Value.String())
} else if os.Getenv("GHORG_SCM_TYPE") == "gitea" {
os.Setenv("GHORG_GITEA_TOKEN", cmd.Flag("token").Value.String())
}
}
err := configs.VerifyTokenSet()
if err != nil {
colorlog.PrintError(err)
os.Exit(1)
}
err = configs.VerifyConfigsSetCorrectly()
if err != nil {
colorlog.PrintError(err)
os.Exit(1)
}
parseParentFolder(argz)
args = argz
targetCloneSource = argz[0]
CloneAllRepos()
}
// TODO: Figure out how to use go channels for this
func getAllOrgCloneUrls() ([]scm.Repo, error) {
return getCloneUrls(true)
}
// TODO: Figure out how to use go channels for this
func getAllUserCloneUrls() ([]scm.Repo, error) {
return getCloneUrls(false)
}
func getCloneUrls(isOrg bool) ([]scm.Repo, error) {
asciiTime()
PrintConfigs()
scmType := strings.ToLower(os.Getenv("GHORG_SCM_TYPE"))
if len(scmType) == 0 {
colorlog.PrintError("GHORG_SCM_TYPE not set")
os.Exit(1)
}
client, err := scm.GetClient(scmType)
if err != nil {
colorlog.PrintError(err)
os.Exit(1)
}
if isOrg {
return client.GetOrgRepos(targetCloneSource)
}
return client.GetUserRepos(targetCloneSource)
}
func createDirIfNotExist() {
if _, err := os.Stat(os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + parentFolder); os.IsNotExist(err) {
err = os.MkdirAll(os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"), 0700)
if err != nil {
panic(err)
}
}
}
func repoExistsLocally(path string) bool {
if _, err := os.Stat(path); os.IsNotExist(err) {
return false
}
return true
}
func getAppNameFromURL(url string) string {
withGit := strings.Split(url, "/")
appName := withGit[len(withGit)-1]
split := strings.Split(appName, ".")
return strings.Join(split[0:len(split)-1], ".")
}
func printRemainingMessages() {
if len(cloneInfos) > 0 {
fmt.Println()
colorlog.PrintInfo("============ Info ============")
fmt.Println()
for _, i := range cloneInfos {
colorlog.PrintInfo(i)
}
fmt.Println()
}
if len(cloneErrors) > 0 {
fmt.Println()
colorlog.PrintError("============ Issues ============")
fmt.Println()
for _, e := range cloneErrors {
colorlog.PrintError(e)
}
fmt.Println()
}
}
func readGhorgIgnore() ([]string, error) {
file, err := os.Open(configs.GhorgIgnoreLocation())
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
if scanner.Text() != "" {
lines = append(lines, scanner.Text())
}
}
return lines, scanner.Err()
}
// CloneAllRepos clones all repos
func CloneAllRepos() {
// resc, errc, infoc := make(chan string), make(chan error), make(chan error)
var cloneTargets []scm.Repo
var err error
if os.Getenv("GHORG_CLONE_TYPE") == "org" {
cloneTargets, err = getAllOrgCloneUrls()
} else if os.Getenv("GHORG_CLONE_TYPE") == "user" {
cloneTargets, err = getAllUserCloneUrls()
} else {
colorlog.PrintError("GHORG_CLONE_TYPE not set or unsupported")
os.Exit(1)
}
if err != nil {
colorlog.PrintError("Encountered an error, aborting")
fmt.Println(err)
os.Exit(1)
}
if len(cloneTargets) == 0 {
colorlog.PrintInfo("No repos found for " + os.Getenv("GHORG_SCM_TYPE") + " " + os.Getenv("GHORG_CLONE_TYPE") + ": " + targetCloneSource + ", check spelling and verify clone-type (user/org) is set correctly e.g. -c=user")
os.Exit(0)
}
// filter repos down based on ghorgignore if one exists
_, err = os.Stat(configs.GhorgIgnoreLocation())
if !os.IsNotExist(err) {
		// Open the ghorgignore file, parse each line, and drop any clone targets whose URL contains an ignored entry
toIgnore, err := readGhorgIgnore()
if err != nil {
colorlog.PrintError("Error parsing your ghorgignore, aborting")
fmt.Println(err)
os.Exit(1)
}
colorlog.PrintInfo("Using ghorgignore, filtering repos down...")
fmt.Println("")
filteredCloneTargets := []scm.Repo{}
var flag bool
for _, cloned := range cloneTargets {
flag = false
for _, ignore := range toIgnore {
if strings.Contains(cloned.URL, ignore) {
flag = true
}
}
if flag == false {
filteredCloneTargets = append(filteredCloneTargets, cloned)
}
}
cloneTargets = filteredCloneTargets
}
colorlog.PrintInfo(strconv.Itoa(len(cloneTargets)) + " repos found in " + targetCloneSource)
fmt.Println()
createDirIfNotExist()
l, err := strconv.Atoi(os.Getenv("GHORG_CONCURRENCY"))
if err != nil {
log.Fatal("Could not determine GHORG_CONCURRENCY")
}
limit := limiter.NewConcurrencyLimiter(l)
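	// Each clone target runs in its own goroutine; the limiter caps the number
	// of concurrent git operations at GHORG_CONCURRENCY.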
for _, target := range cloneTargets {
appName := getAppNameFromURL(target.URL)
branch := target.CloneBranch
repo := target
limit.Execute(func() {
path := appName
if repo.Path != "" && os.Getenv("GHORG_PRESERVE_DIRECTORY_STRUCTURE") == "true" {
path = repo.Path
}
repoDir := os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + parentFolder + "/" + path
if os.Getenv("GHORG_BACKUP") == "true" {
repoDir = os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO") + parentFolder + "_backup" + "/" + path
}
if repoExistsLocally(repoDir) == true {
if os.Getenv("GHORG_BACKUP") == "true" {
cmd := exec.Command("git", "remote", "update")
cmd.Dir = repoDir
err := cmd.Run()
if err != nil {
e := fmt.Sprintf("Could not update remotes in Repo: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
} else {
cmd := exec.Command("git", "checkout", branch)
cmd.Dir = repoDir
err := cmd.Run()
if err != nil {
e := fmt.Sprintf("Could not checkout out %s, branch may not exist, no changes made Repo: %s Error: %v", branch, repo.URL, err)
cloneInfos = append(cloneInfos, e)
return
}
cmd = exec.Command("git", "clean", "-f", "-d")
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem running git clean: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
cmd = exec.Command("git", "reset", "--hard", "origin/"+branch)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem resetting %s Repo: %s Error: %v", branch, repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
// TODO: handle case where repo was removed, should not give user an error
cmd = exec.Command("git", "pull", "origin", branch)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem trying to pull %v Repo: %s Error: %v", branch, repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
}
} else {
// if https clone and github/gitlab add personal access token to url
args := []string{"clone", repo.CloneURL, repoDir}
if os.Getenv("GHORG_BACKUP") == "true" {
args = append(args, "--mirror")
}
cmd := exec.Command("git", args...)
err := cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem trying to clone Repo: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
if os.Getenv("GHORG_BRANCH") != "" {
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Could not checkout out %s, branch may not exist, no changes made Repo: %s Error: %v", branch, repo.URL, err)
cloneInfos = append(cloneInfos, e)
return
}
}
// TODO: make configs around remote name
// we clone with api-key in clone url
args = []string{"remote", "set-url", "origin", repo.URL}
cmd = exec.Command("git", args...)
cmd.Dir = repoDir
err = cmd.Run()
if err != nil {
e := fmt.Sprintf("Problem trying to set remote on Repo: %s Error: %v", repo.URL, err)
cloneErrors = append(cloneErrors, e)
return
}
}
colorlog.PrintSuccess("Success cloning repo: " + repo.URL + " -> branch: " + branch)
})
}
limit.Wait()
printRemainingMessages()
// TODO: fix all these if else checks with ghorg_backups
if os.Getenv("GHORG_BACKUP") == "true" {
colorlog.PrintSuccess(fmt.Sprintf("Finished! %s%s_backup", os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"), parentFolder))
} else {
colorlog.PrintSuccess(fmt.Sprintf("Finished! %s%s", os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"), parentFolder))
}
}
func asciiTime() {
colorlog.PrintInfo(
`
+-+-+-+-+ +-+-+ +-+-+-+-+-+
|T|I|M|E| |T|O| |G|H|O|R|G|
+-+-+-+-+ +-+-+ +-+-+-+-+-+
`)
}
// PrintConfigs shows the user what is set before cloning
func PrintConfigs() {
colorlog.PrintInfo("*************************************")
colorlog.PrintInfo("* SCM : " + os.Getenv("GHORG_SCM_TYPE"))
colorlog.PrintInfo("* Type : " + os.Getenv("GHORG_CLONE_TYPE"))
colorlog.PrintInfo("* Protocol : " + os.Getenv("GHORG_CLONE_PROTOCOL"))
colorlog.PrintInfo("* Location : " + os.Getenv("GHORG_ABSOLUTE_PATH_TO_CLONE_TO"))
colorlog.PrintInfo("* Concurrency : " + os.Getenv("GHORG_CONCURRENCY"))
if os.Getenv("GHORG_BRANCH") != "" {
colorlog.PrintInfo("* Branch : " + getGhorgBranch())
}
if os.Getenv("GHORG_SCM_BASE_URL") != "" {
colorlog.PrintInfo("* Base URL : " + os.Getenv("GHORG_SCM_BASE_URL"))
}
if os.Getenv("GHORG_SKIP_ARCHIVED") == "true" {
colorlog.PrintInfo("* Skip Archived : " + os.Getenv("GHORG_SKIP_ARCHIVED"))
}
if os.Getenv("GHORG_SKIP_FORKS") == "true" {
colorlog.PrintInfo("* Skip Forks : " + os.Getenv("GHORG_SKIP_FORKS"))
}
if os.Getenv("GHORG_BACKUP") == "true" {
colorlog.PrintInfo("* Backup : " + os.Getenv("GHORG_BACKUP"))
}
if configs.GhorgIgnoreDetected() == true {
colorlog.PrintInfo("* Ghorgignore : true")
}
if os.Getenv("GHORG_OUTPUT_DIR") != "" {
colorlog.PrintInfo("* Output Dir : " + parentFolder)
}
colorlog.PrintInfo("*************************************")
fmt.Println("")
}
func getGhorgBranch() string {
if os.Getenv("GHORG_BRANCH") == "" {
return "default branch"
}
return os.Getenv("GHORG_BRANCH")
}
func ensureTrailingSlash(path string) string {
if string(path[len(path)-1]) == "/" {
return path
}
return path + "/"
}
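// addTokenToHTTPSCloneURL embeds the scm token in an https clone url, e.g.
// https://github.com/org/repo.git -> https://<token>@github.com/org/repo.git
// (GitLab instead uses the https://oauth2:<token>@... form).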
func addTokenToHTTPSCloneURL(url string, token string) string {
splitURL := strings.Split(url, "https://")
if os.Getenv("GHORG_SCM_TYPE") == "gitlab" {
return "https://oauth2:" + token + "@" + splitURL[1]
}
return "https://" + token + "@" + splitURL[1]
}
func parseParentFolder(argz []string) {
if os.Getenv("GHORG_OUTPUT_DIR") != "" {
parentFolder = strings.ReplaceAll(os.Getenv("GHORG_OUTPUT_DIR"), "-", "_")
return
}
pf := strings.ReplaceAll(argz[0], "-", "_")
parentFolder = strings.ToLower(pf)
}
| [
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CONCURRENCY\"",
"\"GHORG_PRESERVE_DIRECTORY_STRUCTURE\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_BACKUP\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_BACKUP\"",
"\"GHORG_BACKUP\"",
"\"GHORG_BRANCH\"",
"\"GHORG_BACKUP\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CLONE_PROTOCOL\"",
"\"GHORG_ABSOLUTE_PATH_TO_CLONE_TO\"",
"\"GHORG_CONCURRENCY\"",
"\"GHORG_BRANCH\"",
"\"GHORG_SCM_BASE_URL\"",
"\"GHORG_SCM_BASE_URL\"",
"\"GHORG_SKIP_ARCHIVED\"",
"\"GHORG_SKIP_ARCHIVED\"",
"\"GHORG_SKIP_FORKS\"",
"\"GHORG_SKIP_FORKS\"",
"\"GHORG_BACKUP\"",
"\"GHORG_BACKUP\"",
"\"GHORG_OUTPUT_DIR\"",
"\"GHORG_BRANCH\"",
"\"GHORG_BRANCH\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_OUTPUT_DIR\"",
"\"GHORG_OUTPUT_DIR\""
]
| []
| [
"GHORG_SKIP_FORKS",
"GHORG_CONCURRENCY",
"GHORG_CLONE_PROTOCOL",
"GHORG_PRESERVE_DIRECTORY_STRUCTURE",
"GHORG_CLONE_TYPE",
"GHORG_OUTPUT_DIR",
"GHORG_SCM_TYPE",
"GHORG_ABSOLUTE_PATH_TO_CLONE_TO",
"GHORG_SCM_BASE_URL",
"GHORG_BACKUP",
"GHORG_BRANCH",
"GHORG_SKIP_ARCHIVED"
]
| [] | ["GHORG_SKIP_FORKS", "GHORG_CONCURRENCY", "GHORG_CLONE_PROTOCOL", "GHORG_PRESERVE_DIRECTORY_STRUCTURE", "GHORG_CLONE_TYPE", "GHORG_OUTPUT_DIR", "GHORG_SCM_TYPE", "GHORG_ABSOLUTE_PATH_TO_CLONE_TO", "GHORG_SCM_BASE_URL", "GHORG_BACKUP", "GHORG_BRANCH", "GHORG_SKIP_ARCHIVED"] | go | 12 | 0 | |
pkg/vault/auth/cert/login_test.go | /*
Copyright The KubeVault Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/gorilla/mux"
vaultapi "github.com/hashicorp/vault/api"
"github.com/stretchr/testify/assert"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
)
const authResp = `
{
"auth": {
"client_token": "1234"
}
}
`
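// NewFakeVaultServer mimics Vault's TLS-cert login endpoints: the default
// mount (/v1/auth/cert/login) accepts only the role name "good", while a
// second mount at /v1/auth/test/login accepts only "try"; both return authResp.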
func NewFakeVaultServer() *httptest.Server {
router := mux.NewRouter()
router.HandleFunc("/v1/auth/cert/login", func(w http.ResponseWriter, r *http.Request) {
var v map[string]interface{}
defer r.Body.Close()
utilruntime.Must(json.NewDecoder(r.Body).Decode(&v))
if val, ok := v["name"]; ok {
if val.(string) == "good" {
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(authResp))
utilruntime.Must(err)
return
}
}
w.WriteHeader(http.StatusBadRequest)
}).Methods(http.MethodPost)
router.HandleFunc("/v1/auth/test/login", func(w http.ResponseWriter, r *http.Request) {
var v map[string]interface{}
defer r.Body.Close()
utilruntime.Must(json.NewDecoder(r.Body).Decode(&v))
if val, ok := v["name"]; ok {
if val.(string) == "try" {
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(authResp))
utilruntime.Must(err)
return
}
}
w.WriteHeader(http.StatusBadRequest)
}).Methods(http.MethodPost)
return httptest.NewServer(router)
}
func TestAuth_Login(t *testing.T) {
srv := NewFakeVaultServer()
defer srv.Close()
vc, err := vaultapi.NewClient(vaultapi.DefaultConfig())
if !assert.Nil(t, err) {
return
}
utilruntime.Must(vc.SetAddress(srv.URL))
cases := []struct {
testName string
au *auth
expectErr bool
}{
{
testName: "login success",
au: &auth{
vClient: vc,
name: "good",
path: "cert",
},
expectErr: false,
},
{
testName: "login success, auth enabled in another path",
au: &auth{
vClient: vc,
name: "try",
path: "test",
},
expectErr: false,
},
{
testName: "login failed, bad user/password",
au: &auth{
vClient: vc,
name: "bad",
path: "cert",
},
expectErr: true,
},
}
for _, c := range cases {
t.Run(c.testName, func(t *testing.T) {
token, err := c.au.Login()
if c.expectErr {
assert.NotNil(t, err)
} else {
if assert.Nil(t, err) {
assert.Condition(t, func() (success bool) {
return token == "1234"
})
}
}
})
}
}
func TestLogin(t *testing.T) {
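	// Integration test: runs only when VAULT_ADDR points at a live Vault server
	// with TLS-cert auth enabled and a role named "demo" configured (assumed);
	// otherwise the test is skipped.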
addr := os.Getenv("VAULT_ADDR")
if addr == "" {
t.Skip()
}
app := &appcat.AppBinding{
Spec: appcat.AppBindingSpec{
ClientConfig: appcat.ClientConfig{
URL: &addr,
},
Parameters: &runtime.RawExtension{
Raw: []byte(fmt.Sprintf(`{ "name" : "demo" }`)),
},
},
}
au, err := New(app, &core.Secret{
Data: map[string][]byte{
"tls.key": []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAvzavl3xpk8A08Mq2zjdLX7wbl6rny8dPJ2gPTwbMi+rto+q8
73+6vKfDsTN/p+Wr7r7oaXv2xd5Dd0WVaQzkJ3yAg5VhrhT9YCVOeQ8ND4+FQjO7
hcvXIzVyhphX2RFs0ASvT8bxAttyHZyaPyKhZ15XAnOyZNw3918cQbZT1TwtKXo7
dhJudyFADv05kIGND1obmrlh+VlbOIuUBSzeS6jCaFjEjK/l4pnqNvR2GK2jmETj
yJZaV7WkA/zx1kbrUhegREntCuA1cfl8biepadfCsw5RCa0DK4D9zYMWy9Uau0SA
ExefCuwv2h8UtOCW4bvLtruGiR1tyJu2CyNodwIDAQABAoIBAE6CCX5NGpwLYrEq
yfvJQC1CcqHHDfzhDGLFmuN7iyg3gPK4QnKjIuyqhPKQjm1FI16XC52jxCJhq5mg
/ENxg4ui1rEv+Dcdbxq2to2F3HrzFmekDe5VTzOpkigpDIiXWvHduo0qxXHC4AvA
bKRXd6WVWmwrTKeUs3Xhmxxv2+PEZx8QMJQXNu/uGqy01IPNp2KFKm6X0lWU/eH0
ZTjbWkpL4N1JwQQfJxEGStdNf7fjMc8WFHCqeFCJ29B8Z1V06E5Xgu0kDCmt/e1a
U7N7Rr5s74Z1VLU8uJHDDwsnFjiCl8A1MamtlumV0mOgRYUQf+CDGtokR+MtLlOa
Gj0BeCECgYEA7uTjwSUuDBojDxdx55cfjGm/ieuRWe6h+VfnmDQw0E6oS0wr8DwQ
ci1Zg7Gc0uolxe+q37ws/YvC7uwk4Ss/l2+YhTLUP1AAFTsmggFGIn6o+X7+k/Nm
tVrrdNg+zcU1cahcitTmtmZsWxRiVBmBqYcRf6k69y9KhSoLpzydFh0CgYEAzOfG
2ZEEjOrkcN+mZ3gzlRmhS72FG0jeFwg8GYaeVPI/37a7ZtlkoDF9eyG/YuSF3u5I
UKDWuZXtL5wfiQUuFu1Emirtu0ylXyoSthkrGvoQx5KfGhvHco2IoGEYlZj6Tm/U
MHfaNmXkEeA8m7fkrwJsifUUKIeh9HJ9gdt3ZKMCgYEAkEL4pnJlVDmUYlCuIERK
cOiLGiZ/J+fLOF+1I1yg/aoCRzZAclpTNB/epoBjS5rKJLWOYn2oTZRQqyc/Php3
1GM3n3gKZBFTe360yl0qlToXoFLoOUALDglRlsXfZzNoCrK4772RdSR02qt8lXyx
qEZAcu8nBI4yWigB0YPw+KECgYEAtAfMianFkr5qmdWW0gAlahILyo0oXvGl2Byv
GUpS4JW7kyZs/w9wPuNcuYvMKOpZyKYZOWYnYwWcUKFef7fiZ9ht1vpyx4avIa7I
o9/3JIujpIVpbroLgdVivm6w9/dhrPrKNw+G1RauzRn0hmiK70006f0/ieCpZioV
pbua6fsCgYEAkXYt+RRf1fMKJP/QuSUlH7xyxJCnNbTLC7G4R1E0zbNxcuheCVd7
UsZgpyOlbAaENmheid3lBke/z+lljcyJE5EUW3estslRAQWvaEQUtq/2NDVAMmFf
Gqe3rYqAdJ3bWBBk+8gi8zyY2pbQpTIg6MSSMazHFPoThdBZ1pWIkss=
-----END RSA PRIVATE KEY-----
`),
"tls.crt": []byte(`-----BEGIN CERTIFICATE-----
MIIC3jCCAcagAwIBAgIIL7N6jjiZ5p0wDQYJKoZIhvcNAQELBQAwDTELMAkGA1UE
AxMCY2EwHhcNMTgwOTI3MDQ1NDI5WhcNMTkwOTI4MDQ1MTM2WjAoMQ8wDQYDVQQK
EwZnb29nbGUxFTATBgNVBAMTDGFwcHNjb2RlLmNvbTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAL82r5d8aZPANPDKts43S1+8G5eq58vHTydoD08GzIvq
7aPqvO9/urynw7Ezf6flq+6+6Gl79sXeQ3dFlWkM5Cd8gIOVYa4U/WAlTnkPDQ+P
hUIzu4XL1yM1coaYV9kRbNAEr0/G8QLbch2cmj8ioWdeVwJzsmTcN/dfHEG2U9U8
LSl6O3YSbnchQA79OZCBjQ9aG5q5YflZWziLlAUs3kuowmhYxIyv5eKZ6jb0dhit
o5hE48iWWle1pAP88dZG61IXoERJ7QrgNXH5fG4nqWnXwrMOUQmtAyuA/c2DFsvV
GrtEgBMXnwrsL9ofFLTgluG7y7a7hokdbcibtgsjaHcCAwEAAaMnMCUwDgYDVR0P
AQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IB
AQBNCahbHcO7Pdu8s/gIgn5cB4nWc3813jzVMDo0ujjVB1jl16pOb3vtzeTxoMJ4
ewB6C0EArTdjVK9d8PJuDL2cJwrdIuYaFzjwTpFOIWX89/p3XE2yRRMETLMccYBJ
PYskPkDz6TidYflX/H7KA9qsv+4N1KoB7PUIG4sHeVNFIN0xXZvzEXH5fUjPdpv5
W195cVunLFIlEVfJvYmMuKgGfLTj96t7GUTJUOjJtW2GWW8QI43L6BQZcCfSIdSI
YatctDlrGk9IQeKwea8u4LlRrX9eHBNDKTpxmxsiBuBWxwSkK3eyVC7PKUzerBj6
vZvzz7lCsjRshgwyDcgM5O+m
-----END CERTIFICATE-----
`),
},
})
assert.Nil(t, err)
token, err := au.Login()
if assert.Nil(t, err) {
fmt.Println(token)
}
}
| [
"\"VAULT_ADDR\""
]
| []
| [
"VAULT_ADDR"
]
| [] | ["VAULT_ADDR"] | go | 1 | 0 | |
core/chaincode/exectransaction_test.go | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package chaincode
import (
"fmt"
"net"
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
"path/filepath"
"github.com/hyperledger/fabric/core/container"
"github.com/hyperledger/fabric/core/container/ccintf"
"github.com/hyperledger/fabric/core/crypto"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/kvledger"
"github.com/hyperledger/fabric/core/util"
"github.com/hyperledger/fabric/membersrvc/ca"
pb "github.com/hyperledger/fabric/protos"
putils "github.com/hyperledger/fabric/protos/utils"
"github.com/golang/protobuf/proto"
"github.com/spf13/viper"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// attributes to request in the batch of tcerts while deploying, invoking or querying
var attributes = []string{"company", "position"}
func getNowMillis() int64 {
nanos := time.Now().UnixNano()
return nanos / 1000000
}
//initialize memberservices and startup
func initMemSrvc() (net.Listener, error) {
//start clean
finitMemSrvc(nil)
ca.CacheConfiguration() // Cache configuration
aca := ca.NewACA()
eca := ca.NewECA(aca)
tca := ca.NewTCA(eca)
tlsca := ca.NewTLSCA(eca)
sockp, err := net.Listen("tcp", viper.GetString("server.port"))
if err != nil {
return nil, err
}
var opts []grpc.ServerOption
server := grpc.NewServer(opts...)
aca.Start(server)
eca.Start(server)
tca.Start(server)
tlsca.Start(server)
go server.Serve(sockp)
return sockp, nil
}
//cleanup memberservice debris
func finitMemSrvc(lis net.Listener) {
closeListenerAndSleep(lis)
os.RemoveAll(filepath.Join(os.TempDir(), "ca"))
}
//initialize peer and start up. If security==enabled, login as vp
func initPeer() (net.Listener, error) {
//start clean
finitPeer(nil)
var opts []grpc.ServerOption
if viper.GetBool("peer.tls.enabled") {
creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
if err != nil {
return nil, fmt.Errorf("Failed to generate credentials %v", err)
}
opts = []grpc.ServerOption{grpc.Creds(creds)}
}
grpcServer := grpc.NewServer(opts...)
ledgerPath := viper.GetString("peer.fileSystemPath")
kvledger.Initialize(ledgerPath)
peerAddress := viper.GetString("peer.address")
lis, err := net.Listen("tcp", peerAddress)
if err != nil {
return nil, fmt.Errorf("Error starting peer listener %s", err)
}
getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
}
// Install security object for peer
var secHelper crypto.Peer
if viper.GetBool("security.enabled") {
enrollID := viper.GetString("security.enrollID")
enrollSecret := viper.GetString("security.enrollSecret")
if err = crypto.RegisterValidator(enrollID, nil, enrollID, enrollSecret); nil != err {
return nil, err
}
secHelper, err = crypto.InitValidator(enrollID, nil)
if nil != err {
return nil, err
}
}
ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, secHelper))
RegisterSysCCs()
go grpcServer.Serve(lis)
return lis, nil
}
func finitPeer(lis net.Listener) {
if lis != nil {
deRegisterSysCCs()
ledgername := string(DefaultChain)
if lgr := kvledger.GetLedger(ledgername); lgr != nil {
lgr.Close()
}
closeListenerAndSleep(lis)
}
ledgerPath := viper.GetString("peer.fileSystemPath")
os.RemoveAll(ledgerPath)
os.RemoveAll(filepath.Join(os.TempDir(), "hyperledger"))
}
func startTxSimulation(ctxt context.Context) (context.Context, ledger.TxSimulator, error) {
ledgername := string(DefaultChain)
lgr := kvledger.GetLedger(ledgername)
txsim, err := lgr.NewTxSimulator()
if err != nil {
return nil, nil, err
}
ctxt = context.WithValue(ctxt, TXSimulatorKey, txsim)
return ctxt, txsim, nil
}
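// endTxSimulation releases the simulator and, when commit is true, wraps the
// simulation results in a single-transaction block and commits that block to
// the default chain's ledger.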
func endTxSimulation(txsim ledger.TxSimulator, payload []byte, commit bool) error {
txsim.Done()
ledgername := string(DefaultChain)
if lgr := kvledger.GetLedger(ledgername); lgr != nil {
if commit {
var txSimulationResults []byte
var err error
//get simulation results
if txSimulationResults, err = txsim.GetTxSimulationResults(); err != nil {
return err
}
tx, err := putils.CreateTx(pb.Header_CHAINCODE, util.ComputeCryptoHash([]byte("dummyProposal")), []byte("dummyCCEvents"), txSimulationResults, []*pb.Endorsement{&pb.Endorsement{}})
if err != nil {
return err
}
txBytes, err := proto.Marshal(tx)
if err != nil {
return err
}
//create the block with 1 transaction
block := &pb.Block2{Transactions: [][]byte{txBytes}}
if _, _, err = lgr.RemoveInvalidTransactionsAndPrepare(block); err != nil {
return err
}
//commit the block
if err := lgr.Commit(); err != nil {
return err
}
}
}
return nil
}
// Build a chaincode.
func getDeploymentSpec(context context.Context, spec *pb.ChaincodeSpec) (*pb.ChaincodeDeploymentSpec, error) {
fmt.Printf("getting deployment spec for chaincode spec: %v\n", spec)
codePackageBytes, err := container.GetChaincodePackageBytes(spec)
if err != nil {
return nil, err
}
chaincodeDeploymentSpec := &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec, CodePackage: codePackageBytes}
return chaincodeDeploymentSpec, nil
}
func createDeployTransaction(dspec *pb.ChaincodeDeploymentSpec, uuid string) (*pb.Transaction, error) {
var tx *pb.Transaction
var err error
var sec crypto.Client
if dspec.ChaincodeSpec.SecureContext != "" {
sec, err = crypto.InitClient(dspec.ChaincodeSpec.SecureContext, nil)
defer crypto.CloseClient(sec)
if nil != err {
return nil, err
}
tx, err = sec.NewChaincodeDeployTransaction(dspec, uuid, attributes...)
if nil != err {
return nil, err
}
} else {
tx, err = pb.NewChaincodeDeployTransaction(dspec, uuid)
if err != nil {
return nil, fmt.Errorf("Error deploying chaincode: %s ", err)
}
}
return tx, nil
}
func createTransaction(invokeTx bool, spec *pb.ChaincodeInvocationSpec, uuid string) (*pb.Transaction, error) {
var tx *pb.Transaction
var err error
var sec crypto.Client
if spec.ChaincodeSpec.SecureContext != "" {
sec, err = crypto.InitClient(spec.ChaincodeSpec.SecureContext, nil)
defer crypto.CloseClient(sec)
if nil != err {
return nil, err
}
if invokeTx {
tx, err = sec.NewChaincodeExecute(spec, uuid, attributes...)
} else {
tx, err = sec.NewChaincodeQuery(spec, uuid, attributes...)
}
if nil != err {
return nil, err
}
} else {
var t pb.Transaction_Type
if invokeTx {
t = pb.Transaction_CHAINCODE_INVOKE
} else {
t = pb.Transaction_CHAINCODE_QUERY
}
tx, err = pb.NewChaincodeExecute(spec, uuid, t)
if nil != err {
return nil, err
}
}
return tx, nil
}
//getDeployLCCCSpec gets the spec for the chaincode deployment to be sent to LCCC
func getDeployLCCCSpec(cds *pb.ChaincodeDeploymentSpec) (*pb.ChaincodeInvocationSpec, error) {
b, err := proto.Marshal(cds)
if err != nil {
return nil, err
}
//wrap the deployment in an invocation spec to lccc...
lcccSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Name: "lccc"}, CtorMsg: &pb.ChaincodeInput{Args: [][]byte{[]byte("deploy"), []byte("default"), b}}}}
return lcccSpec, nil
}
// Deploy a chaincode - i.e., build and initialize.
func deploy(ctx context.Context, spec *pb.ChaincodeSpec) (b []byte, err error) {
// First build and get the deployment spec
chaincodeDeploymentSpec, err := getDeploymentSpec(ctx, spec)
if err != nil {
return nil, err
}
return deploy2(ctx, chaincodeDeploymentSpec)
}
func deploy2(ctx context.Context, chaincodeDeploymentSpec *pb.ChaincodeDeploymentSpec) (b []byte, err error) {
cis, err := getDeployLCCCSpec(chaincodeDeploymentSpec)
if err != nil {
return nil, fmt.Errorf("Error creating lccc spec : %s\n", err)
}
tid := chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name
// Now create the Transactions message and send to Peer.
transaction, err := createDeployTransaction(chaincodeDeploymentSpec, tid)
if err != nil {
return nil, fmt.Errorf("Error deploying chaincode: %s ", err)
}
ctx, txsim, err := startTxSimulation(ctx)
if err != nil {
return nil, fmt.Errorf("Failed to get handle to simulator: %s ", err)
}
defer func() {
//no error, lets try commit
if err == nil {
//capture returned error from commit
err = endTxSimulation(txsim, []byte("deployed"), true)
} else {
//there was an error, just close simulation and return that
endTxSimulation(txsim, []byte("deployed"), false)
}
}()
uuid := util.GenerateUUID()
var lccctx *pb.Transaction
if lccctx, err = createTransaction(true, cis, uuid); err != nil {
return nil, fmt.Errorf("Error creating lccc transaction: %s", err)
}
//write to lccc
if _, _, err = Execute(ctx, GetChain(DefaultChain), lccctx); err != nil {
return nil, fmt.Errorf("Error deploying chaincode: %s", err)
}
if b, _, err = Execute(ctx, GetChain(DefaultChain), transaction); err != nil {
return nil, fmt.Errorf("Error deploying chaincode: %s", err)
}
return b, nil
}
// Invoke or query a chaincode.
func invoke(ctx context.Context, spec *pb.ChaincodeSpec) (ccevt *pb.ChaincodeEvent, uuid string, retval []byte, err error) {
chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: spec}
// Now create the Transactions message and send to Peer.
uuid = util.GenerateUUID()
var transaction *pb.Transaction
transaction, err = createTransaction(true, chaincodeInvocationSpec, uuid)
if err != nil {
return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", err)
}
var txsim ledger.TxSimulator
ctx, txsim, err = startTxSimulation(ctx)
if err != nil {
return nil, uuid, nil, fmt.Errorf("Failed to get handle to simulator: %s ", err)
}
defer func() {
//no error, lets try commit
if err == nil {
//capture returned error from commit
err = endTxSimulation(txsim, []byte("invoke"), true)
} else {
//there was an error, just close simulation and return that
endTxSimulation(txsim, []byte("invoke"), false)
}
}()
retval, ccevt, err = Execute(ctx, GetChain(DefaultChain), transaction)
if err != nil {
return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", err)
}
return ccevt, uuid, retval, err
}
func closeListenerAndSleep(l net.Listener) {
if l != nil {
l.Close()
time.Sleep(2 * time.Second)
}
}
func executeDeployTransaction(t *testing.T, url string) {
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Path: url}, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec)
chaincodeID := spec.ChaincodeID.Name
if err != nil {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
t.Fail()
t.Logf("Error deploying <%s>: %s", chaincodeID, err)
return
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
// Test deploy of a transaction
func TestExecuteDeployTransaction(t *testing.T) {
executeDeployTransaction(t, "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01")
}
// Test deploy of a transaction with a GOPATH with multiple elements
func TestGopathExecuteDeployTransaction(t *testing.T) {
// add a trailing slash to GOPATH
// and a couple of elements - it doesn't matter what they are
os.Setenv("GOPATH", os.Getenv("GOPATH")+string(os.PathSeparator)+string(os.PathListSeparator)+"/tmp/foo"+string(os.PathListSeparator)+"/tmp/bar")
executeDeployTransaction(t, "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01")
}
// Test deploy of a transaction with a chaincode over HTTP.
func TestHTTPExecuteDeployTransaction(t *testing.T) {
// The chaincode used here cannot be from the fabric repo
// itself or it won't be downloaded because it will be found
// in GOPATH, which would defeat the test
executeDeployTransaction(t, "http://gopkg.in/mastersingh24/fabric-test-resources.v1")
}
// Check the correctness of the final state after transaction execution.
func checkFinalState(uuid string, chaincodeID string) error {
_, txsim, err := startTxSimulation(context.Background())
if err != nil {
return fmt.Errorf("Failed to get handle to simulator: %s ", err)
}
defer txsim.Done()
// Invoke ledger to get state
var Aval, Bval int
resbytes, resErr := txsim.GetState(chaincodeID, "a")
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", chaincodeID, resErr)
}
fmt.Printf("Got string: %s\n", string(resbytes))
Aval, resErr = strconv.Atoi(string(resbytes))
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", chaincodeID, resErr)
}
if Aval != 90 {
return fmt.Errorf("Incorrect result. Aval is wrong for <%s>", chaincodeID)
}
resbytes, resErr = txsim.GetState(chaincodeID, "b")
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", chaincodeID, resErr)
}
Bval, resErr = strconv.Atoi(string(resbytes))
if resErr != nil {
return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", chaincodeID, resErr)
}
if Bval != 210 {
return fmt.Errorf("Incorrect result. Bval is wrong for <%s>", chaincodeID)
}
// Success
fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
return nil
}
// Invoke chaincode_example02
func invokeExample02Transaction(ctxt context.Context, cID *pb.ChaincodeID, args []string, destroyImage bool) error {
f := "init"
argsDeploy := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: argsDeploy}}
_, err := deploy(ctxt, spec)
chaincodeID := spec.ChaincodeID.Name
if err != nil {
return fmt.Errorf("Error deploying <%s>: %s", chaincodeID, err)
}
time.Sleep(time.Second)
if destroyImage {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
dir := container.DestroyImageReq{CCID: ccintf.CCID{ChaincodeSpec: spec, NetworkID: GetChain(DefaultChain).peerNetworkID, PeerID: GetChain(DefaultChain).peerID}, Force: true, NoPrune: true}
_, err = container.VMCProcess(ctxt, container.DOCKER, dir)
if err != nil {
err = fmt.Errorf("Error destroying image: %s", err)
return err
}
}
f = "invoke"
invokeArgs := append([]string{f}, args...)
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: util.ToChaincodeArgs(invokeArgs...)}}
_, uuid, _, err := invoke(ctxt, spec)
if err != nil {
return fmt.Errorf("Error invoking <%s>: %s", chaincodeID, err)
}
err = checkFinalState(uuid, chaincodeID)
if err != nil {
return fmt.Errorf("Incorrect final state after transaction for <%s>: %s", chaincodeID, err)
}
// Test for delete state
f = "delete"
delArgs := util.ToChaincodeArgs(f, "a")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: delArgs}}
_, uuid, _, err = invoke(ctxt, spec)
if err != nil {
return fmt.Errorf("Error deleting state in <%s>: %s", chaincodeID, err)
}
return nil
}
func TestExecuteInvokeTransaction(t *testing.T) {
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
chaincodeID := &pb.ChaincodeID{Path: url}
args := []string{"a", "b", "10"}
err = invokeExample02Transaction(ctxt, chaincodeID, args, true)
if err != nil {
t.Fail()
t.Logf("Error invoking transaction: %s", err)
} else {
fmt.Printf("Invoke test passed\n")
t.Logf("Invoke test passed")
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: chaincodeID}})
}
// Execute multiple transactions and queries.
func exec(ctxt context.Context, chaincodeID string, numTrans int, numQueries int) []error {
var wg sync.WaitGroup
errs := make([]error, numTrans+numQueries)
e := func(qnum int, typ pb.Transaction_Type) {
defer wg.Done()
var spec *pb.ChaincodeSpec
args := util.ToChaincodeArgs("invoke", "a", "b", "10")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Name: chaincodeID}, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, _, _, err := invoke(ctxt, spec)
if err != nil {
errs[qnum] = fmt.Errorf("Error executing <%s>: %s", chaincodeID, err)
return
}
}
wg.Add(numTrans)
//execute transactions sequentially..
go func() {
for i := 0; i < numTrans; i++ {
e(i, pb.Transaction_CHAINCODE_INVOKE)
}
}()
wg.Wait()
return errs
}
// Test the execution of a query.
func TestExecuteQuery(t *testing.T) {
//we no longer do query... this function to be modified for concurrent invokes
t.Skip()
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
cID := &pb.ChaincodeID{Path: url}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec)
chaincodeID := spec.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
time.Sleep(2 * time.Second)
//start := getNowMillis()
//fmt.Fprintf(os.Stderr, "Starting: %d\n", start)
numTrans := 2
numQueries := 10
errs := exec(ctxt, chaincodeID, numTrans, numQueries)
var numerrs int
for i := 0; i < numTrans+numQueries; i++ {
if errs[i] != nil {
t.Logf("Error doing query on %d %s", i, errs[i])
numerrs++
}
}
if numerrs == 0 {
t.Logf("Query test passed")
} else {
t.Logf("Query test failed(total errors %d)", numerrs)
t.Fail()
}
//end := getNowMillis()
//fmt.Fprintf(os.Stderr, "Ending: %d\n", end)
//fmt.Fprintf(os.Stderr, "Elapsed : %d millis\n", end-start)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
// Test the execution of an invalid transaction.
func TestExecuteInvokeInvalidTransaction(t *testing.T) {
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
chaincodeID := &pb.ChaincodeID{Path: url}
//FAIL, FAIL!
args := []string{"x", "-1"}
err = invokeExample02Transaction(ctxt, chaincodeID, args, false)
//this HAS to fail with expectedDeltaStringPrefix
if err != nil {
errStr := err.Error()
t.Logf("Got error %s\n", errStr)
t.Logf("InvalidInvoke test passed")
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: chaincodeID}})
return
}
t.Fail()
t.Logf("Error invoking transaction %s", err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: chaincodeID}})
}
// Test the execution of a chaincode that invokes another chaincode.
func TestChaincodeInvokeChaincode(t *testing.T) {
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
err = chaincodeInvokeChaincode(t, "")
if err != nil {
t.Fail()
t.Logf("Failed chaincode invoke chaincode : %s", err)
closeListenerAndSleep(lis)
return
}
closeListenerAndSleep(lis)
}
func chaincodeInvokeChaincode(t *testing.T, user string) (err error) {
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Path: url1}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
_, err = deploy(ctxt, spec1)
chaincodeID1 := spec1.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID1, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
return
}
t.Logf("deployed chaincode_example02 got cID1:% s,\n chaincodeID1:% s", cID1, chaincodeID1)
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example04"
cID2 := &pb.ChaincodeID{Path: url2}
f = "init"
args = util.ToChaincodeArgs(f, "e", "0")
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
_, err = deploy(ctxt, spec2)
chaincodeID2 := spec2.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID2, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
time.Sleep(time.Second)
// Invoke second chaincode, which will in turn invoke the first chaincode
f = "invoke"
args = util.ToChaincodeArgs(f, "e", "1")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
// Invoke chaincode
var uuid string
_, uuid, _, err = invoke(ctxt, spec2)
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", chaincodeID2, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
// Check the state in the ledger
err = checkFinalState(uuid, chaincodeID1)
if err != nil {
t.Fail()
t.Logf("Incorrect final state after transaction for <%s>: %s", chaincodeID1, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
// Test the execution of a chaincode that invokes another chaincode with wrong parameters. Should receive an error
// from the called chaincode
func TestChaincodeInvokeChaincodeErrorCase(t *testing.T) {
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Path: url1}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec1)
chaincodeID1 := spec1.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID1, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
return
}
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/fabric/examples/chaincode/go/passthru"
cID2 := &pb.ChaincodeID{Path: url2}
f = "init"
args = util.ToChaincodeArgs(f)
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec2)
chaincodeID2 := spec2.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID2, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
time.Sleep(time.Second)
// Invoke second chaincode, which will in turn invoke the first chaincode but pass bad params
f = chaincodeID1
args = util.ToChaincodeArgs(f, "invoke", "a")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, _, err = invoke(ctxt, spec2)
if err == nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", chaincodeID2, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
if strings.Index(err.Error(), "Incorrect number of arguments. Expecting 3") < 0 {
t.Fail()
t.Logf("Unexpected error %s", err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
}
func chaincodeQueryChaincode(user string) error {
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Path: url1}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
_, err := deploy(ctxt, spec1)
chaincodeID1 := spec1.ChaincodeID.Name
if err != nil {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
return fmt.Errorf("Error initializing chaincode %s(%s)", chaincodeID1, err)
}
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example05"
cID2 := &pb.ChaincodeID{Path: url2}
f = "init"
args = util.ToChaincodeArgs(f, "sum", "0")
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
_, err = deploy(ctxt, spec2)
chaincodeID2 := spec2.ChaincodeID.Name
if err != nil {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Error initializing chaincode %s(%s)", chaincodeID2, err)
}
time.Sleep(time.Second)
// Invoke second chaincode, which will in turn query the first chaincode
f = "invoke"
args = util.ToChaincodeArgs(f, chaincodeID1, "sum")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
// Invoke chaincode
var retVal []byte
_, _, retVal, err = invoke(ctxt, spec2)
if err != nil {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Error invoking <%s>: %s", chaincodeID2, err)
}
// Check the return value
result, err := strconv.Atoi(string(retVal))
if err != nil || result != 300 {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Incorrect final state after transaction for <%s>: %s", chaincodeID1, err)
}
// Query second chaincode, which will in turn query the first chaincode
f = "query"
args = util.ToChaincodeArgs(f, chaincodeID1, "sum")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user}
// Invoke chaincode
_, _, retVal, err = invoke(ctxt, spec2)
if err != nil {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Error querying <%s>: %s", chaincodeID2, err)
}
// Check the return value
result, err = strconv.Atoi(string(retVal))
if err != nil || result != 300 {
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return fmt.Errorf("Incorrect final value after query for <%s>: %s", chaincodeID1, err)
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return nil
}
// Test the execution of a chaincode query that queries another chaincode without security enabled
func TestChaincodeQueryChaincode(t *testing.T) {
//no longer supporting Query
t.Skip()
var peerLis net.Listener
var err error
if peerLis, err = initPeer(); err != nil {
t.Fail()
t.Logf("Error registering user %s", err)
return
}
defer finitPeer(peerLis)
if err = chaincodeQueryChaincode(""); err != nil {
t.Fail()
t.Logf("Error executing test %s", err)
return
}
}
// Test the execution of a chaincode that queries another chaincode with an invalid parameter. Should receive an error
// from the called chaincode
func TestChaincodeQueryChaincodeErrorCase(t *testing.T) {
//query no longer supported
t.Skip()
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
// Deploy first chaincode
url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
cID1 := &pb.ChaincodeID{Path: url1}
f := "init"
args := util.ToChaincodeArgs(f, "a", "100", "b", "200")
spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec1)
chaincodeID1 := spec1.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID1, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
return
}
time.Sleep(time.Second)
// Deploy second chaincode
url2 := "github.com/hyperledger/fabric/examples/chaincode/go/passthru"
cID2 := &pb.ChaincodeID{Path: url2}
f = "init"
args = util.ToChaincodeArgs(f)
spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec2)
chaincodeID2 := spec2.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID2, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
time.Sleep(time.Second)
// Invoke second chaincode, which will in turn invoke the first chaincode but pass bad params
f = chaincodeID1
args = util.ToChaincodeArgs(f, "query", "c")
spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}}
// Invoke chaincode
_, _, _, err = invoke(ctxt, spec2)
if err == nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", chaincodeID2, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
if strings.Index(err.Error(), "Nil amount for c") < 0 {
t.Fail()
t.Logf("Unexpected error %s", err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
return
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
}
// Test the execution of a chaincode query that queries another chaincode with security enabled
// NOTE: this really needs to be a behave test. Remove when we have support in behave for multiple chaincodes
func TestChaincodeQueryChaincodeWithSec(t *testing.T) {
//query no longer supported
t.Skip()
viper.Set("security.enabled", "true")
//Initialize crypto
if err := crypto.Init(); err != nil {
panic(fmt.Errorf("Failed initializing the crypto layer [%s]", err))
}
//set paths for memberservice to pick up
viper.Set("peer.fileSystemPath", filepath.Join(os.TempDir(), "hyperledger", "production"))
viper.Set("server.rootpath", filepath.Join(os.TempDir(), "ca"))
var err error
var memSrvcLis net.Listener
if memSrvcLis, err = initMemSrvc(); err != nil {
t.Fail()
t.Logf("Error registering user %s", err)
return
}
defer finitMemSrvc(memSrvcLis)
time.Sleep(2 * time.Second)
var peerLis net.Listener
if peerLis, err = initPeer(); err != nil {
t.Fail()
t.Logf("Error registering user %s", err)
return
}
defer finitPeer(peerLis)
if err = crypto.RegisterClient("jim", nil, "jim", "6avZQLwcUe9b"); err != nil {
t.Fail()
t.Logf("Error registering user %s", err)
return
}
//login as jim and test chaincode-chaincode interaction with security
if err = chaincodeQueryChaincode("jim"); err != nil {
t.Fail()
t.Logf("Error executing test %s", err)
return
}
}
// Test the invocation of a transaction.
func TestRangeQuery(t *testing.T) {
//TODO enable after ledger enables RangeQuery
t.Skip()
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
url := "github.com/hyperledger/fabric/examples/chaincode/go/map"
cID := &pb.ChaincodeID{Path: url}
f := "init"
args := util.ToChaincodeArgs(f)
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, err = deploy(ctxt, spec)
chaincodeID := spec.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
// Invoke the chaincode's "keys" function, which exercises the range query
f = "keys"
args = util.ToChaincodeArgs(f)
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: args}}
_, _, _, err = invoke(ctxt, spec)
if err != nil {
t.Fail()
t.Logf("Error invoking <%s>: %s", chaincodeID, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
func TestGetEvent(t *testing.T) {
lis, err := initPeer()
if err != nil {
t.Fail()
t.Logf("Error creating peer: %s", err)
}
defer finitPeer(lis)
var ctxt = context.Background()
url := "github.com/hyperledger/fabric/examples/chaincode/go/eventsender"
cID := &pb.ChaincodeID{Path: url}
f := "init"
spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: util.ToChaincodeArgs(f)}}
_, err = deploy(ctxt, spec)
chaincodeID := spec.ChaincodeID.Name
if err != nil {
t.Fail()
t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
return
}
time.Sleep(time.Second)
args := util.ToChaincodeArgs("", "i", "am", "satoshi")
spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Args: args}}
var ccevt *pb.ChaincodeEvent
ccevt, _, _, err = invoke(ctxt, spec)
if err != nil {
t.Logf("Error invoking chaincode %s(%s)", chaincodeID, err)
t.Fail()
}
if ccevt == nil {
t.Logf("Error ccevt is nil %s(%s)", chaincodeID, err)
t.Fail()
}
if ccevt.ChaincodeID != chaincodeID {
t.Logf("Error ccevt id(%s) != cid(%s)", ccevt.ChaincodeID, chaincodeID)
t.Fail()
}
if strings.Index(string(ccevt.Payload), "i,am,satoshi") < 0 {
t.Logf("Error expected event not found (%s)", string(ccevt.Payload))
t.Fail()
}
GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
}
func TestMain(m *testing.M) {
SetupTestConfig()
os.Exit(m.Run())
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 |
src/probe2vec/theano_minibatcher.py | import sys
import re
import gc
import time
from .token_map import UNK
from .unigram_dictionary import UnigramDictionary
import numpy as np
import os
# Only import theano and lasagne if environment permits it
exclude_theano_set = 'EXCLUDE_THEANO' in os.environ
if exclude_theano_set and int(os.environ['EXCLUDE_THEANO']) == 1:
# Don't import theano and lasagne
pass
else:
# Do import theano and lasagne
from theano import shared, function, tensor as T
class TheanoMinibatcher(object):
'''
This generates a theano shared variable storing the full dataset
-- all training examples. When the theano device setting is the
GPU, shared variables are stored on the GPU, so this has the
effect of loading the full dataset onto the GPU.
One of the return values is a (set of) symbolic theano variable(s)
corresponding to a single minibatch of the data. This symbolic
variable can be used to set up the training function. What will
happen during training is that this variable acts as a sliding
"window" on the full dataset, selecting each minibatch in turn,
even though the entire dataset is loaded into GPU memory.
The indexing that causes the symbolic minibatch to address different
parts of the dataset is itself a shared variable, and it can be
updated using an update tuple provided to the updates list of a
theano function. The necessary update tuple is also provided as
a return value, so that it can be incorporated into the training
function
'''
def __init__(self, batch_size=1000, dtype="float32", num_dims=2):
self.batch_size = batch_size
self.dtype = dtype
self.num_dims = num_dims
self._setup_batching()
def _initialize_data_container(self, num_dims, dtype):
# Validate num_dims
if(num_dims < 1 or not isinstance(num_dims, int)):
raise ValueError(
'TheanoMinibatcher: num_dims must be an integer equal to or '
'greater than 1.'
)
# Create the first dim, which houses the dataset
data_container = []
num_dims -= 1
# Repeatedly add a nested dimension, so that we have num_dims nestings
nested_container_handle = data_container
for dim_num in range(num_dims):
new_inner_container = []
nested_container_handle.append(new_inner_container)
nested_container_handle = new_inner_container
return np.array(data_container, dtype=dtype)
def reset(self):
'''
Reset the internal batch_num pointer to the start of the dataset
'''
self.batch_num.set_value(np.int32(0))
def _setup_batching(self):
# Make an empty shared variable that will store the dataset
# Although empty, we can setup the relationship between the
# minibatch variable and the full dataset
self.dataset = shared(
self._initialize_data_container(self.num_dims, self.dtype)
)
# Make minibatch by indexing into the dataset
self.batch_num = shared(np.int32(0))
batch_start = self.batch_num * self.batch_size
batch_end = batch_start + self.batch_size
self.batch = self.dataset[batch_start : batch_end,]
# Define an update that moves the batch window through the dataset
self.updates = [(self.batch_num, self.batch_num+1)]
def load_dataset(self, dataset):
# Load the dataset onto the gpu
self.dataset.set_value(dataset)
# Determine the total number of minibatches
self.num_batches = int(np.ceil(len(dataset) / float(self.batch_size)))
return self.num_batches
def get_batch(self):
return self.batch
def get_updates(self):
return self.updates
def get_num_batches(self):
return self.num_batches
class NoiseContrastiveTheanoMinibatcher(TheanoMinibatcher):
def __init__(
self,
batch_size=1000,
noise_ratio=15,
dtype="float32",
num_dims=2
):
self.batch_size = batch_size
self.noise_ratio = noise_ratio
self.dtype = dtype
self.num_dims = num_dims
self._setup_batching()
def _setup_batching(self):
# Make an empty shared variable that will store the dataset
# Although empty, we can setup the relationship between the
# minibatch variable and the full dataset
self.signal_examples = shared(
self._initialize_data_container(self.num_dims, self.dtype)
)
self.noise_examples = shared(
self._initialize_data_container(self.num_dims, self.dtype)
)
# Make minibatch by indexing into the dataset
self.batch_num = shared(np.int32(0))
# Take a sliding minibatch window on the signal_examples
signal_batch_start = self.batch_num * self.batch_size
signal_batch_end = signal_batch_start + self.batch_size
signal_batch = self.signal_examples[signal_batch_start : signal_batch_end,]
# Take a sliding minibatch window on the noise_examples
noise_batch_start = self.batch_num * self.batch_size * self.noise_ratio
noise_batch_end = noise_batch_start + self.batch_size * self.noise_ratio
noise_batch = self.noise_examples[noise_batch_start : noise_batch_end,]
# Concatenate the signal and noise minibatch into the full minibatch
self.batch = T.concatenate((signal_batch, noise_batch))
# Define an update that moves the batch window through the dataset
self.updates = [(self.batch_num, self.batch_num+1)]
def load_dataset(self, signal_examples, noise_examples):
'''
Load the dataset onto the GPU. Determine (and return) the number of
minibatches.
'''
# Reset the internal pointer
self.reset()
# Determine the total number of minibatches
self.num_batches = int(np.ceil(len(signal_examples) / float(self.batch_size)))
# Check if the dataset divides evenly into batches
warn_last_batch = False
expected_len_signal = self.num_batches * self.batch_size
if expected_len_signal > len(signal_examples):
warn_last_batch = True
expected_len_noise = self.num_batches * self.batch_size * self.noise_ratio
if expected_len_noise > len(noise_examples):
warn_last_batch = True
# If dataset doesn't divide evenly into batches, warn the user, and
# drop the last batch
if warn_last_batch:
print('Warning: incomplete last batch will be omitted')
# We omit the last batch simply by reporting fewer total batches.
# It is actually up to the caller to only use self.num_batches
# minibatches.
self.num_batches -= 1
# Load the dataset onto the gpu
self.signal_examples.set_value(signal_examples)
self.noise_examples.set_value(noise_examples)
return self.num_batches
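# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative): how the symbolic minibatch and its
# update tuple plug into a theano training function.  The quadratic "loss"
# below is a placeholder objective, not something this module defines; the
# noise-contrastive variant is driven the same way, except that its
# load_dataset() takes separate signal and noise arrays.
# ---------------------------------------------------------------------------
if __name__ == '__main__' and not exclude_theano_set:
    minibatcher = TheanoMinibatcher(batch_size=5, dtype='float32', num_dims=2)
    dataset = np.arange(40, dtype='float32').reshape(20, 2)
    num_batches = minibatcher.load_dataset(dataset)

    # The symbolic batch is a sliding window over the dataset; the update
    # tuple advances that window each time the compiled function is called.
    batch = minibatcher.get_batch()
    loss = T.sum(batch ** 2)
    step = function([], loss, updates=minibatcher.get_updates())

    minibatcher.reset()
    for _ in range(num_batches):
        step()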
| [] | [] | ["EXCLUDE_THEANO"] | [] | ["EXCLUDE_THEANO"] | python | 1 | 0 |
map-app/src/main/java/org/gameontext/map/LogView.java | /*******************************************************************************
* Copyright (c) 2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package org.gameontext.map;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Base64;
import java.util.StringTokenizer;
import java.util.stream.Stream;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
/**
* Servlet implementation class LogView
*/
@WebServlet("/LogView")
public class LogView extends HttpServlet {
private static final long serialVersionUID = 1L;
/**
* @see HttpServlet#HttpServlet()
*/
public LogView() {
super();
// TODO Auto-generated constructor stub
}
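// Writes an HTML listing of the entries in dir; regular files link back to
// this servlet with cmd=view and a "choice" token of <prefix><index>.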
public void listFilesInDir(PrintWriter out, String dir, String prefix) {
File f = new File(dir);
if (f.list() != null) {
long count = 0;
for (String c : f.list()) {
File cf = new File(f, c);
if (cf.isDirectory()) {
out.println(" - " + prefix + count + " - " + c + " (dir)<br>");
} else {
out.println(" - " + prefix + count + " - <a href=\"?cmd=view&choice=" + prefix + count + "\">" + c
+ "</a><br>");
}
count++;
}
if(f.list().length==0){
out.println(" - Is empty<br>");
}
} else {
out.println(" - Is empty, or not a directory." + "<br>");
}
}
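// Streams the contents of the countString-th entry in dir to the response,
// matching the index assigned by listFilesInDir above.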
public void viewFile(PrintWriter out, String dir, String countString) {
File f = new File(dir);
if (f.list() != null) {
long count = 0;
for (String c : f.list()) {
if (countString.equals("" + count)) {
System.out.println(
"LOGVIEW: Asked to view " + dir + " " + countString + " " + Paths.get(dir, c).toString());
try (Stream<String> stream = Files.lines(Paths.get(dir, c))) {
stream.forEach(out::println);
} catch (IOException io) {
out.println("ERROR READING FILE " + c + " " + io.getMessage());
}
}
count++;
}
} else {
out.println("Directory does not exist to view file from.");
}
}
/**
* @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse
* response)
*/
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
String authHeader = request.getHeader("Authorization");
if (authHeader != null) {
StringTokenizer st = new StringTokenizer(authHeader);
if (st.hasMoreTokens()) {
String basic = st.nextToken();
if (basic.equalsIgnoreCase("Basic")) {
try {
String credentials = new String(Base64.getDecoder().decode(st.nextToken()));
int p = credentials.indexOf(":");
if (p != -1) {
String login = credentials.substring(0, p).trim();
String password = credentials.substring(p + 1).trim();
String expectedPassword;
try {
expectedPassword = (String) new InitialContext().lookup("registrationSecret");
} catch (NamingException e) {
response.sendError(HttpServletResponse.SC_FORBIDDEN,
"unable to obtain pw to auth against");
return;
}
if ("admin".equals(login) && expectedPassword.equals(password)) {
String cmd = request.getParameter("cmd");
PrintWriter out = response.getWriter();
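// Supported commands:
//   cmd=list                 - list the server output, log and ffdc directories
//   cmd=view&choice=<p><n>   - stream file <n> from the directory encoded by
//                              prefix <p>: o=WLP_OUTPUT_DIR, l=log dir,
//                              f=ffdc dir, x=/logs, y=/logs/ffdc
//   (anything else)          - show the welcome page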
if ("list".equals(cmd)) {
String serverName;
try {
serverName = (String) new InitialContext().lookup("serverName");
} catch (NamingException e) {
serverName = "Naming Exception "+e;
}
String serverOutputDir;
try {
serverOutputDir = (String) new InitialContext().lookup("serverOutputDir");
} catch (NamingException e) {
serverOutputDir = "Naming Exception "+e;
}
out.println("${wlp.server.name} "+serverName+"<br>");
out.println("${server.output.dir} "+serverOutputDir+"<br>");
response.addHeader("Content-Type", MediaType.TEXT_HTML);
String outdir = System.getenv("WLP_OUTPUT_DIR");
out.println("WLP_OUTPUT_DIR: " + String.valueOf(outdir) + "<br>");
if (outdir != null) {
listFilesInDir(out, outdir, "o");
}
String logdir = System.getenv("X_LOG_DIR");
if (logdir != null) {
out.println("X_LOG_DIR: " + String.valueOf(logdir) + "<br>");
listFilesInDir(out, logdir, "l");
String ffdcDir = new File(new File(logdir), "ffdc").getAbsolutePath();
out.println("FFDC_DIR: " + String.valueOf(ffdcDir) + "<br>");
listFilesInDir(out, ffdcDir, "f");
} else {
// going to try default location..
out.println("LOG_DIR set as WLP_OUTPUT_DIR/defaultServer/logs" + "<br>");
logdir = Paths.get(outdir, "defaultServer", "logs").toString();
listFilesInDir(out, logdir, "l");
String ffdcDir = new File(new File(logdir), "ffdc").getAbsolutePath();
out.println("FFDC_DIR: " + String.valueOf(ffdcDir) + "<br>");
listFilesInDir(out, ffdcDir, "f");
}
// going to try default location..
String hardcoded = "/logs";
out.println("Looking in hardcoded location "+hardcoded+"<br>");
String otherlogdir = Paths.get(hardcoded).toString();
listFilesInDir(out, otherlogdir, "x");
String hardffdcDir = new File(new File(otherlogdir), "ffdc").getAbsolutePath();
out.println("hardcoded ffdc: " + String.valueOf(hardffdcDir) + "<br>");
listFilesInDir(out, hardffdcDir, "y");
} else if ("view".equals(cmd)) {
response.addHeader("Content-Type", MediaType.TEXT_PLAIN);
String choice = request.getParameter("choice");
if (choice != null) {
if (choice.startsWith("o")) {
String outdir = System.getenv("WLP_OUTPUT_DIR");
viewFile(out, outdir, choice.substring(1).trim());
} else if (choice.startsWith("l")) {
String logdir = System.getenv("X_LOG_DIR");
if (logdir == null) {
String outdir = System.getenv("WLP_OUTPUT_DIR");
logdir = Paths.get(outdir, "defaultServer", "logs").toString();
}
viewFile(out, logdir, choice.substring(1).trim());
} else if (choice.startsWith("f")) {
String logdir = System.getenv("X_LOG_DIR");
if (logdir == null) {
String outdir = System.getenv("WLP_OUTPUT_DIR");
logdir = Paths.get(outdir, "defaultServer", "logs").toString();
}
String ffdcDir = new File(new File(logdir), "ffdc").getAbsolutePath();
viewFile(out, ffdcDir, choice.substring(1).trim());
} else if (choice.startsWith("x")) {
String hardcoded = "/logs";
viewFile(out, hardcoded, choice.substring(1).trim());
} else if (choice.startsWith("y")) {
String hardcoded = "/logs/ffdc";
viewFile(out, hardcoded, choice.substring(1).trim());
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
"view cmd requires choice param");
}
} else {
response.addHeader("Content-Type", MediaType.TEXT_HTML);
out.println("<center><h1>Welcome to LogView.</h1></center>"
+ "<center>Your friendly logging choice.</center><hr><p><p><center>This logging console is shoeware, you may use it, but you must buy Ozzy shoes.</center><p><p>");
out.println("<center><a href=\"?cmd=list\">Take me to the logs!!... </a></center>");
}
}
} else {
response.sendError(HttpServletResponse.SC_FORBIDDEN,
"badly formed auth header.");
return;
}
} catch (UnsupportedEncodingException e) {
response.sendError(HttpServletResponse.SC_FORBIDDEN,
"Error decoding auth");
return;
}
}
}
} else {
response.addHeader("WWW-Authenticate", "Basic realm=\"Ozzy LogView\"");
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Access denied");
return;
}
}
/**
* @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse
* response)
*/
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
// TODO Auto-generated method stub
doGet(request, response);
}
}
| ["\"WLP_OUTPUT_DIR\"", "\"X_LOG_DIR\"", "\"WLP_OUTPUT_DIR\"", "\"X_LOG_DIR\"", "\"WLP_OUTPUT_DIR\"", "\"X_LOG_DIR\"", "\"WLP_OUTPUT_DIR\""] | [] | ["X_LOG_DIR", "WLP_OUTPUT_DIR"] | [] | ["X_LOG_DIR", "WLP_OUTPUT_DIR"] | java | 2 | 0 |
setup.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
from torch.utils.hipify import hipify_python
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 6], "Requires PyTorch >= 1.6"
def get_version():
init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
init_py = open(init_py_path, "r").readlines()
version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
version = version_line.split("=")[-1].strip().strip("'\"")
# The following is used to build release packages.
# Users should never use it.
suffix = os.getenv("D2_VERSION_SUFFIX", "")
version = version + suffix
if os.getenv("BUILD_NIGHTLY", "0") == "1":
from datetime import datetime
date_str = datetime.today().strftime("%y%m%d")
version = version + ".dev" + date_str
new_init_py = [l for l in init_py if not l.startswith("__version__")]
new_init_py.append('__version__ = "{}"\n'.format(version))
with open(init_py_path, "w") as f:
f.write("".join(new_init_py))
return version
def get_extensions():
this_dir = path.dirname(path.abspath(__file__))
extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")
main_source = path.join(extensions_dir, "vision.cpp")
sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = (
True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
)
hipify_ver = (
[int(x) for x in torch.utils.hipify.__version__.split(".")]
if hasattr(torch.utils.hipify, "__version__")
else [0, 0, 0]
)
if is_rocm_pytorch and hipify_ver < [1, 0, 0]:
# Earlier versions of hipification and extension modules were not
# transparent, i.e. would require an explicit call to hipify, and the
# hipification would introduce "hip" subdirectories, possibly changing
# the relationship between source and header files.
# This path is maintained for backwards compatibility.
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="/detectron2/layers/csrc/*",
show_detailed=True,
is_pytorch_extension=True,
)
source_cuda = glob.glob(path.join(extensions_dir, "**", "hip", "*.hip")) + glob.glob(
path.join(extensions_dir, "hip", "*.hip")
)
shutil.copy(
"detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h",
"detectron2/layers/csrc/box_iou_rotated/hip/box_iou_rotated_utils.h",
)
shutil.copy(
"detectron2/layers/csrc/deformable/deform_conv.h",
"detectron2/layers/csrc/deformable/hip/deform_conv.h",
)
sources = [main_source] + sources
sources = [
s
for s in sources
if not is_rocm_pytorch or torch_ver < [1, 7] or not s.endswith("hip/vision.cpp")
]
else:
# common code between cuda and rocm platforms,
# for hipify version [1,0,0] and later.
source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
"FORCE_CUDA", "0"
) == "1":
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-O3",
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
define_macros += [("WITH_HIP", None)]
extra_compile_args["nvcc"] = []
if torch_ver < [1, 7]:
# supported by https://github.com/pytorch/pytorch/pull/43931
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
include_dirs = [extensions_dir]
ext_modules = [
extension(
"detectron2._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
def get_model_zoo_configs() -> List[str]:
"""
Return a list of configs to include in package for model zoo. Copy over these configs inside
detectron2/model_zoo.
"""
# Use absolute paths while symlinking.
source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
destination = path.join(
path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs"
)
# Symlink the config directory inside package to have a cleaner pip install.
# Remove stale symlink/directory from a previous build.
if path.exists(source_configs_dir):
if path.islink(destination):
os.unlink(destination)
elif path.isdir(destination):
shutil.rmtree(destination)
if not path.exists(destination):
try:
os.symlink(source_configs_dir, destination)
except OSError:
# Fall back to copying if symlink fails: ex. on Windows.
shutil.copytree(source_configs_dir, destination)
config_paths = glob.glob("configs/**/*.yaml", recursive=True)
return config_paths
# For projects that are relatively small and provide features that are very close
# to detectron2's core functionalities, we install them under detectron2.projects
PROJECTS = {
"detectron2.projects.point_rend": "projects/PointRend/point_rend",
"detectron2.projects.deeplab": "projects/DeepLab/deeplab",
"detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
}
setup(
name="detectron2",
version=get_version(),
author="FAIR",
url="https://github.com/facebookresearch/detectron2",
description="Detectron2 is FAIR's next-generation research "
"platform for object detection and segmentation.",
packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
package_dir=PROJECTS,
package_data={"detectron2.model_zoo": get_model_zoo_configs()},
python_requires=">=3.6",
install_requires=[
# Do not add opencv here. Just like pytorch, user should install
# opencv themselves, preferrably by OS's package manager, or by
# choosing the proper pypi package name at https://github.com/skvark/opencv-python
"termcolor>=1.1",
"Pillow>=7.1", # or use pillow-simd for better performance
"yacs>=0.1.6",
"tabulate",
"cloudpickle",
"matplotlib",
"tqdm>4.29.0",
"tensorboard",
# Lock version of fvcore/iopath because they may have breaking changes
# NOTE: when updating fvcore/iopath version, make sure fvcore depends
# on the same version of iopath.
"fvcore>=0.1.5,<0.1.6", # required like this to make it pip installable
"iopath>=0.1.7,<0.1.8",
"pycocotools>=2.0.2", # corresponds to https://github.com/ppwwyyxx/cocoapi
"future", # used by caffe2
"pydot", # used to save caffe2 SVGs
"dataclasses; python_version<'3.7'",
"omegaconf==2.1.0.dev22",
],
extras_require={
"all": [
"shapely",
"psutil",
"hydra-core",
"panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
],
"dev": [
"flake8==3.8.1",
"isort==4.3.21",
"black==20.8b1",
"flake8-bugbear",
"flake8-comprehensions",
],
},
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| [] | [] | ["FORCE_CUDA", "D2_VERSION_SUFFIX", "CC", "BUILD_NIGHTLY"] | [] | ["FORCE_CUDA", "D2_VERSION_SUFFIX", "CC", "BUILD_NIGHTLY"] | python | 4 | 0 |
configor_test.go | package configor
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"reflect"
"testing"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v2"
)
type Anonymous struct {
Description string
}
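// testConfig exercises configor's struct tags: `default` fills a field that is
// left at its zero value, `required` makes Load return an error when the field
// is empty, and `env` names an environment variable that can override the
// field (exercised further in the environment-based tests below).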
type testConfig struct {
APPName string `default:"configor" json:",omitempty"`
Hosts []string
DB struct {
Name string
User string `default:"root"`
Password string `required:"true" env:"DBPassword"`
Port uint `default:"3306" json:",omitempty"`
SSL bool `default:"true" json:",omitempty"`
}
Contacts []struct {
Name string
Email string `required:"true"`
}
Anonymous `anonymous:"true"`
private string
}
func generateDefaultConfig() testConfig {
return testConfig{
APPName: "configor",
Hosts: []string{"http://example.org", "http://jinzhu.me"},
DB: struct {
Name string
User string `default:"root"`
Password string `required:"true" env:"DBPassword"`
Port uint `default:"3306" json:",omitempty"`
SSL bool `default:"true" json:",omitempty"`
}{
Name: "configor",
User: "configor",
Password: "configor",
Port: 3306,
SSL: true,
},
Contacts: []struct {
Name string
Email string `required:"true"`
}{
{
Name: "Jinzhu",
Email: "[email protected]",
},
},
Anonymous: Anonymous{
Description: "This is an anonymous embedded struct whose environment variables should NOT include 'ANONYMOUS'",
},
}
}
func TestLoadNormaltestConfig(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
Load(&result, file.Name())
if !reflect.DeepEqual(result, config) {
t.Errorf("result should equal to original configuration")
}
}
} else {
t.Errorf("failed to marshal config")
}
}
func TestLoadtestConfigFromTomlWithExtension(t *testing.T) {
var (
config = generateDefaultConfig()
buffer bytes.Buffer
)
if err := toml.NewEncoder(&buffer).Encode(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor.toml"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(buffer.Bytes())
var result testConfig
Load(&result, file.Name())
if !reflect.DeepEqual(result, config) {
t.Errorf("result should equal to original configuration")
}
}
} else {
t.Errorf("failed to marshal config")
}
}
func TestLoadtestConfigFromTomlWithoutExtension(t *testing.T) {
var (
config = generateDefaultConfig()
buffer bytes.Buffer
)
if err := toml.NewEncoder(&buffer).Encode(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(buffer.Bytes())
var result testConfig
Load(&result, file.Name())
if !reflect.DeepEqual(result, config) {
t.Errorf("result should equal to original configuration")
}
}
} else {
t.Errorf("failed to marshal config")
}
}
func TestDefaultValue(t *testing.T) {
config := generateDefaultConfig()
config.APPName = ""
config.DB.Port = 0
config.DB.SSL = false
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
Load(&result, file.Name())
if !reflect.DeepEqual(result, generateDefaultConfig()) {
t.Errorf("result should be set default value correctly")
}
}
} else {
t.Errorf("failed to marshal config")
}
}
func TestMissingRequiredValue(t *testing.T) {
config := generateDefaultConfig()
config.DB.Password = ""
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
if err := Load(&result, file.Name()); err == nil {
t.Errorf("Should got error when load configuration missing db password")
}
}
} else {
t.Errorf("failed to marshal config")
}
}
func TestUnmatchedKeyInTomltestConfigFile(t *testing.T) {
type configStruct struct {
Name string
}
type configFile struct {
Name string
Test string
}
config := configFile{Name: "test", Test: "ATest"}
file, err := ioutil.TempFile("/tmp", "configor")
if err != nil {
t.Fatal("Could not create temp file")
}
defer os.Remove(file.Name())
defer file.Close()
filename := file.Name()
if err := toml.NewEncoder(file).Encode(config); err == nil {
var result configStruct
// Do not return error when there are unmatched keys but ErrorOnUnmatchedKeys is false
if err := New(&Config{}).Load(&result, filename); err != nil {
t.Errorf("Should NOT get error when loading configuration with extra keys")
}
// Return an error when there are unmatched keys and ErrorOnUnmatchedKeys is true
err := New(&Config{ErrorOnUnmatchedKeys: true}).Load(&result, filename)
if err == nil {
t.Errorf("Should get error when loading configuration with extra keys")
}
// The error should be of type UnmatchedTomlKeysError
tomlErr, ok := err.(*UnmatchedTomlKeysError)
if !ok {
t.Errorf("Should get UnmatchedTomlKeysError error when loading configuration with extra keys")
}
// The error.Keys() function should return the "Test" key
keys := GetStringTomlKeys(tomlErr.Keys)
if len(keys) != 1 || keys[0] != "Test" {
t.Errorf("The UnmatchedTomlKeysError should contain the Test key")
}
} else {
t.Errorf("failed to marshal config")
}
// Add .toml to the file name and test again
err = os.Rename(filename, filename+".toml")
if err != nil {
t.Errorf("Could not add suffix to file")
}
filename = filename + ".toml"
defer os.Remove(filename)
var result configStruct
// Do not return error when there are unmatched keys but ErrorOnUnmatchedKeys is false
if err := New(&Config{}).Load(&result, filename); err != nil {
t.Errorf("Should NOT get error when loading configuration with extra keys. Error: %v", err)
}
// Return an error when there are unmatched keys and ErrorOnUnmatchedKeys is true
err = New(&Config{ErrorOnUnmatchedKeys: true}).Load(&result, filename)
if err == nil {
t.Errorf("Should get error when loading configuration with extra keys")
}
// The error should be of type UnmatchedTomlKeysError
tomlErr, ok := err.(*UnmatchedTomlKeysError)
if !ok {
t.Errorf("Should get UnmatchedTomlKeysError error when loading configuration with extra keys")
}
// The error.Keys() function should return the "Test" key
keys := GetStringTomlKeys(tomlErr.Keys)
if len(keys) != 1 || keys[0] != "Test" {
t.Errorf("The UnmatchedTomlKeysError should contain the Test key")
}
}
func TestUnmatchedKeyInYamltestConfigFile(t *testing.T) {
type configStruct struct {
Name string
}
type configFile struct {
Name string
Test string
}
config := configFile{Name: "test", Test: "ATest"}
file, err := ioutil.TempFile("/tmp", "configor")
if err != nil {
t.Fatal("Could not create temp file")
}
defer os.Remove(file.Name())
defer file.Close()
filename := file.Name()
if data, err := yaml.Marshal(config); err == nil {
file.WriteString(string(data))
var result configStruct
// Do not return error when there are unmatched keys but ErrorOnUnmatchedKeys is false
if err := New(&Config{}).Load(&result, filename); err != nil {
t.Errorf("Should NOT get error when loading configuration with extra keys. Error: %v", err)
}
// Return an error when there are unmatched keys and ErrorOnUnmatchedKeys is true
if err := New(&Config{ErrorOnUnmatchedKeys: true}).Load(&result, filename); err == nil {
t.Errorf("Should get error when loading configuration with extra keys")
// The error should be of type *yaml.TypeError
} else if _, ok := err.(*yaml.TypeError); !ok {
// || !strings.Contains(err.Error(), "not found in struct") {
t.Errorf("Error should be of type yaml.TypeError. Instead error is %v", err)
}
} else {
t.Errorf("failed to marshal config")
}
// Add .yaml to the file name and test again
err = os.Rename(filename, filename+".yaml")
if err != nil {
t.Errorf("Could not add suffix to file")
}
filename = filename + ".yaml"
defer os.Remove(filename)
var result configStruct
// Do not return error when there are unmatched keys but ErrorOnUnmatchedKeys is false
if err := New(&Config{}).Load(&result, filename); err != nil {
t.Errorf("Should NOT get error when loading configuration with extra keys. Error: %v", err)
}
// Return an error when there are unmatched keys and ErrorOnUnmatchedKeys is true
if err := New(&Config{ErrorOnUnmatchedKeys: true}).Load(&result, filename); err == nil {
t.Errorf("Should get error when loading configuration with extra keys")
// The error should be of type *yaml.TypeError
} else if _, ok := err.(*yaml.TypeError); !ok {
// || !strings.Contains(err.Error(), "not found in struct") {
t.Errorf("Error should be of type yaml.TypeError. Instead error is %v", err)
}
}
func TestLoadtestConfigurationByEnvironment(t *testing.T) {
config := generateDefaultConfig()
config2 := struct {
APPName string
}{
APPName: "config2",
}
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
configBytes, _ := yaml.Marshal(config)
config2Bytes, _ := yaml.Marshal(config2)
ioutil.WriteFile(file.Name()+".yaml", configBytes, 0644)
defer os.Remove(file.Name() + ".yaml")
ioutil.WriteFile(file.Name()+".production.yaml", config2Bytes, 0644)
defer os.Remove(file.Name() + ".production.yaml")
var result testConfig
os.Setenv("CONFIGOR_ENV", "production")
defer os.Setenv("CONFIGOR_ENV", "")
if err := Load(&result, file.Name()+".yaml"); err != nil {
t.Errorf("No error should happen when load configurations, but got %v", err)
}
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "config2"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should be load configurations by environment correctly")
}
}
}
func TestLoadtestConfigurationByEnvironmentSetBytestConfig(t *testing.T) {
config := generateDefaultConfig()
config2 := struct {
APPName string
}{
APPName: "production_config2",
}
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
configBytes, _ := yaml.Marshal(config)
config2Bytes, _ := yaml.Marshal(config2)
ioutil.WriteFile(file.Name()+".yaml", configBytes, 0644)
defer os.Remove(file.Name() + ".yaml")
ioutil.WriteFile(file.Name()+".production.yaml", config2Bytes, 0644)
defer os.Remove(file.Name() + ".production.yaml")
var result testConfig
var Configor = New(&Config{Environment: "production"})
if Configor.Load(&result, file.Name()+".yaml"); err != nil {
t.Errorf("No error should happen when load configurations, but got %v", err)
}
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "production_config2"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should be load configurations by environment correctly")
}
if Configor.GetEnvironment() != "production" {
t.Errorf("configor's environment should be production")
}
}
}
func TestOverwritetestConfigurationWithEnvironmentWithDefaultPrefix(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
os.Setenv("CONFIGOR_APPNAME", "config2")
os.Setenv("CONFIGOR_HOSTS", "- http://example.org\n- http://jinzhu.me")
os.Setenv("CONFIGOR_DB_NAME", "db_name")
defer os.Setenv("CONFIGOR_APPNAME", "")
defer os.Setenv("CONFIGOR_HOSTS", "")
defer os.Setenv("CONFIGOR_DB_NAME", "")
Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "config2"
defaultConfig.Hosts = []string{"http://example.org", "http://jinzhu.me"}
defaultConfig.DB.Name = "db_name"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestOverwritetestConfigurationWithEnvironment(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
os.Setenv("CONFIGOR_ENV_PREFIX", "app")
os.Setenv("APP_APPNAME", "config2")
os.Setenv("APP_DB_NAME", "db_name")
defer os.Setenv("CONFIGOR_ENV_PREFIX", "")
defer os.Setenv("APP_APPNAME", "")
defer os.Setenv("APP_DB_NAME", "")
Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "config2"
defaultConfig.DB.Name = "db_name"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestOverwritetestConfigurationWithEnvironmentThatSetBytestConfig(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
os.Setenv("APP1_APPName", "config2")
os.Setenv("APP1_DB_Name", "db_name")
defer os.Setenv("APP1_APPName", "")
defer os.Setenv("APP1_DB_Name", "")
var result testConfig
var Configor = New(&Config{ENVPrefix: "APP1"})
Configor.Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "config2"
defaultConfig.DB.Name = "db_name"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestResetPrefixToBlank(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
os.Setenv("CONFIGOR_ENV_PREFIX", "-")
os.Setenv("APPNAME", "config2")
os.Setenv("DB_NAME", "db_name")
defer os.Setenv("CONFIGOR_ENV_PREFIX", "")
defer os.Setenv("APPNAME", "")
defer os.Setenv("DB_NAME", "")
Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "config2"
defaultConfig.DB.Name = "db_name"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestResetPrefixToBlank2(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
os.Setenv("CONFIGOR_ENV_PREFIX", "-")
os.Setenv("APPName", "config2")
os.Setenv("DB_Name", "db_name")
defer os.Setenv("CONFIGOR_ENV_PREFIX", "")
defer os.Setenv("APPName", "")
defer os.Setenv("DB_Name", "")
Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.APPName = "config2"
defaultConfig.DB.Name = "db_name"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestReadFromEnvironmentWithSpecifiedEnvName(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
os.Setenv("DBPassword", "db_password")
defer os.Setenv("DBPassword", "")
Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.DB.Password = "db_password"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestAnonymousStruct(t *testing.T) {
config := generateDefaultConfig()
if bytes, err := json.Marshal(config); err == nil {
if file, err := ioutil.TempFile("/tmp", "configor"); err == nil {
defer file.Close()
defer os.Remove(file.Name())
file.Write(bytes)
var result testConfig
os.Setenv("CONFIGOR_DESCRIPTION", "environment description")
defer os.Setenv("CONFIGOR_DESCRIPTION", "")
Load(&result, file.Name())
var defaultConfig = generateDefaultConfig()
defaultConfig.Anonymous.Description = "environment description"
if !reflect.DeepEqual(result, defaultConfig) {
t.Errorf("result should equal to original configuration")
}
}
}
}
func TestENV(t *testing.T) {
if ENV() != "test" {
t.Errorf("Env should be test when running `go test`, instead env is %v", ENV())
}
os.Setenv("CONFIGOR_ENV", "production")
defer os.Setenv("CONFIGOR_ENV", "")
if ENV() != "production" {
t.Errorf("Env should be production when set it with CONFIGOR_ENV")
}
}
type slicetestConfig struct {
Test1 int
Test2 []struct {
Test2Ele1 int
Test2Ele2 int
}
}
func TestSliceFromEnv(t *testing.T) {
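	// Slice elements are populated from environment variables named CONFIGOR_<FIELD>_<INDEX>_<SUBFIELD>.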
var tc = slicetestConfig{
Test1: 1,
Test2: []struct {
Test2Ele1 int
Test2Ele2 int
}{
{
Test2Ele1: 1,
Test2Ele2: 2,
},
{
Test2Ele1: 3,
Test2Ele2: 4,
},
},
}
var result slicetestConfig
os.Setenv("CONFIGOR_TEST1", "1")
os.Setenv("CONFIGOR_TEST2_0_TEST2ELE1", "1")
os.Setenv("CONFIGOR_TEST2_0_TEST2ELE2", "2")
os.Setenv("CONFIGOR_TEST2_1_TEST2ELE1", "3")
os.Setenv("CONFIGOR_TEST2_1_TEST2ELE2", "4")
err := Load(&result)
if err != nil {
t.Fatalf("load from env err:%v", err)
}
if !reflect.DeepEqual(result, tc) {
t.Fatalf("unexpected result:%+v", result)
}
}
func TestConfigFromEnv(t *testing.T) {
type config struct {
LineBreakString string `required:"true"`
Count int64
Slient bool
}
cfg := &config{}
os.Setenv("CONFIGOR_ENV_PREFIX", "CONFIGOR")
os.Setenv("CONFIGOR_LineBreakString", "Line one\nLine two\nLine three\nAnd more lines")
os.Setenv("CONFIGOR_Slient", "1")
os.Setenv("CONFIGOR_Count", "10")
Load(cfg)
if os.Getenv("CONFIGOR_LineBreakString") != cfg.LineBreakString {
t.Error("Failed to load value has line break from env")
}
if !cfg.Slient {
t.Error("Failed to load bool from env")
}
if cfg.Count != 10 {
t.Error("Failed to load number from env")
}
}
type Menu struct {
Key string `json:"key" yaml:"key"`
Name string `json:"name" yaml:"name"`
Icon string `json:"icon" yaml:"icon"`
Children []Menu `json:"children" yaml:"children"`
}
type MenuList struct {
Top []Menu `json:"top" yaml:"top"`
}
func TestLoadNestedConfig(t *testing.T) {
adminConfig := MenuList{}
New(&Config{Verbose: true}).Load(&adminConfig, "admin.yml")
}
| [
"\"CONFIGOR_LineBreakString\""
]
| []
| [
"CONFIGOR_LineBreakString"
]
| [] | ["CONFIGOR_LineBreakString"] | go | 1 | 0 | |
src/requests.py | import base64
import json
import time
import requests
from colr import color
import os
from src.logs import Logging
class Requests:
def __init__(self, version):
self.version = version
self.headers = {}
self.Logging = Logging()
self.log = self.Logging.log
self.region = self.get_region()
self.pd_url = f"https://pd.{self.region[0]}.a.pvp.net"
self.glz_url = f"https://glz-{self.region[1][0]}.{self.region[1][1]}.a.pvp.net"
self.log(f"Api urls: pd_url: '{self.pd_url}', glz_url: '{self.glz_url}'")
self.region = self.region[0]
self.lockfile = self.get_lockfile()
self.puuid = ''
        # fetch puuid now so it is available outside this constructor
self.get_headers()
def fetch(self, url_type: str, endpoint: str, method: str):
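        # Route the request to one of the known API surfaces: "glz" and "pd" hit the
        # regional game-server URLs, "local" talks to the client's own API on 127.0.0.1
        # using the lockfile credentials, and "custom" treats `endpoint` as a full URL.
        # Failed glz/pd responses are retried after a short delay with rebuilt headers.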
try:
if url_type == "glz":
response = requests.request(method, self.glz_url + endpoint, headers=self.get_headers(), verify=False)
self.log(f"fetch: url: '{url_type}', endpoint: {endpoint}, method: {method},"
f" response code: {response.status_code}")
if not response.ok:
time.sleep(5)
self.headers = {}
                    return self.fetch(url_type, endpoint, method)
return response.json()
elif url_type == "pd":
response = requests.request(method, self.pd_url + endpoint, headers=self.get_headers(), verify=False)
self.log(
f"fetch: url: '{url_type}', endpoint: {endpoint}, method: {method},"
f" response code: {response.status_code}")
if not response.ok:
time.sleep(5)
self.headers = {}
                    return self.fetch(url_type, endpoint, method)
return response
elif url_type == "local":
local_headers = {'Authorization': 'Basic ' + base64.b64encode(
('riot:' + self.lockfile['password']).encode()).decode()}
response = requests.request(method, f"https://127.0.0.1:{self.lockfile['port']}{endpoint}",
headers=local_headers,
verify=False)
self.log(
f"fetch: url: '{url_type}', endpoint: {endpoint}, method: {method},"
f" response code: {response.status_code}")
return response.json()
elif url_type == "custom":
response = requests.request(method, f"{endpoint}", headers=self.get_headers(), verify=False)
self.log(
f"fetch: url: '{url_type}', endpoint: {endpoint}, method: {method},"
f" response code: {response.status_code}")
if not response.ok: self.headers = {}
return response.json()
except json.decoder.JSONDecodeError:
self.log(f"JSONDecodeError in fetch function, resp.code: {response.status_code}, resp_text: '{response.text}")
print(response)
print(response.text)
def get_region(self):
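        # The shard and region are not exposed directly, so scrape them from the
        # pd/glz URLs that the game client writes to its ShooterGame.log.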
path = os.path.join(os.getenv('LOCALAPPDATA'), R'VALORANT\Saved\Logs\ShooterGame.log')
with open(path, "r", encoding="utf8") as file:
while True:
line = file.readline()
if '.a.pvp.net/account-xp/v1/' in line:
pd_url = line.split('.a.pvp.net/account-xp/v1/')[0].split('.')[-1]
elif 'https://glz' in line:
glz_url = [(line.split('https://glz-')[1].split(".")[0]),
(line.split('https://glz-')[1].split(".")[1])]
if "pd_url" in locals().keys() and "glz_url" in locals().keys():
return [pd_url, glz_url]
def get_current_version(self):
path = os.path.join(os.getenv('LOCALAPPDATA'), R'VALORANT\Saved\Logs\ShooterGame.log')
with open(path, "r", encoding="utf8") as file:
while True:
line = file.readline()
if 'CI server version:' in line:
version_without_shipping = line.split('CI server version: ')[1].strip()
version = version_without_shipping.split("-")
version.insert(2, "shipping")
version = "-".join(version)
self.log(f"got version from logs '{version}'")
return version
def get_lockfile(self):
try:
with open(os.path.join(os.getenv('LOCALAPPDATA'), R'Riot Games\Riot Client\Config\lockfile')) as lockfile:
self.log("opened log file")
data = lockfile.read().split(':')
keys = ['name', 'PID', 'port', 'password', 'protocol']
return dict(zip(keys, data))
except FileNotFoundError:
self.log("lockfile not found")
raise Exception("Lockfile not found, you're not in a game!")
def get_headers(self):
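        # Exchange the lockfile credentials for the access token and entitlements JWT
        # expected by the remote pd/glz endpoints, and remember the account puuid.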
if self.headers == {}:
local_headers = {'Authorization': 'Basic ' + base64.b64encode(
('riot:' + self.lockfile['password']).encode()).decode()}
response = requests.get(f"https://127.0.0.1:{self.lockfile['port']}/entitlements/v1/token",
headers=local_headers, verify=False)
entitlements = response.json()
self.puuid = entitlements['subject']
headers = {
'Authorization': f"Bearer {entitlements['accessToken']}",
'X-Riot-Entitlements-JWT': entitlements['token'],
'X-Riot-ClientPlatform': "ew0KCSJwbGF0Zm9ybVR5cGUiOiAiUEMiLA0KCSJwbGF0Zm9ybU9TIjog"
"IldpbmRvd3MiLA0KCSJwbGF0Zm9ybU9TVmVyc2lvbiI6ICIxMC4wLjE5"
"MDQyLjEuMjU2LjY0Yml0IiwNCgkicGxhdGZvcm1DaGlwc2V0IjogIlVua25vd24iDQp9",
'X-Riot-ClientVersion': self.get_current_version(),
"User-Agent": "ShooterGame/13 Windows/10.0.19043.1.256.64bit"
}
return headers
| []
| []
| [
"LOCALAPPDATA"
]
| [] | ["LOCALAPPDATA"] | python | 1 | 0 | |
core/grpcox.go | package core
import (
"context"
"os"
"strconv"
"time"
"github.com/fullstorydev/grpcurl"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
reflectpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
// GrpCox - main object
type GrpCox struct {
KeepAlive float64
activeConn *ConnStore
maxLifeConn time.Duration
// TODO : utilize below args
headers []string
reflectHeaders []string
authority string
insecure bool
cacert string
cert string
key string
serverName string
isUnixSocket func() bool
}
// InitGrpCox constructs a GrpCox, reading MAX_LIFE_CONN (minutes) and
// TICK_CLOSE_CONN (seconds) from the environment to configure the connection GC.
func InitGrpCox() *GrpCox {
maxLife, tick := 10, 3
if val, err := strconv.Atoi(os.Getenv("MAX_LIFE_CONN")); err == nil {
maxLife = val
}
if val, err := strconv.Atoi(os.Getenv("TICK_CLOSE_CONN")); err == nil {
tick = val
}
c := NewConnectionStore()
g := &GrpCox{
activeConn: c,
}
if maxLife > 0 && tick > 0 {
g.maxLifeConn = time.Duration(maxLife) * time.Minute
c.StartGC(time.Duration(tick) * time.Second)
}
return g
}
// GetResource - open resource to targeted grpc server
func (g *GrpCox) GetResource(ctx context.Context, target string, plainText, isRestartConn bool) (*Resource, error) {
if conn, ok := g.activeConn.getConnection(target); ok {
if !isRestartConn && conn.isValid() {
return conn, nil
}
g.CloseActiveConns(target)
}
var err error
r := new(Resource)
h := append(g.headers, g.reflectHeaders...)
md := grpcurl.MetadataFromHeaders(h)
refCtx := metadata.NewOutgoingContext(ctx, md)
r.clientConn, err = g.dial(ctx, target, plainText)
if err != nil {
return nil, err
}
r.refClient = grpcreflect.NewClient(refCtx, reflectpb.NewServerReflectionClient(r.clientConn))
r.descSource = grpcurl.DescriptorSourceFromServer(ctx, r.refClient)
r.headers = h
g.activeConn.addConnection(target, r, g.maxLifeConn)
return r, nil
}
// GetActiveConns - get all saved active connections
func (g *GrpCox) GetActiveConns(ctx context.Context) []string {
active := g.activeConn.getAllConn()
result := make([]string, len(active))
i := 0
for k := range active {
result[i] = k
i++
}
return result
}
// CloseActiveConns - close conn by host or all
func (g *GrpCox) CloseActiveConns(host string) error {
if host == "all" {
for k := range g.activeConn.getAllConn() {
g.activeConn.delete(k)
}
return nil
}
g.activeConn.delete(host)
return nil
}
// Extend extends a connection's lifetime based on the configured max life setting
func (g *GrpCox) Extend(host string) {
g.activeConn.extend(host, g.maxLifeConn)
}
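// dial opens a gRPC client connection to target within a 10s timeout, applying the
// configured keep-alive, authority and TLS settings; plainText skips transport
// credentials entirely.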
func (g *GrpCox) dial(ctx context.Context, target string, plainText bool) (*grpc.ClientConn, error) {
dialTime := 10 * time.Second
ctx, cancel := context.WithTimeout(ctx, dialTime)
defer cancel()
var opts []grpc.DialOption
// keep alive
if g.KeepAlive > 0 {
timeout := time.Duration(g.KeepAlive * float64(time.Second))
opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: timeout,
Timeout: timeout,
}))
}
if g.authority != "" {
opts = append(opts, grpc.WithAuthority(g.authority))
}
var creds credentials.TransportCredentials
if !plainText {
var err error
creds, err = grpcurl.ClientTransportCredentials(g.insecure, g.cacert, g.cert, g.key)
if err != nil {
return nil, err
}
if g.serverName != "" {
if err := creds.OverrideServerName(g.serverName); err != nil {
return nil, err
}
}
}
network := "tcp"
if g.isUnixSocket != nil && g.isUnixSocket() {
network = "unix"
}
cc, err := grpcurl.BlockingDial(ctx, network, target, creds, opts...)
if err != nil {
return nil, err
}
return cc, nil
}
| [
"\"MAX_LIFE_CONN\"",
"\"TICK_CLOSE_CONN\""
]
| []
| [
"MAX_LIFE_CONN",
"TICK_CLOSE_CONN"
]
| [] | ["MAX_LIFE_CONN", "TICK_CLOSE_CONN"] | go | 2 | 0 | |
daemon/core/gui/appconfig.py | import os
import shutil
from pathlib import Path
import yaml
# gui home paths
from core.gui import themes
HOME_PATH = Path.home().joinpath(".coretk")
BACKGROUNDS_PATH = HOME_PATH.joinpath("backgrounds")
CUSTOM_EMANE_PATH = HOME_PATH.joinpath("custom_emane")
CUSTOM_SERVICE_PATH = HOME_PATH.joinpath("custom_services")
ICONS_PATH = HOME_PATH.joinpath("icons")
MOBILITY_PATH = HOME_PATH.joinpath("mobility")
XMLS_PATH = HOME_PATH.joinpath("xmls")
CONFIG_PATH = HOME_PATH.joinpath("gui.yaml")
LOG_PATH = HOME_PATH.joinpath("gui.log")
# local paths
DATA_PATH = Path(__file__).parent.joinpath("data")
LOCAL_ICONS_PATH = DATA_PATH.joinpath("icons").absolute()
LOCAL_BACKGROUND_PATH = DATA_PATH.joinpath("backgrounds").absolute()
LOCAL_XMLS_PATH = DATA_PATH.joinpath("xmls").absolute()
LOCAL_MOBILITY_PATH = DATA_PATH.joinpath("mobility").absolute()
# configuration data
TERMINALS = [
"$TERM",
"gnome-terminal --window --",
"lxterminal -e",
"konsole -e",
"xterm -e",
"aterm -e",
"eterm -e",
"rxvt -e",
"xfce4-terminal -x",
]
EDITORS = ["$EDITOR", "vim", "emacs", "gedit", "nano", "vi"]
class IndentDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super().increase_indent(flow, False)
def copy_files(current_path, new_path):
for current_file in current_path.glob("*"):
new_file = new_path.joinpath(current_file.name)
shutil.copy(current_file, new_file)
def check_directory():
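    # First-run setup: create ~/.coretk and seed it with the icons, backgrounds, xmls
    # and mobility files shipped with the GUI, then write a default gui.yaml.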
if HOME_PATH.exists():
return
HOME_PATH.mkdir()
BACKGROUNDS_PATH.mkdir()
CUSTOM_EMANE_PATH.mkdir()
CUSTOM_SERVICE_PATH.mkdir()
ICONS_PATH.mkdir()
MOBILITY_PATH.mkdir()
XMLS_PATH.mkdir()
copy_files(LOCAL_ICONS_PATH, ICONS_PATH)
copy_files(LOCAL_BACKGROUND_PATH, BACKGROUNDS_PATH)
copy_files(LOCAL_XMLS_PATH, XMLS_PATH)
copy_files(LOCAL_MOBILITY_PATH, MOBILITY_PATH)
if "TERM" in os.environ:
terminal = TERMINALS[0]
else:
terminal = TERMINALS[1]
if "EDITOR" in os.environ:
editor = EDITORS[0]
else:
editor = EDITORS[1]
config = {
"preferences": {
"theme": themes.THEME_DARK,
"editor": editor,
"terminal": terminal,
"gui3d": "/usr/local/bin/std3d.sh",
"width": 1000,
"height": 750,
},
"location": {
"x": 0.0,
"y": 0.0,
"z": 0.0,
"lat": 47.5791667,
"lon": -122.132322,
"alt": 2.0,
"scale": 150.0,
},
"servers": [{"name": "example", "address": "127.0.0.1", "port": 50051}],
"nodes": [],
"recentfiles": [],
"observers": [{"name": "hello", "cmd": "echo hello"}],
}
save(config)
def read():
with CONFIG_PATH.open("r") as f:
return yaml.load(f, Loader=yaml.SafeLoader)
def save(config):
with CONFIG_PATH.open("w") as f:
yaml.dump(config, f, Dumper=IndentDumper, default_flow_style=False)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/__init__.py | # from flask import Flask
# import os
# from flask_sqlalchemy import SQLAlchemy
#
# from app_config import FlaskConfig
# # from endpoints import blueprint_shorten_url
#
# _APP_CONFIG = {
# **FlaskConfig.CONFIG_FLASK,
# **FlaskConfig.CONFIG_SQLALCHEMY,
# }
#
# db = SQLAlchemy()
#
# def create_app(config=_APP_CONFIG):
# app = Flask(__name__)
#
# # load app specified configuration
# if config is not None:
# if isinstance(config, dict):
# app.config.update(config)
# elif config.endswith('.py'):
# app.config.from_pyfile(config)
#
# db.init_app(app=app)
# # db.create_all(app=app)
# # app.app_context().push()
#
# with app.app_context():
# import endpoints
# db.create_all()
#
# app.register_blueprint(blueprint=endpoints.blueprint_shorten_url, url_prefix="")
#
# return app
#
#
# if __name__ == "__main__":
# ENVIRONMENT_DEBUG = os.environ.get("APP_DEBUG", True)
# ENVIRONMENT_PORT = os.environ.get("APP_PORT", 5000)
# create_app(_APP_CONFIG).run(host='0.0.0.0', port=ENVIRONMENT_PORT, debug=ENVIRONMENT_DEBUG) | []
| []
| [
"APP_DEBUG",
"APP_PORT"
]
| [] | ["APP_DEBUG", "APP_PORT"] | python | 2 | 0 | |
Castor/__init__.py | ###
# Copyright (c) 2017, Weasel
# All rights reserved.
#
#
###
"""
Castor: Lulz maria script
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "1.0.0"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.Author('Weasel')
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = ''
from . import config
from . import plugin
from imp import reload
# In case we're being reloaded.
reload(config)
reload(plugin)
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| []
| []
| []
| [] | [] | python | null | null | null |
pkg/scheduler/cache/cache.go | /*
Copyright 2021 The Volcano Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
infov1 "k8s.io/client-go/informers/core/v1"
schedv1 "k8s.io/client-go/informers/scheduling/v1beta1"
storagev1 "k8s.io/client-go/informers/storage/v1"
storagev1alpha1 "k8s.io/client-go/informers/storage/v1alpha1"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
"os"
"strconv"
"sync"
"time"
batch "volcano.sh/apis/pkg/apis/batch/v1alpha1"
"volcano.sh/apis/pkg/apis/scheduling"
schedulingscheme "volcano.sh/apis/pkg/apis/scheduling/scheme"
vcv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
vcclient "volcano.sh/apis/pkg/client/clientset/versioned"
"volcano.sh/apis/pkg/client/clientset/versioned/scheme"
vcinformer "volcano.sh/apis/pkg/client/informers/externalversions"
cpuinformerv1 "volcano.sh/apis/pkg/client/informers/externalversions/nodeinfo/v1alpha1"
vcinformerv1 "volcano.sh/apis/pkg/client/informers/externalversions/scheduling/v1beta1"
"volcano.sh/volcano/cmd/scheduler/app/options"
schedulingapi "volcano.sh/volcano/pkg/scheduler/api"
"volcano.sh/volcano/pkg/scheduler/metrics"
)
func init() {
schemeBuilder := runtime.SchemeBuilder{
v1.AddToScheme,
}
utilruntime.Must(schemeBuilder.AddToScheme(scheme.Scheme))
}
// New returns a Cache implementation.
func New(config *rest.Config, schedulerName string, defaultQueue string) Cache {
return newSchedulerCache(config, schedulerName, defaultQueue)
}
// SchedulerCache cache for the kube batch
type SchedulerCache struct {
sync.Mutex
kubeClient *kubernetes.Clientset
vcClient *vcclient.Clientset
defaultQueue string
// schedulerName is the name for volcano scheduler
schedulerName string
podInformer infov1.PodInformer
nodeInformer infov1.NodeInformer
podGroupInformerV1beta1 vcinformerv1.PodGroupInformer
queueInformerV1beta1 vcinformerv1.QueueInformer
pvInformer infov1.PersistentVolumeInformer
pvcInformer infov1.PersistentVolumeClaimInformer
scInformer storagev1.StorageClassInformer
pcInformer schedv1.PriorityClassInformer
quotaInformer infov1.ResourceQuotaInformer
csiNodeInformer storagev1.CSINodeInformer
csiDriverInformer storagev1.CSIDriverInformer
csiStorageCapacityInformer storagev1alpha1.CSIStorageCapacityInformer
cpuInformer cpuinformerv1.NumatopologyInformer
Binder Binder
Evictor Evictor
StatusUpdater StatusUpdater
PodGroupBinder BatchBinder
VolumeBinder VolumeBinder
Recorder record.EventRecorder
Jobs map[schedulingapi.JobID]*schedulingapi.JobInfo
Nodes map[string]*schedulingapi.NodeInfo
Queues map[schedulingapi.QueueID]*schedulingapi.QueueInfo
PriorityClasses map[string]*v1beta1.PriorityClass
NodeList []string
defaultPriorityClass *v1beta1.PriorityClass
defaultPriority int32
NamespaceCollection map[string]*schedulingapi.NamespaceCollection
errTasks workqueue.RateLimitingInterface
deletedJobs workqueue.RateLimitingInterface
informerFactory informers.SharedInformerFactory
BindFlowChannel chan *schedulingapi.TaskInfo
bindCache []*schedulingapi.TaskInfo
batchNum int
}
type defaultBinder struct {
kubeclient *kubernetes.Clientset
}
// Bind sends bind requests for the given tasks to the api server
func (db *defaultBinder) Bind(kubeClient *kubernetes.Clientset, tasks []*schedulingapi.TaskInfo) (error, []*schedulingapi.TaskInfo) {
var errTasks []*schedulingapi.TaskInfo
for _, task := range tasks {
p := task.Pod
if err := kubeClient.CoreV1().Pods(p.Namespace).Bind(context.TODO(),
&v1.Binding{
ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID, Annotations: p.Annotations},
Target: v1.ObjectReference{
Kind: "Node",
Name: task.NodeName,
},
},
metav1.CreateOptions{}); err != nil {
klog.Errorf("Failed to bind pod <%v/%v> to node %s : %#v", p.Namespace, p.Name, task.NodeName, err)
errTasks = append(errTasks, task)
}
}
if len(errTasks) > 0 {
return fmt.Errorf("failed to bind pods"), errTasks
}
return nil, nil
}
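// NewBinder returns a binder that binds tasks to nodes through the Kubernetes Bind
// subresource.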
func NewBinder() *defaultBinder {
return &defaultBinder{}
}
type defaultEvictor struct {
kubeclient *kubernetes.Clientset
recorder record.EventRecorder
}
// Evict will send delete pod request to api server
func (de *defaultEvictor) Evict(p *v1.Pod, reason string) error {
klog.V(3).Infof("Evicting pod %v/%v, because of %v", p.Namespace, p.Name, reason)
evictMsg := fmt.Sprintf("Pod is evicted, because of %v", reason)
annotations := map[string]string{}
// record that we are evicting the pod
de.recorder.AnnotatedEventf(p, annotations, v1.EventTypeWarning, "Evict", evictMsg)
pod := p.DeepCopy()
condition := &v1.PodCondition{
Type: v1.PodReady,
Status: v1.ConditionFalse,
Reason: "Evict",
Message: evictMsg,
}
if !podutil.UpdatePodCondition(&pod.Status, condition) {
klog.V(1).Infof("UpdatePodCondition: existed condition, not update")
klog.V(1).Infof("%+v", pod.Status.Conditions)
return nil
}
if _, err := de.kubeclient.CoreV1().Pods(p.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
klog.Errorf("Failed to update pod <%v/%v> status: %v", pod.Namespace, pod.Name, err)
return err
}
if err := de.kubeclient.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}); err != nil {
klog.Errorf("Failed to evict pod <%v/%v>: %#v", p.Namespace, p.Name, err)
return err
}
return nil
}
// defaultStatusUpdater is the default implementation of the StatusUpdater interface
type defaultStatusUpdater struct {
kubeclient *kubernetes.Clientset
vcclient *vcclient.Clientset
}
// following the same logic as podutil.UpdatePodCondition
func podConditionHaveUpdate(status *v1.PodStatus, condition *v1.PodCondition) bool {
lastTransitionTime := metav1.Now()
// Try to find this pod condition.
_, oldCondition := podutil.GetPodCondition(status, condition.Type)
if oldCondition == nil {
// We are adding new pod condition.
return true
}
// We are updating an existing condition, so we need to check if it has changed.
if condition.Status == oldCondition.Status {
lastTransitionTime = oldCondition.LastTransitionTime
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
lastTransitionTime.Equal(&oldCondition.LastTransitionTime)
// Return true if one of the fields have changed.
return !isEqual
}
// UpdatePodCondition will Update pod with podCondition
func (su *defaultStatusUpdater) UpdatePodCondition(pod *v1.Pod, condition *v1.PodCondition) (*v1.Pod, error) {
klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
if podutil.UpdatePodCondition(&pod.Status, condition) {
return su.kubeclient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
}
return pod, nil
}
// UpdatePodGroup will Update pod with podCondition
func (su *defaultStatusUpdater) UpdatePodGroup(pg *schedulingapi.PodGroup) (*schedulingapi.PodGroup, error) {
podgroup := &vcv1beta1.PodGroup{}
if err := schedulingscheme.Scheme.Convert(&pg.PodGroup, podgroup, nil); err != nil {
klog.Errorf("Error while converting PodGroup to v1alpha1.PodGroup with error: %v", err)
return nil, err
}
updated, err := su.vcclient.SchedulingV1beta1().PodGroups(podgroup.Namespace).Update(context.TODO(), podgroup, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("Error while updating PodGroup with error: %v", err)
return nil, err
}
podGroupInfo := &schedulingapi.PodGroup{Version: schedulingapi.PodGroupVersionV1Beta1}
if err := schedulingscheme.Scheme.Convert(updated, &podGroupInfo.PodGroup, nil); err != nil {
klog.Errorf("Error while converting v1alpha.PodGroup to api.PodGroup with error: %v", err)
return nil, err
}
return podGroupInfo, nil
}
type defaultVolumeBinder struct {
volumeBinder volumescheduling.SchedulerVolumeBinder
}
// AllocateVolumes allocates volume on the host to the task
func (dvb *defaultVolumeBinder) AllocateVolumes(task *schedulingapi.TaskInfo, hostname string, podVolumes *volumescheduling.PodVolumes) error {
allBound, err := dvb.volumeBinder.AssumePodVolumes(task.Pod, hostname, podVolumes)
task.VolumeReady = allBound
return err
}
// GetPodVolumes get pod volume on the host
func (dvb *defaultVolumeBinder) GetPodVolumes(task *schedulingapi.TaskInfo,
node *v1.Node) (podVolumes *volumescheduling.PodVolumes, err error) {
boundClaims, claimsToBind, _, err := dvb.volumeBinder.GetPodVolumes(task.Pod)
if err != nil {
return nil, err
}
podVolumes, _, err = dvb.volumeBinder.FindPodVolumes(task.Pod, boundClaims, claimsToBind, node)
return podVolumes, err
}
// BindVolumes binds volumes to the task
func (dvb *defaultVolumeBinder) BindVolumes(task *schedulingapi.TaskInfo, podVolumes *volumescheduling.PodVolumes) error {
// If task's volumes are ready, did not bind them again.
if task.VolumeReady {
return nil
}
return dvb.volumeBinder.BindPodVolumes(task.Pod, podVolumes)
}
type podgroupBinder struct {
kubeclient *kubernetes.Clientset
vcclient *vcclient.Clientset
}
// Bind will add silo cluster annotation on pod and podgroup
func (pgb *podgroupBinder) Bind(job *schedulingapi.JobInfo, cluster string) (*schedulingapi.JobInfo, error) {
if len(job.Tasks) == 0 {
klog.V(4).Infof("Job pods have not been created yet")
return job, nil
}
for _, task := range job.Tasks {
pod := task.Pod
pod.Annotations[batch.ForwardClusterKey] = cluster
pod.ResourceVersion = ""
_, err := pgb.kubeclient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("Error while update pod annotation with error: %v", err)
return nil, err
}
}
pg := job.PodGroup
pg.Annotations[batch.ForwardClusterKey] = cluster
podgroup := &vcv1beta1.PodGroup{}
if err := schedulingscheme.Scheme.Convert(&pg.PodGroup, podgroup, nil); err != nil {
klog.Errorf("Error while converting PodGroup to v1alpha1.PodGroup with error: %v", err)
return nil, err
}
newPg, err := pgb.vcclient.SchedulingV1beta1().PodGroups(pg.Namespace).Update(context.TODO(), podgroup, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("Error while update PodGroup annotation with error: %v", err)
return nil, err
}
job.PodGroup.ResourceVersion = newPg.ResourceVersion
klog.V(4).Infof("Bind PodGroup <%s> successfully", job.PodGroup.Name)
return job, nil
}
func newSchedulerCache(config *rest.Config, schedulerName string, defaultQueue string) *SchedulerCache {
kubeClient, err := kubernetes.NewForConfig(config)
if err != nil {
panic(fmt.Sprintf("failed init kubeClient, with err: %v", err))
}
vcClient, err := vcclient.NewForConfig(config)
if err != nil {
panic(fmt.Sprintf("failed init vcClient, with err: %v", err))
}
eventClient, err := kubernetes.NewForConfig(config)
if err != nil {
panic(fmt.Sprintf("failed init eventClient, with err: %v", err))
}
// create default queue
reclaimable := true
defaultQue := vcv1beta1.Queue{
ObjectMeta: metav1.ObjectMeta{
Name: defaultQueue,
},
Spec: vcv1beta1.QueueSpec{
Reclaimable: &reclaimable,
Weight: 1,
},
}
if _, err := vcClient.SchedulingV1beta1().Queues().Create(context.TODO(), &defaultQue, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
panic(fmt.Sprintf("failed init default queue, with err: %v", err))
}
sc := &SchedulerCache{
Jobs: make(map[schedulingapi.JobID]*schedulingapi.JobInfo),
Nodes: make(map[string]*schedulingapi.NodeInfo),
Queues: make(map[schedulingapi.QueueID]*schedulingapi.QueueInfo),
PriorityClasses: make(map[string]*v1beta1.PriorityClass),
errTasks: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
deletedJobs: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
kubeClient: kubeClient,
vcClient: vcClient,
defaultQueue: defaultQueue,
schedulerName: schedulerName,
NamespaceCollection: make(map[string]*schedulingapi.NamespaceCollection),
NodeList: []string{},
}
// Prepare event clients.
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: eventClient.CoreV1().Events("")})
sc.Recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: schedulerName})
sc.BindFlowChannel = make(chan *schedulingapi.TaskInfo, 5000)
sc.Binder = GetBindMethod()
var batchNum int
batchNum, err = strconv.Atoi(os.Getenv("BATCH_BIND_NUM"))
if err == nil && batchNum > 0 {
sc.batchNum = batchNum
} else {
sc.batchNum = 1
}
sc.Evictor = &defaultEvictor{
kubeclient: sc.kubeClient,
recorder: sc.Recorder,
}
sc.StatusUpdater = &defaultStatusUpdater{
kubeclient: sc.kubeClient,
vcclient: sc.vcClient,
}
sc.PodGroupBinder = &podgroupBinder{
kubeclient: sc.kubeClient,
vcclient: sc.vcClient,
}
informerFactory := informers.NewSharedInformerFactory(sc.kubeClient, 0)
sc.informerFactory = informerFactory
mySchedulerPodName, c := getMultiSchedulerInfo()
// create informer for node information
sc.nodeInformer = informerFactory.Core().V1().Nodes()
sc.nodeInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch v := obj.(type) {
case *v1.Node:
return responsibleForNode(v.Name, mySchedulerPodName, c)
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddNode,
UpdateFunc: sc.UpdateNode,
DeleteFunc: sc.DeleteNode,
},
},
0,
)
sc.podInformer = informerFactory.Core().V1().Pods()
sc.pvcInformer = informerFactory.Core().V1().PersistentVolumeClaims()
sc.pvInformer = informerFactory.Core().V1().PersistentVolumes()
sc.scInformer = informerFactory.Storage().V1().StorageClasses()
sc.csiNodeInformer = informerFactory.Storage().V1().CSINodes()
sc.csiDriverInformer = informerFactory.Storage().V1().CSIDrivers()
sc.csiStorageCapacityInformer = informerFactory.Storage().V1alpha1().CSIStorageCapacities()
sc.VolumeBinder = &defaultVolumeBinder{
volumeBinder: volumescheduling.NewVolumeBinder(
sc.kubeClient,
sc.podInformer,
sc.nodeInformer,
sc.csiNodeInformer,
sc.pvcInformer,
sc.pvInformer,
sc.scInformer,
&volumescheduling.CapacityCheck{
CSIDriverInformer: sc.csiDriverInformer,
CSIStorageCapacityInformer: sc.csiStorageCapacityInformer,
},
30*time.Second,
),
}
// create informer for pod information
sc.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch v := obj.(type) {
case *v1.Pod:
if !responsibleForPod(v, schedulerName, mySchedulerPodName, c) {
if len(v.Spec.NodeName) == 0 {
return false
}
if !responsibleForNode(v.Spec.NodeName, mySchedulerPodName, c) {
return false
}
}
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPod,
UpdateFunc: sc.UpdatePod,
DeleteFunc: sc.DeletePod,
},
})
sc.pcInformer = informerFactory.Scheduling().V1beta1().PriorityClasses()
sc.pcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPriorityClass,
UpdateFunc: sc.UpdatePriorityClass,
DeleteFunc: sc.DeletePriorityClass,
})
sc.quotaInformer = informerFactory.Core().V1().ResourceQuotas()
sc.quotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddResourceQuota,
UpdateFunc: sc.UpdateResourceQuota,
DeleteFunc: sc.DeleteResourceQuota,
})
vcinformers := vcinformer.NewSharedInformerFactory(sc.vcClient, 0)
// create informer for PodGroup(v1beta1) information
sc.podGroupInformerV1beta1 = vcinformers.Scheduling().V1beta1().PodGroups()
sc.podGroupInformerV1beta1.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch v := obj.(type) {
case *vcv1beta1.PodGroup:
return responsibleForPodGroup(v, mySchedulerPodName, c)
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddPodGroupV1beta1,
UpdateFunc: sc.UpdatePodGroupV1beta1,
DeleteFunc: sc.DeletePodGroupV1beta1,
},
})
// create informer(v1beta1) for Queue information
sc.queueInformerV1beta1 = vcinformers.Scheduling().V1beta1().Queues()
sc.queueInformerV1beta1.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddQueueV1beta1,
UpdateFunc: sc.UpdateQueueV1beta1,
DeleteFunc: sc.DeleteQueueV1beta1,
})
sc.cpuInformer = vcinformers.Nodeinfo().V1alpha1().Numatopologies()
sc.cpuInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddNumaInfoV1alpha1,
UpdateFunc: sc.UpdateNumaInfoV1alpha1,
DeleteFunc: sc.DeleteNumaInfoV1alpha1,
})
return sc
}
// Run starts the schedulerCache
func (sc *SchedulerCache) Run(stopCh <-chan struct{}) {
go sc.podInformer.Informer().Run(stopCh)
go sc.nodeInformer.Informer().Run(stopCh)
go sc.podGroupInformerV1beta1.Informer().Run(stopCh)
go sc.pvInformer.Informer().Run(stopCh)
go sc.pvcInformer.Informer().Run(stopCh)
go sc.scInformer.Informer().Run(stopCh)
go sc.queueInformerV1beta1.Informer().Run(stopCh)
go sc.quotaInformer.Informer().Run(stopCh)
go sc.cpuInformer.Informer().Run(stopCh)
if options.ServerOpts.EnablePriorityClass {
go sc.pcInformer.Informer().Run(stopCh)
}
// Re-sync error tasks.
go wait.Until(sc.processResyncTask, 0, stopCh)
// Cleanup jobs.
go wait.Until(sc.processCleanupJob, 0, stopCh)
go wait.Until(sc.processBindTask, time.Millisecond*20, stopCh)
}
// WaitForCacheSync sync the cache with the api server
func (sc *SchedulerCache) WaitForCacheSync(stopCh <-chan struct{}) bool {
return cache.WaitForCacheSync(stopCh,
func() []cache.InformerSynced {
informerSynced := []cache.InformerSynced{
sc.podInformer.Informer().HasSynced,
sc.podGroupInformerV1beta1.Informer().HasSynced,
sc.nodeInformer.Informer().HasSynced,
sc.pvInformer.Informer().HasSynced,
sc.pvcInformer.Informer().HasSynced,
sc.scInformer.Informer().HasSynced,
sc.queueInformerV1beta1.Informer().HasSynced,
sc.quotaInformer.Informer().HasSynced,
sc.cpuInformer.Informer().HasSynced,
}
if options.ServerOpts.EnablePriorityClass {
informerSynced = append(informerSynced, sc.pcInformer.Informer().HasSynced)
}
return informerSynced
}()...,
)
}
// findJobAndTask returns job and the task info
func (sc *SchedulerCache) findJobAndTask(taskInfo *schedulingapi.TaskInfo) (*schedulingapi.JobInfo, *schedulingapi.TaskInfo, error) {
job, found := sc.Jobs[taskInfo.Job]
if !found {
return nil, nil, fmt.Errorf("failed to find Job %v for Task %v",
taskInfo.Job, taskInfo.UID)
}
task, found := job.Tasks[taskInfo.UID]
if !found {
return nil, nil, fmt.Errorf("failed to find task in status %v by id %v",
taskInfo.Status, taskInfo.UID)
}
return job, task, nil
}
// Evict will evict the pod.
//
// If error occurs both task and job are guaranteed to be in the original state.
func (sc *SchedulerCache) Evict(taskInfo *schedulingapi.TaskInfo, reason string) error {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
job, task, err := sc.findJobAndTask(taskInfo)
if err != nil {
return err
}
node, found := sc.Nodes[task.NodeName]
if !found {
return fmt.Errorf("failed to bind Task %v to host %v, host does not exist",
task.UID, task.NodeName)
}
originalStatus := task.Status
if err := job.UpdateTaskStatus(task, schedulingapi.Releasing); err != nil {
return err
}
	// Update the task on its node.
if err := node.UpdateTask(task); err != nil {
// After failing to update task to a node we need to revert task status from Releasing,
// otherwise task might be stuck in the Releasing state indefinitely.
if err := job.UpdateTaskStatus(task, originalStatus); err != nil {
klog.Errorf("Task <%s/%s> will be resynchronized after failing to revert status "+
"from %s to %s after failing to update Task on Node <%s>: %v",
task.Namespace, task.Name, task.Status, originalStatus, node.Name, err)
sc.resyncTask(task)
}
return err
}
p := task.Pod
go func() {
err := sc.Evictor.Evict(p, reason)
if err != nil {
sc.resyncTask(task)
}
}()
podgroup := &vcv1beta1.PodGroup{}
if err := schedulingscheme.Scheme.Convert(&job.PodGroup.PodGroup, podgroup, nil); err != nil {
klog.Errorf("Error while converting PodGroup to v1alpha1.PodGroup with error: %v", err)
return err
}
sc.Recorder.Eventf(podgroup, v1.EventTypeNormal, "Evict", reason)
return nil
}
// Bind binds task to the target host.
func (sc *SchedulerCache) Bind(tasks []*schedulingapi.TaskInfo) error {
go func(taskArray []*schedulingapi.TaskInfo) {
tmp := time.Now()
err, errTasks := sc.Binder.Bind(sc.kubeClient, taskArray)
if err == nil {
klog.V(3).Infof("bind ok, latency %v", time.Since(tmp))
for _, task := range tasks {
sc.Recorder.Eventf(task.Pod, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v/%v to %v",
task.Namespace, task.Name, task.NodeName)
}
} else {
for _, task := range errTasks {
klog.V(2).Infof("resyncTask task %s", task.Name)
sc.resyncTask(task)
}
}
}(tasks)
return nil
}
// BindPodGroup binds job to silo cluster
func (sc *SchedulerCache) BindPodGroup(job *schedulingapi.JobInfo, cluster string) error {
if _, err := sc.PodGroupBinder.Bind(job, cluster); err != nil {
klog.Errorf("Bind job <%s> to cluster <%s> failed: %v", job.Name, cluster, err)
return err
}
return nil
}
// GetPodVolumes get pod volume on the host
func (sc *SchedulerCache) GetPodVolumes(task *schedulingapi.TaskInfo, node *v1.Node) (*volumescheduling.PodVolumes, error) {
return sc.VolumeBinder.GetPodVolumes(task, node)
}
// AllocateVolumes allocates volume on the host to the task
func (sc *SchedulerCache) AllocateVolumes(task *schedulingapi.TaskInfo, hostname string, podVolumes *volumescheduling.PodVolumes) error {
return sc.VolumeBinder.AllocateVolumes(task, hostname, podVolumes)
}
// BindVolumes binds volumes to the task
func (sc *SchedulerCache) BindVolumes(task *schedulingapi.TaskInfo, podVolumes *volumescheduling.PodVolumes) error {
return sc.VolumeBinder.BindVolumes(task, podVolumes)
}
// Client returns the kubernetes clientSet
func (sc *SchedulerCache) Client() kubernetes.Interface {
return sc.kubeClient
}
// SharedInformerFactory returns the scheduler SharedInformerFactory
func (sc *SchedulerCache) SharedInformerFactory() informers.SharedInformerFactory {
return sc.informerFactory
}
// UpdateSchedulerNumaInfo used to update scheduler node cache NumaSchedulerInfo
func (sc *SchedulerCache) UpdateSchedulerNumaInfo(AllocatedSets map[string]schedulingapi.ResNumaSets) error {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
for nodeName, sets := range AllocatedSets {
if _, found := sc.Nodes[nodeName]; !found {
continue
}
numaInfo := sc.Nodes[nodeName].NumaSchedulerInfo
if numaInfo == nil {
continue
}
numaInfo.Allocate(sets)
}
return nil
}
// taskUnschedulable updates pod status of pending task
func (sc *SchedulerCache) taskUnschedulable(task *schedulingapi.TaskInfo, reason, message string) error {
pod := task.Pod
condition := &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionFalse,
Reason: reason, // Add more reasons in order to distinguish more specific scenario of pending tasks
Message: message,
}
if podConditionHaveUpdate(&pod.Status, condition) {
pod = pod.DeepCopy()
// The reason field in 'Events' should be "FailedScheduling", there is not constants defined for this in
// k8s core, so using the same string here.
// The reason field in PodCondition can be "Unschedulable"
sc.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", message)
if _, err := sc.StatusUpdater.UpdatePodCondition(pod, condition); err != nil {
return err
}
} else {
klog.V(4).Infof("task unscheduleable %s/%s, message: %s, skip by no condition update", pod.Namespace, pod.Name, message)
}
return nil
}
func (sc *SchedulerCache) deleteJob(job *schedulingapi.JobInfo) {
klog.V(3).Infof("Try to delete Job <%v:%v/%v>", job.UID, job.Namespace, job.Name)
sc.deletedJobs.AddRateLimited(job)
}
func (sc *SchedulerCache) processCleanupJob() {
obj, shutdown := sc.deletedJobs.Get()
if shutdown {
return
}
defer sc.deletedJobs.Done(obj)
job, found := obj.(*schedulingapi.JobInfo)
if !found {
klog.Errorf("Failed to convert <%v> to *JobInfo", obj)
return
}
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
if schedulingapi.JobTerminated(job) {
delete(sc.Jobs, job.UID)
klog.V(3).Infof("Job <%v:%v/%v> was deleted.", job.UID, job.Namespace, job.Name)
} else {
// Retry
sc.deleteJob(job)
}
}
func (sc *SchedulerCache) resyncTask(task *schedulingapi.TaskInfo) {
sc.errTasks.AddRateLimited(task)
}
func (sc *SchedulerCache) processResyncTask() {
obj, shutdown := sc.errTasks.Get()
if shutdown {
return
}
defer sc.errTasks.Done(obj)
task, ok := obj.(*schedulingapi.TaskInfo)
if !ok {
klog.Errorf("failed to convert %v to *schedulingapi.TaskInfo", obj)
return
}
if err := sc.syncTask(task); err != nil {
klog.Errorf("Failed to sync pod <%v/%v>, retry it.", task.Namespace, task.Name)
sc.resyncTask(task)
}
}
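// AddBindTask moves the task to Binding in the cache, reserves it on the target node
// and queues it on BindFlowChannel for asynchronous batch binding.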
func (sc *SchedulerCache) AddBindTask(taskInfo *schedulingapi.TaskInfo) error {
klog.V(5).Infof("add bind task %v/%v", taskInfo.Namespace, taskInfo.Name)
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
job, task, err := sc.findJobAndTask(taskInfo)
if err != nil {
return err
}
node, found := sc.Nodes[taskInfo.NodeName]
if !found {
return fmt.Errorf("failed to bind Task %v to host %v, host does not exist",
task.UID, taskInfo.NodeName)
}
originalStatus := task.Status
if err := job.UpdateTaskStatus(task, schedulingapi.Binding); err != nil {
return err
}
// Add task to the node.
if err := node.AddTask(task); err != nil {
		// After failing to add the task to a node we need to revert task status from Binding,
		// otherwise task might be stuck in the Binding state indefinitely.
if err := job.UpdateTaskStatus(task, originalStatus); err != nil {
klog.Errorf("Task <%s/%s> will be resynchronized after failing to revert status "+
"from %s to %s after failing to update Task on Node <%s>: %v",
task.Namespace, task.Name, task.Status, originalStatus, node.Name, err)
sc.resyncTask(task)
}
return err
}
sc.BindFlowChannel <- taskInfo
return nil
}
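// processBindTask drains BindFlowChannel, accumulating tasks in bindCache and
// flushing them in batches of batchNum.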
func (sc *SchedulerCache) processBindTask() {
for {
select {
case taskInfo, ok := <-sc.BindFlowChannel:
if !ok {
return
}
sc.bindCache = append(sc.bindCache, taskInfo)
if len(sc.bindCache) == sc.batchNum {
sc.BindTask()
}
}
if len(sc.BindFlowChannel) == 0 {
break
}
}
if len(sc.bindCache) == 0 {
return
}
sc.BindTask()
}
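// BindTask binds the volumes of every cached task and then issues a single batched
// bind request for all tasks in bindCache.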
func (sc *SchedulerCache) BindTask() {
klog.V(5).Infof("batch bind task count %d", len(sc.bindCache))
for _, task := range sc.bindCache {
if err := sc.BindVolumes(task, task.PodVolumes); err != nil {
klog.Errorf("task %s/%s bind Volumes failed: %#v", task.Namespace, task.Name, err)
sc.resyncTask(task)
return
}
}
bindTasks := make([]*schedulingapi.TaskInfo, len(sc.bindCache))
copy(bindTasks, sc.bindCache)
if err := sc.Bind(bindTasks); err != nil {
return
}
for _, task := range sc.bindCache {
metrics.UpdateTaskScheduleDuration(metrics.Duration(task.Pod.CreationTimestamp.Time))
}
sc.bindCache = sc.bindCache[0:0]
return
}
// Snapshot returns the complete snapshot of the cluster from cache
func (sc *SchedulerCache) Snapshot() *schedulingapi.ClusterInfo {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
snapshot := &schedulingapi.ClusterInfo{
Nodes: make(map[string]*schedulingapi.NodeInfo),
Jobs: make(map[schedulingapi.JobID]*schedulingapi.JobInfo),
Queues: make(map[schedulingapi.QueueID]*schedulingapi.QueueInfo),
NamespaceInfo: make(map[schedulingapi.NamespaceName]*schedulingapi.NamespaceInfo),
RevocableNodes: make(map[string]*schedulingapi.NodeInfo),
NodeList: make([]string, len(sc.NodeList)),
}
copy(snapshot.NodeList, sc.NodeList)
for _, value := range sc.Nodes {
value.RefreshNumaSchedulerInfoByCrd()
}
for _, value := range sc.Nodes {
if !value.Ready() {
continue
}
snapshot.Nodes[value.Name] = value.Clone()
if value.RevocableZone != "" {
snapshot.RevocableNodes[value.Name] = snapshot.Nodes[value.Name]
}
}
for _, value := range sc.Queues {
snapshot.Queues[value.UID] = value.Clone()
}
var cloneJobLock sync.Mutex
var wg sync.WaitGroup
cloneJob := func(value *schedulingapi.JobInfo) {
defer wg.Done()
if value.PodGroup != nil {
value.Priority = sc.defaultPriority
priName := value.PodGroup.Spec.PriorityClassName
if priorityClass, found := sc.PriorityClasses[priName]; found {
value.Priority = priorityClass.Value
}
klog.V(4).Infof("The priority of job <%s/%s> is <%s/%d>",
value.Namespace, value.Name, priName, value.Priority)
}
clonedJob := value.Clone()
cloneJobLock.Lock()
snapshot.Jobs[value.UID] = clonedJob
cloneJobLock.Unlock()
}
for _, value := range sc.NamespaceCollection {
info := value.Snapshot()
snapshot.NamespaceInfo[info.Name] = info
klog.V(4).Infof("Namespace %s has weight %v",
value.Name, info.GetWeight())
}
for _, value := range sc.Jobs {
// If no scheduling spec, does not handle it.
if value.PodGroup == nil {
klog.V(4).Infof("The scheduling spec of Job <%v:%s/%s> is nil, ignore it.",
value.UID, value.Namespace, value.Name)
continue
}
if _, found := snapshot.Queues[value.Queue]; !found {
klog.V(3).Infof("The Queue <%v> of Job <%v/%v> does not exist, ignore it.",
value.Queue, value.Namespace, value.Name)
continue
}
wg.Add(1)
go cloneJob(value)
}
wg.Wait()
klog.V(3).Infof("There are <%d> Jobs, <%d> Queues and <%d> Nodes in total for scheduling.",
len(snapshot.Jobs), len(snapshot.Queues), len(snapshot.Nodes))
return snapshot
}
// String returns information about the cache in a string format
func (sc *SchedulerCache) String() string {
sc.Mutex.Lock()
defer sc.Mutex.Unlock()
str := "Cache:\n"
if len(sc.Nodes) != 0 {
str += "Nodes:\n"
for _, n := range sc.Nodes {
str += fmt.Sprintf("\t %s: idle(%v) used(%v) allocatable(%v) pods(%d)\n",
n.Name, n.Idle, n.Used, n.Allocatable, len(n.Tasks))
i := 0
for _, p := range n.Tasks {
str += fmt.Sprintf("\t\t %d: %v\n", i, p)
i++
}
}
}
if len(sc.Jobs) != 0 {
str += "Jobs:\n"
for _, job := range sc.Jobs {
str += fmt.Sprintf("\t %s\n", job)
}
}
if len(sc.NamespaceCollection) != 0 {
str += "Namespaces:\n"
for _, ns := range sc.NamespaceCollection {
info := ns.Snapshot()
str += fmt.Sprintf("\t Namespace(%s) Weight(%v)\n",
info.Name, info.Weight)
}
}
if len(sc.NodeList) != 0 {
str += fmt.Sprintf("NodeList: %v\n", sc.NodeList)
}
return str
}
// RecordJobStatusEvent records related events according to job status.
func (sc *SchedulerCache) RecordJobStatusEvent(job *schedulingapi.JobInfo) {
pgUnschedulable := job.PodGroup != nil &&
(job.PodGroup.Status.Phase == scheduling.PodGroupUnknown ||
job.PodGroup.Status.Phase == scheduling.PodGroupPending ||
job.PodGroup.Status.Phase == scheduling.PodGroupInqueue)
// If pending or unschedulable, record unschedulable event.
if pgUnschedulable {
msg := fmt.Sprintf("%v/%v tasks in gang unschedulable: %v",
len(job.TaskStatusIndex[schedulingapi.Pending]),
len(job.Tasks),
job.FitError())
sc.recordPodGroupEvent(job.PodGroup, v1.EventTypeWarning, string(scheduling.PodGroupUnschedulableType), msg)
} else {
sc.recordPodGroupEvent(job.PodGroup, v1.EventTypeNormal, string(scheduling.PodGroupScheduled), string(scheduling.PodGroupReady))
}
baseErrorMessage := job.JobFitErrors
if baseErrorMessage == "" {
baseErrorMessage = schedulingapi.AllNodeUnavailableMsg
}
// Update the pod condition for tasks in Allocated, Pending and Pipelined state before the job is discarded
for _, status := range []schedulingapi.TaskStatus{schedulingapi.Allocated, schedulingapi.Pending, schedulingapi.Pipelined} {
for _, taskInfo := range job.TaskStatusIndex[status] {
reason, msg := job.TaskSchedulingReason(taskInfo.UID)
if len(msg) == 0 {
msg = baseErrorMessage
}
if err := sc.taskUnschedulable(taskInfo, reason, msg); err != nil {
klog.Errorf("Failed to update unschedulable task status <%s/%s>: %v",
taskInfo.Namespace, taskInfo.Name, err)
}
}
}
}
// UpdateJobStatus updates the status of the job and its tasks.
func (sc *SchedulerCache) UpdateJobStatus(job *schedulingapi.JobInfo, updatePG bool) (*schedulingapi.JobInfo, error) {
if updatePG {
pg, err := sc.StatusUpdater.UpdatePodGroup(job.PodGroup)
if err != nil {
return nil, err
}
job.PodGroup = pg
}
sc.RecordJobStatusEvent(job)
return job, nil
}
func (sc *SchedulerCache) recordPodGroupEvent(podGroup *schedulingapi.PodGroup, eventType, reason, msg string) {
if podGroup == nil {
return
}
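// Convert the internal PodGroup representation to the versioned v1beta1 type so the event recorder can attach events to it.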
pg := &vcv1beta1.PodGroup{}
if err := schedulingscheme.Scheme.Convert(&podGroup.PodGroup, pg, nil); err != nil {
klog.Errorf("Error while converting PodGroup to v1alpha1.PodGroup with error: %v", err)
return
}
sc.Recorder.Eventf(pg, eventType, reason, msg)
}
| [
"\"BATCH_BIND_NUM\""
]
| []
| [
"BATCH_BIND_NUM"
]
| [] | ["BATCH_BIND_NUM"] | go | 1 | 0 | |
HPCrunScripts/PS_DeterministicNowcast_parallel_advection_24h.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 07:41:32 2019
Deterministic nowcast with pySTEPS, with extraction of results per catchment.
Based on the input data for the Ensemble nowcast, but without any ensembles.
Make sure to change the initial part to your case.
Note that this script assumes that the catchments are already reprojected.
TO DO - add _reprojected to input and change this later on in the script.
@author: imhof_rn
"""
from osgeo import gdal
from osgeo import gdal_array
from osgeo import ogr, osr
import os
os.environ['PROJ_LIB'] = r'/u/imhof_rn/anaconda3/pkgs/proj4-5.2.0-h470a237_1/share/proj'
import mkl
mkl.set_num_threads(1)
import datetime
import netCDF4
import numpy as np
import pprint
import sys
import time
import pysteps as stp
import config as cfg
import logging
import itertools
logging.basicConfig(level=logging.INFO)
# import message passing interface for python
from mpi4py import MPI
# import for memory use
#from pympler import tracker
#tr = tracker.SummaryTracker()
#tr.print_diff()
###############################################################################
#################
# Initial part, only change this
# NOTE: This script only works when the catchment shapefiles are already reprojected
# to the KNMI radar dataset.
#################
os.chdir('/u/imhof_rn/pysteps-0.2')
# Catchment filenames and directories
catchments = True # Set to False when you do not want any slicing per catchment (i.e. you will use the full output)
# If catchments = 'False', uncomment the next two lines.
catchment_filenames = ["/u/imhof_rn/GIS/Catchments_pysteps/Hupsel.shp", "/u/imhof_rn/GIS/Catchments_pysteps/stroomgebied_Regge.shp", "/u/imhof_rn/GIS/Catchments_pysteps/GroteWaterleiding.shp", "/u/imhof_rn/GIS/Catchments_pysteps/Aa.shp", "/u/imhof_rn/GIS/Catchments_pysteps/Reusel.shp", "/u/imhof_rn/GIS/Catchments_pysteps/het_molentje.shp", "/u/imhof_rn/GIS/Catchments_pysteps/Luntersebeek.shp", "/u/imhof_rn/GIS/Catchments_pysteps/Dwarsdiep.shp", "/u/imhof_rn/GIS/Catchments_pysteps/AfwaterendgebiedBoezemsysteem.shp", "/u/imhof_rn/GIS/Catchments_pysteps/HHRijnland.shp", "/u/imhof_rn/GIS/Catchments_pysteps/Beemster.shp", "/u/imhof_rn/GIS/Catchments_pysteps/DeLinde.shp"] # Put here the locations of the shapefiles
catchment_names = ['Hupsel', 'Regge', 'GroteWaterleiding', 'Aa', 'Reusel', 'Molentje', 'Luntersebeek', 'Dwarsdiep', 'Delfland', 'Rijnland', 'Beemster', 'Linde'] # A list of catchment names.
out_dir = "/u/imhof_rn/Nowcasts/pySTEPS" # Just used for logging, the actual
# out_dir is set in the pystepsrc-file.
# Verification settings
verification = {
"experiment_name" : "pysteps_mpi_24hours_deterministic",
"overwrite" : True, # to recompute nowcasts
"v_thresholds" : [0.1, 1.0], # [mm/h]
"v_leadtimes" : [10, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360], # [min]
"v_accu" : None, # [min]
"seed" : 42, # for reproducibility
"doplot" : True, # save figures
"dosaveresults" : True # save verification scores to csv
}
# Forecast settings
forecast = {
"n_lead_times" : 72, # timesteps per nowcast
"r_threshold" : 0.1, # rain/no rain threshold [mm/h]
"unit" : "mm/h", # mm/h or dBZ
"transformation" : "dB", # None or dB
"adjust_domain" : None # None or square
}
# The experiment set-up
## this includes tuneable parameters
experiment = {
## the events event start event end update cycle data source
"data" : [("200801101205","200801111800",5,"knmi"),
("200801190305","200801200900",5,"knmi"),
("200801191005","200801201600",5,"knmi"),
("200801201705","200801212300",5,"knmi"),
("200802042305","200802060500",5,"knmi"),
("200807070605","200807081200",5,"knmi"),
("200808070405","200808081000",5,"knmi"),
("200812100305","200812110900",5,"knmi"),
("200902091005","200902101600",5,"knmi"),
("200905131705","200905142300",5,"knmi"),
("200905161005","200905171600",5,"knmi"),
("200912091805","200912110000",5,"knmi"),
("201005110005","201005120600",5,"knmi"),
("201006090205","201006100800",5,"knmi"),
("201007101005","201007111600",5,"knmi"),
("201007101105","201007111700",5,"knmi"),
("201008251605","201008262200",5,"knmi"),
("201008252105","201008270300",5,"knmi"),
("201008252205","201008270400",5,"knmi"),
("201008252305","201008270500",5,"knmi"),
("201101120405","201101131000",5,"knmi"),
("201106180405","201106191000",5,"knmi"),
("201107131805","201107150000",5,"knmi"),
("201107210105","201107220700",5,"knmi"),
("201107231105","201107241700",5,"knmi"),
("201107271805","201107290000",5,"knmi"),
("201112151205","201112161800",5,"knmi"),
("201112151305","201112161900",5,"knmi"),
("201112311805","201201020000",5,"knmi"),
("201112312105","201201020300",5,"knmi"),
("201201010905","201201021500",5,"knmi"),
("201201041205","201201051800",5,"knmi"),
("201206120205","201206130800",5,"knmi"),
("201207271505","201207282100",5,"knmi"),
("201208010605","201208021200",5,"knmi"),
("201212220305","201212230900",5,"knmi"),
("201212220505","201212231100",5,"knmi"),
("201212241705","201212252300",5,"knmi"),
("201305200605","201305211200",5,"knmi"),
("201312232205","201312250400",5,"knmi"),
("201407080605","201407091200",5,"knmi"),
("201407101205","201407111800",5,"knmi"),
("201407270605","201407281200",5,"knmi"),
("201407271905","201407290100",5,"knmi"),
("201407280605","201407291200",5,"knmi"),
("201412110705","201412121300",5,"knmi"),
("201412110805","201412121400",5,"knmi"),
("201412111205","201412121800",5,"knmi"),
("201412261705","201412272300",5,"knmi"),
("201501071705","201501082300",5,"knmi"),
("201501120805","201501131400",5,"knmi"),
("201501121005","201501131600",5,"knmi"),
("201501121105","201501131700",5,"knmi"),
("201502200805","201502211400",5,"knmi"),
("201508160405","201508171000",5,"knmi"),
("201511292305","201512010500",5,"knmi"),
("201511300205","201512010800",5,"knmi"),
("201601131405","201601142000",5,"knmi"),
("201601291405","201601302000",5,"knmi"),
("201602081205","201602091800",5,"knmi"),
("201602081305","201602091900",5,"knmi"),
("201603040205","201603050800",5,"knmi"),
("201605220405","201605231000",5,"knmi"),
("201605221505","201605232100",5,"knmi"),
("201605312105","201606020300",5,"knmi"),
("201605312305","201606020500",5,"knmi"),
("201606031605","201606042200",5,"knmi"),
("201607210705","201607221300",5,"knmi"),
("201701120505","201701131100",5,"knmi"),
("201701120805","201701131400",5,"knmi"),
("201701121105","201701131700",5,"knmi"),
("201702212105","201702230300",5,"knmi"),
("201706271405","201706282000",5,"knmi"),
("201707231505","201707242100",5,"knmi"),
("201708100005","201708110600",5,"knmi"),
("201708291205","201708301800",5,"knmi"),
("201708291605","201708302200",5,"knmi"),
("201712080205","201712090800",5,"knmi"),
("201712130805","201712141400",5,"knmi"),
("201712301705","201712312300",5,"knmi"),
("201805310605","201806011200",5,"knmi"),
("201812081205","201812091800",5,"knmi")],
## the methods
"oflow_method" : ["lucaskanade"], # lucaskanade, darts
"adv_method" : ["semilagrangian"], # semilagrangian, eulerian
"nwc_method" : ["extrapolation"],
"noise_method" : [None], # parametric, nonparametric, ssft
"decomp_method" : ["fft"],
## the parameters
"n_ens_members" : [1],
"ar_order" : [2],
"n_cascade_levels" : [8],
"noise_adjustment" : [False],
"conditional" : [False],
"precip_mask" : [True],
"mask_method" : ["sprog"], # obs, incremental, sprog
"prob_matching" : ["mean"],
"num_workers" : [1], # Set the number of processors available for parallel computing
"vel_pert_method" : [None], # No velocity pertubation in order to allow for deterministic run following Seed et al. [2003]
}
# End of initial part
###############################################################################
start_time = time.time()
#### HERE ALL AVAILABLE PROCESSES AT START-UP TIME ARE COLLECTED IN comm
#### SEE FOR MORE INFO ON MPI: https://www.cs.earlham.edu/~lemanal/slides/mpi-slides.pdf
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
logging.info(('I am process rank {}'.format(rank)))
#########################################################
# Open the catchment shapes - They're needed later for the catchment_slice utils
#########################################################
shapes = []
for i in range(0, len(catchment_filenames)):
shape_filename = catchment_filenames[i]
# set file names in order to obtain the reprojected shapefile, which
# was made with the catchment_metadata functionality.
dirname = os.path.dirname(shape_filename)
basename = os.path.basename(shape_filename)
basenametxt = os.path.splitext(basename)[0]
shapes_reprojected = os.path.join(dirname, basenametxt+'_Reprojected.shp')
driver = ogr.GetDriverByName('ESRI Shapefile')
shapes.append(driver.Open(shapes_reprojected))
###########
# Set some first functions
###########
## define the callback function to export the nowcast to netcdf
converter = stp.utils.get_method("mm/h")
def export(X_3D):
"""
X_3D 3D forecast consisting of (lead time, h, w)
"""
## Open the array for lead time t and convert to mm/h
X,_ = converter(X_3D, metadata)
# readjust to initial domain shape
X,_ = reshaper(X, metadata, inverse=True)
# Then, slice the array per catchment or not if no catchments are given
if catchments == True:
X_catchment = stp.utils.catchment_slice_mpi(X, shapes)
# Export to netCDF per catchment
for n in range(0, len(catchment_filenames)):
key = list(d.keys())[n]
stp.io.export_forecast_dataset(np.array([X_catchment[n]]), d[key])
else:
# We have to change the 2D array to a 3D array (with just 1 ens member)
X = np.array([X])
# else, export full radar nowcast to netcdf
stp.io.export_forecast_dataset(X, exporter)
X = None
# Conditional parameters
## parameters that can be directly related to other parameters
def cond_pars(pars):
for key in list(pars):
if key == "oflow_method":
if pars[key].lower() == "darts": pars["n_prvs_times"] = 9
else: pars["n_prvs_times"] = 3
elif key.lower() == "n_cascade_levels":
if pars[key] == 1 : pars["bandpass_filter"] = "uniform"
else: pars["bandpass_filter"] = "gaussian"
elif key.lower() == "nwc_method":
if pars[key] == "extrapolation" : pars["n_ens_members"] = 1
return pars
#########
# Make list of parameters (i.e. the different dates - all other parameters are
# the same for every run) and scatter these over the nodes.
#########
# Prepare the list of all parameter sets of the verification
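# Each key of 'experiment' contributes one option list; the comprehension below builds the Cartesian product of all lists, one parameter set per combination.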
parsets = [[]]
for _, items in experiment.items():
parsets = [parset+[item] for parset in parsets for item in items]
if rank == 0:
#### Reorganize work a bit so we can scatter it
keyfunc = lambda x:x[0] % size
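# keyfunc maps parameter set i to MPI rank i % size, so the parameter sets are distributed round-robin over the available processes.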
work = itertools.groupby(sorted(enumerate(parsets), key=keyfunc), keyfunc)
#### Expand the work so we get lists of row, col per node
workpernode = [[x[1] for x in val] for (key, val) in work]
else:
workpernode = None
#### NOW DISTRIBUTE THE WORK
workpernode = comm.scatter(workpernode, root=0)
logging.info("Got the following work in process rank {} : {}".format(rank, workpernode))
#### Each node can now do it's own work. The main advantage is that we can do a gather at the end to collect all results.
#### Keep track of all the runs per node in scores
#scores = []
#### before starting any runs, make sure that you know in which folder we run this MPI run routine.
#### Always return to this folder before the next run
#curdir = os.getcwd()
os.chdir('/u/imhof_rn/pysteps-master')
###########
# Run the model in parallel
###########
# Now loop all parameter sets
for n, parset in enumerate(workpernode):
# logging.info("rank %02.f computing scores for parameter set nr %04.f" % (rank, n))
runId = '%s_%04.f' % (out_dir, n)
# Build parameter set
p = {}
for m, key in enumerate(experiment.keys()):
p[key] = parset[m]
## apply conditional parameters
p = cond_pars(p)
## include all remaining parameters
p.update(verification)
p.update(forecast)
# print("************************")
# print("* Parameter set %02d/%02d: *" % (n+1, len(parsets)))
# print("************************")
# pprint.pprint(p)
# If necessary, build path to results
path_to_experiment = os.path.join(cfg.path_outputs, p["experiment_name"])
# subdir with event date
path_to_nwc = os.path.join(path_to_experiment, '-'.join([p["data"][0], p["data"][3]]))
# for key, item in p.items():
# # include only variables that change
# if len(experiment.get(key,[None])) > 1 and key.lower() is not "data":
# path_to_nwc = os.path.join(path_to_nwc, '-'.join([key, str(item)]))
try:
os.makedirs(path_to_nwc)
except OSError:
pass
# **************************************************************************
# NOWCASTING
# **************************************************************************
# Loop forecasts within given event using the prescribed update cycle interval
## import data specifications
ds = cfg.get_specifications(p["data"][3])
if p["v_accu"] is None:
p["v_accu"] = ds.timestep
# Loop forecasts for given event
startdate = datetime.datetime.strptime(p["data"][0], "%Y%m%d%H%M")
enddate = datetime.datetime.strptime(p["data"][1], "%Y%m%d%H%M")
countnwc = 0
while startdate <= enddate:
try:
# filename of the nowcast netcdf. Set name either per catchment or as
# total nowcast for the entire radar image.
if catchments == True:
outfn = []
for n in range(0, len(catchment_names)):
path_to_catchment = os.path.join(path_to_nwc, catchment_names[n])
try:
os.makedirs(path_to_catchment)
Name = os.path.join(path_to_catchment, "%s_nowcast.netcdf" % startdate.strftime("%Y%m%d%H%M"))
outfn.append(Name)
except OSError:
print("Catchment outfile directory does already exist for starttime: %s" % startdate.strftime("%Y%m%d%H%M"))
Name = os.path.join(path_to_catchment, "%s_nowcast.netcdf" % startdate.strftime("%Y%m%d%H%M"))
outfn.append(Name)
else:
outfn = os.path.join(path_to_nwc, "%s_nowcast.netcdf" % startdate.strftime("%Y%m%d%H%M"))
## check if results already exist
if catchments == True:
run_exist = False
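# Note: 'n' still holds the last index of the filename loop above, so only the last catchment's netcdf file is checked for an existing run here.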
if os.path.isfile(outfn[n]):
fid = netCDF4.Dataset(outfn[n], 'r')
if fid.dimensions["time"].size == p["n_lead_times"]:
run_exist = True
if p["overwrite"]:
os.remove(outfn[n])
run_exist = False
else:
os.remove(outfn[n])
else:
run_exist = False
if os.path.isfile(outfn):
fid = netCDF4.Dataset(outfn, 'r')
if fid.dimensions["time"].size == p["n_lead_times"]:
run_exist = True
if p["overwrite"]:
os.remove(outfn)
run_exist = False
else:
os.remove(outfn)
if run_exist:
print("Nowcast %s_nowcast already exists in %s" % (startdate.strftime("%Y%m%d%H%M"),path_to_nwc))
else:
countnwc += 1
print("Computing the nowcast (%02d) ..." % countnwc)
print("Starttime: %s" % startdate.strftime("%Y%m%d%H%M"))
## redirect stdout to log file
logfn = os.path.join(path_to_nwc, "%s_log.txt" % startdate.strftime("%Y%m%d%H%M"))
print("Log: %s" % logfn)
orig_stdout = sys.stdout
f = open(logfn, 'w')
sys.stdout = f
print("*******************")
print("* %s *****" % startdate.strftime("%Y%m%d%H%M"))
print("* Parameter set : *")
# pprint.pprint(p)
print("*******************")
print("--- Start of the run : %s ---" % (datetime.datetime.now()))
## time
t0 = time.time()
# Read inputs
# print("Read the data...")
## find radar field filenames
input_files = stp.io.find_by_date(startdate, ds.root_path, ds.path_fmt, ds.fn_pattern,
ds.fn_ext, ds.timestep, p["n_prvs_times"])
## read radar field files
importer = stp.io.get_method(ds.importer, type="importer")
R, _, metadata = stp.io.read_timeseries(input_files, importer, **ds.importer_kwargs)
metadata0 = metadata.copy()
metadata0["shape"] = R.shape[1:]
# Prepare input files
# print("Prepare the data...")
## if requested, make sure we work with a square domain
reshaper = stp.utils.get_method(p["adjust_domain"])
R, metadata = reshaper(R, metadata)
## if necessary, convert to rain rates [mm/h]
converter = stp.utils.get_method("mm/h")
R, metadata = converter(R, metadata)
## threshold the data
R[R < p["r_threshold"]] = 0.0
metadata["threshold"] = p["r_threshold"]
## convert the data
converter = stp.utils.get_method(p["unit"])
R, metadata = converter(R, metadata)
## transform the data
transformer = stp.utils.get_method(p["transformation"])
R, metadata = transformer(R, metadata)
## set NaN equal to zero
R[~np.isfinite(R)] = metadata["zerovalue"]
# Compute motion field
oflow_method = stp.motion.get_method(p["oflow_method"])
UV = oflow_method(R)
#####
# Perform the nowcast
#####
## initialize netcdf file
incremental = "timestep" if p["nwc_method"].lower() == "steps" else None
if catchments == True:
metadata_new = stp.utils.catchment_metadata_mpi(shapes, metadata0)
d = {}
for n in range(0, len(catchment_filenames)):
d["exporter_{0}".format(n)] = stp.io.initialize_forecast_exporter_netcdf(outfn[n], startdate,
ds.timestep, p["n_lead_times"], metadata_new[n]["shape"],
p["n_ens_members"], metadata_new[n], incremental=incremental)
else:
exporter = stp.io.initialize_forecast_exporter_netcdf(outfn, startdate,
ds.timestep, p["n_lead_times"], metadata0["shape"],
p["n_ens_members"], metadata0, incremental=incremental)
## start the nowcast
nwc_method = stp.nowcasts.get_method(p["nwc_method"])
R_fct = nwc_method(R[-1,:,:], UV, p["n_lead_times"], extrap_method=p["adv_method"])
print(R_fct.shape[0])
export(R_fct)
## save results, either per catchment or in total
if catchments == True:
for n in range(0, len(catchment_filenames)):
key = list(d.keys())[n]
stp.io.close_forecast_file(d[key])
else:
stp.io.close_forecast_file(exporter)
R_fct = None
# save log
print("--- End of the run : %s ---" % (datetime.datetime.now()))
print("--- Total time : %s seconds ---" % (time.time() - t0))
sys.stdout = orig_stdout
f.close()
# next forecast
startdate += datetime.timedelta(minutes = p["data"][2])
except ValueError:
print('ValueError')
# next forecast
startdate += datetime.timedelta(minutes = p["data"][2])
# tr.print_diff()
# scores.append(n)
#### RETURN TO THE CORRECT DIRECTORY, JUST IN CASE SOMETHING WAS CHANGED...
os.chdir('/u/imhof_rn/pysteps-master')
#### Wait here so we can collect all runs
#### Because we distributed the work evenly all processes should be here at approximately the same time
comm.Barrier()
#### Great, we're all here. Now let's gather the scores...
#### Collect values from all the processes in the main root
#scores = comm.gather(scores, root=0)
#logging.debug("Rank {} has scores {}".format(rank, scores))
end_time = time.time()
print('Total process took', (end_time - start_time)/3600.0, 'hours') | []
| []
| [
"PROJ_LIB"
]
| [] | ["PROJ_LIB"] | python | 1 | 0 | |
src/main/java/com/github/rhoar_ci/dashboard/openshift/OpenShiftClientProducer.java | package com.github.rhoar_ci.dashboard.openshift;
import io.fabric8.kubernetes.client.Config;
import io.fabric8.kubernetes.client.ConfigBuilder;
import io.fabric8.openshift.client.DefaultOpenShiftClient;
import io.fabric8.openshift.client.OpenShiftClient;
import org.wildfly.swarm.spi.runtime.annotations.ConfigurationValue;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.context.RequestScoped;
import javax.enterprise.inject.Disposes;
import javax.enterprise.inject.Produces;
import javax.inject.Inject;
@ApplicationScoped
public class OpenShiftClientProducer {
@Inject
private MyToken myToken;
@Inject
@ConfigurationValue("dashboard.openshift.url")
private String openshiftUrl;
@PostConstruct
public void init() {
if (openshiftUrl == null) {
// default inside OpenShift
openshiftUrl = "https://" + System.getenv("KUBERNETES_SERVICE_HOST") + ":" + System.getenv("KUBERNETES_SERVICE_PORT");
}
}
@Produces
@RequestScoped
public OpenShiftClient createOpenShiftClient() {
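// Build a per-request client that authenticates against the discovered OpenShift API URL with the caller's OAuth token.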
Config config = new ConfigBuilder()
.withMasterUrl(openshiftUrl)
.withOauthToken(myToken.get())
.withTrustCerts(true)
.build();
return new DefaultOpenShiftClient(config);
}
public void closeHttpClient(@Disposes OpenShiftClient openShiftClient) {
openShiftClient.close();
}
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | java | 2 | 0 | |
run_tests.py | from tests.test_adk_remote import RemoteTest
from tests.test_adk_local import LocalTest
import unittest
import os
if __name__ == "__main__":
if os.getenv('ALGORITHMIA_API_KEY', None) is None:
raise Exception("api key not provided, please export your ALGORITHMIA_API_KEY environment variable.")
unittest.main() | []
| []
| [
"ALGORITHMIA_API_KEY"
]
| [] | ["ALGORITHMIA_API_KEY"] | python | 1 | 0 | |
pkg/gardenlet/controller/networkpolicy/hostnameresolver/resolver.go | // Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hostnameresolver
import (
"context"
"net"
"net/url"
"os"
"sort"
"sync"
"time"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type resolver struct {
lock sync.RWMutex
upstreamFQDN string
upstreamPort int32
refreshTicker *time.Ticker
onUpdate func()
log logrus.FieldLogger
addrs []string
// used for testing
lookup lookup
}
type noOpResolver struct{}
type lookup interface {
LookupHost(ctx context.Context, host string) (addrs []string, err error)
}
// Provider allows starting host resolution and attaching callbacks
// for updates to a specific host's resolved addresses.
type Provider interface {
HasSynced() bool
Start(ctx context.Context)
WithCallback(onUpdate func())
HostResolver
}
// HostResolver is used for getting endpoint subsets with resolved IPs.
type HostResolver interface {
Subset() []corev1.EndpointSubset
}
// NewProvider returns a Provider for a specific host and port with resync
// indicating how often the hostname resolution is happening.
func NewProvider(host string, port string, log logrus.FieldLogger, resync time.Duration) Provider {
return &resolver{
upstreamFQDN: host,
upstreamPort: intstr.Parse(port).IntVal,
refreshTicker: time.NewTicker(resync),
log: log,
lookup: net.DefaultResolver,
}
}
// HasSynced returns true if ip addresses are exposed.
func (l *resolver) HasSynced() bool {
l.lock.Lock()
defer l.lock.Unlock()
return len(l.addrs) > 0
}
// Start resolves the upstream hostname every resync period until stopCtx is done.
// Updates are sent if the returned hosts have changed.
func (l *resolver) Start(stopCtx context.Context) {
updateFunc := func() {
addresses, err := l.lookup.LookupHost(stopCtx, l.upstreamFQDN)
if err != nil {
l.log.WithField("error", err).Errorln("could not resolve upstream hostname")
return
}
sort.Strings(addresses)
l.lock.Lock()
updated := !equal(addresses, l.addrs)
if updated {
l.addrs = addresses
l.log.WithField("resolvedIPs", l.addrs).Infoln("updated resolved addresses")
if l.onUpdate != nil {
l.onUpdate()
}
}
l.lock.Unlock()
}
// start the update in the beginning
updateFunc()
for {
select {
case <-l.refreshTicker.C:
updateFunc()
case <-stopCtx.Done():
l.refreshTicker.Stop()
l.log.Infoln("stopping periodic hostname resolution")
return
}
}
}
// Subset returns endpoint subsets built from the resolved IP addresses.
func (l *resolver) Subset() []corev1.EndpointSubset {
l.lock.RLock()
defer l.lock.RUnlock()
subset := []corev1.EndpointSubset{}
if len(l.addrs) > 0 {
s := corev1.EndpointSubset{
Ports: []corev1.EndpointPort{
{
Port: l.upstreamPort,
Protocol: corev1.ProtocolTCP,
},
},
Addresses: make([]corev1.EndpointAddress, 0, len(l.addrs)),
}
for _, addr := range l.addrs {
s.Addresses = append(s.Addresses, corev1.EndpointAddress{IP: addr})
}
subset = append(subset, s)
}
return subset
}
// WithCallback calls onUpdate function when resolved IPs are changed.
func (l *resolver) WithCallback(onUpdate func()) {
l.onUpdate = onUpdate
}
// CreateForCluster tries to use the hostname and port from the client to
// create the provider. If that fails, it tries to use the
// KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables.
// If that also fails, it falls back to the no-op provider.
func CreateForCluster(client kubernetes.Interface, logger logrus.FieldLogger) (Provider, error) {
u, err := url.Parse(client.RESTConfig().Host)
if err != nil {
return nil, err
}
var (
serverHostname = u.Hostname()
providerLogger = logger.WithField("hostname", serverHostname)
envHostname, envPort = os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
port = "443"
)
if net.ParseIP(serverHostname) == nil {
if p := u.Port(); p != "" {
port = p
}
providerLogger.Infoln("using hostname resolver")
return NewProvider(
serverHostname,
port,
providerLogger,
time.Second*30,
), nil
} else if envHostname != "" &&
envPort != "" &&
net.ParseIP(envHostname) == nil {
providerLogger.Infoln("fallback to environment variable hostname resolver")
return NewProvider(
envHostname,
envPort,
providerLogger,
time.Second*30,
), nil
}
providerLogger.Infoln("using no-op hostname resolver")
return NewNoOpProvider(), nil
}
// NewNoOpProvider returns a no-op Provider.
func NewNoOpProvider() Provider { return &noOpResover{} }
// HasSynced always returns true.
func (*noOpResolver) HasSynced() bool { return true }
// Start does nothing.
func (*noOpResolver) Start(_ context.Context) {}
// Subset returns an empty slice.
func (*noOpResolver) Subset() []corev1.EndpointSubset { return []corev1.EndpointSubset{} }
// WithCallback does nothing.
func (*noOpResolver) WithCallback(_ func()) {}
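// equal reports whether the two string slices have identical contents. The resolver sorts addresses before comparing, so ordering differences do not cause spurious updates.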
func equal(a, b []string) bool {
if (a == nil) != (b == nil) {
return false
}
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
config/config.go | package config
import (
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/projectx13/projectx/xbmc"
"github.com/dustin/go-humanize"
"github.com/op/go-logging"
"github.com/pbnjay/memory"
"github.com/sanity-io/litter"
)
var log = logging.MustGetLogger("config")
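// privacyRegex matches password-like fields in the dumped configuration so their values can be masked with asterisks before logging.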
var privacyRegex = regexp.MustCompile(`(?i)(pass|password): "(.+?)"`)
const maxMemorySize = 300 * 1024 * 1024
// Configuration ...
type Configuration struct {
DownloadPath string
TorrentsPath string
LibraryPath string
Info *xbmc.AddonInfo
Platform *xbmc.Platform
Language string
TemporaryPath string
ProfilePath string
HomePath string
XbmcPath string
SpoofUserAgent int
KeepDownloading int
KeepFilesPlaying int
KeepFilesFinished int
UseTorrentHistory bool
TorrentHistorySize int
UseFanartTv bool
DisableBgProgress bool
DisableBgProgressPlayback bool
ForceUseTrakt bool
UseCacheSelection bool
UseCacheSearch bool
UseCacheTorrents bool
CacheSearchDuration int
ShowFilesWatched bool
ResultsPerPage int
GreetingEnabled bool
EnableOverlayStatus bool
SilentStreamStart bool
AutoYesEnabled bool
AutoYesTimeout int
ChooseStreamAutoMovie bool
ChooseStreamAutoShow bool
ChooseStreamAutoSearch bool
ForceLinkType bool
UseOriginalTitle bool
UseAnimeEnTitle bool
UseLowestReleaseDate bool
AddSpecials bool
AddEpisodeNumbers bool
ShowUnairedSeasons bool
ShowUnairedEpisodes bool
ShowSeasonsAll bool
ShowSeasonsOrder int
SmartEpisodeStart bool
SmartEpisodeMatch bool
SmartEpisodeChoose bool
LibraryEnabled bool
LibrarySyncEnabled bool
LibrarySyncPlaybackEnabled bool
LibraryUpdate int
StrmLanguage string
LibraryNFOMovies bool
LibraryNFOShows bool
PlaybackPercent int
DownloadStorage int
SkipBurstSearch bool
AutoMemorySize bool
AutoKodiBufferSize bool
AutoAdjustMemorySize bool
AutoMemorySizeStrategy int
MemorySize int
AutoAdjustBufferSize bool
MinCandidateSize int64
MinCandidateShowSize int64
BufferTimeout int
BufferSize int
EndBufferSize int
KodiBufferSize int
UploadRateLimit int
DownloadRateLimit int
AutoloadTorrents bool
AutoloadTorrentsPaused bool
LimitAfterBuffering bool
ConnectionsLimit int
ConnTrackerLimit int
ConnTrackerLimitAuto bool
SessionSave int
SeedForever bool
ShareRatioLimit int
SeedTimeRatioLimit int
SeedTimeLimit int
DisableUpload bool
DisableDHT bool
DisableTCP bool
DisableUTP bool
DisableUPNP bool
EncryptionPolicy int
ListenPortMin int
ListenPortMax int
ListenInterfaces string
ListenAutoDetectIP bool
ListenAutoDetectPort bool
OutgoingInterfaces string
TunedStorage bool
DiskCacheSize int
UseLibtorrentConfig bool
UseLibtorrentLogging bool
UseLibtorrentDeadlines bool
UseLibtorrentPauseResume bool
LibtorrentProfile int
MagnetTrackers int
MagnetResolveTimeout int
Scrobble bool
AutoScrapeEnabled bool
AutoScrapeLibraryEnabled bool
AutoScrapeStrategy int
AutoScrapeStrategyExpect int
AutoScrapePerHours int
AutoScrapeLimitMovies int
AutoScrapeInterval int
TraktClientID string
TraktClientSecret string
TraktUsername string
TraktToken string
TraktRefreshToken string
TraktTokenExpiry int
TraktSyncEnabled bool
TraktSyncPlaybackEnabled bool
TraktSyncFrequencyMin int
TraktSyncCollections bool
TraktSyncWatchlist bool
TraktSyncUserlists bool
TraktSyncPlaybackProgress bool
TraktSyncHidden bool
TraktSyncWatched bool
TraktSyncWatchedSingle bool
TraktSyncWatchedBack bool
TraktSyncAddedMovies bool
TraktSyncAddedMoviesLocation int
TraktSyncAddedMoviesList int
TraktSyncAddedShows bool
TraktSyncAddedShowsLocation int
TraktSyncAddedShowsList int
TraktSyncRemovedMovies bool
TraktSyncRemovedMoviesLocation int
TraktSyncRemovedMoviesList int
TraktSyncRemovedShows bool
TraktSyncRemovedShowsLocation int
TraktSyncRemovedShowsList int
TraktProgressUnaired bool
TraktProgressSort int
TraktProgressDateFormat string
TraktProgressColorDate string
TraktProgressColorShow string
TraktProgressColorEpisode string
TraktProgressColorUnaired string
TraktCalendarsDateFormat string
TraktCalendarsColorDate string
TraktCalendarsColorShow string
TraktCalendarsColorEpisode string
TraktCalendarsColorUnaired string
UpdateFrequency int
UpdateDelay int
UpdateAutoScan bool
PlayResume bool
PlayResumeBack int
StoreResume bool
StoreResumeAction int
TMDBApiKey string
OSDBUser string
OSDBPass string
OSDBLanguage string
OSDBAutoLanguage bool
OSDBAutoLoad bool
OSDBAutoLoadCount int
OSDBAutoLoadDelete bool
OSDBAutoLoadSkipExists bool
OSDBIncludedEnabled bool
OSDBIncludedSkipExists bool
SortingModeMovies int
SortingModeShows int
ResolutionPreferenceMovies int
ResolutionPreferenceShows int
PercentageAdditionalSeeders int
CustomProviderTimeoutEnabled bool
CustomProviderTimeout int
InternalDNSEnabled bool
InternalDNSSkipIPv6 bool
InternalProxyEnabled bool
InternalProxyLogging bool
InternalProxyLoggingBody bool
AntizapretEnabled bool
ProxyURL string
ProxyType int
ProxyEnabled bool
ProxyHost string
ProxyPort int
ProxyLogin string
ProxyPassword string
ProxyUseHTTP bool
ProxyUseTracker bool
ProxyUseDownload bool
CompletedMove bool
CompletedMoviesPath string
CompletedShowsPath string
LocalOnlyClient bool
}
// Addon ...
type Addon struct {
ID string
Name string
Version string
Enabled bool
}
var (
config = &Configuration{}
lock = sync.RWMutex{}
settingsAreSet = false
settingsWarning = ""
proxyTypes = []string{
"Socks4",
"Socks5",
"HTTP",
"HTTPS",
}
)
var (
// Args for cli arguments parsing
Args = struct {
RemoteHost string `help:"remote host, default is '127.0.0.1'"`
RemotePort int `help:"remote port, default is '65221'"`
LocalHost string `help:"local host, default is '0.0.0.0'"`
LocalPort int `help:"local port, default is '65220'"`
}{
RemoteHost: "127.0.0.1",
RemotePort: 65221,
LocalHost: "127.0.0.1",
LocalPort: 65220,
}
)
// Get ...
func Get() *Configuration {
lock.RLock()
defer lock.RUnlock()
return config
}
// Reload ...
func Reload() *Configuration {
log.Info("Reloading configuration...")
// Reloading RPC Hosts
log.Infof("Setting remote address to %s:%d", Args.RemoteHost, Args.RemotePort)
xbmc.XBMCJSONRPCHosts = []string{net.JoinHostPort(Args.RemoteHost, "9090")}
xbmc.XBMCExJSONRPCHosts = []string{net.JoinHostPort(Args.RemoteHost, strconv.Itoa(Args.RemotePort))}
defer func() {
if r := recover(); r != nil {
log.Warningf("Addon settings not properly set, opening settings window: %#v", r)
message := "LOCALIZE[30314]"
if settingsWarning != "" {
message = settingsWarning
}
xbmc.AddonSettings("plugin.video.projectx")
xbmc.Dialog("projectx", message)
waitForSettingsClosed()
// Custom code to say python not to report this error
os.Exit(5)
}
}()
info := xbmc.GetAddonInfo()
if info == nil || info.ID == "" {
log.Warningf("Can't continue because addon info is empty")
settingsWarning = "LOCALIZE[30113]"
panic(settingsWarning)
}
info.Path = xbmc.TranslatePath(info.Path)
info.Profile = xbmc.TranslatePath(info.Profile)
info.Home = xbmc.TranslatePath(info.Home)
info.Xbmc = xbmc.TranslatePath(info.Xbmc)
info.TempPath = filepath.Join(xbmc.TranslatePath("special://temp"), "projectx")
platform := xbmc.GetPlatform()
// If it's Windows and Kodi is installed from the Store, we should try to find the real path
// and change the addon settings accordingly
if platform != nil && strings.ToLower(platform.OS) == "windows" && strings.Contains(info.Xbmc, "XBMCFoundation") {
path := findExistingPath([]string{
filepath.Join(os.Getenv("LOCALAPPDATA"), "/Packages/XBMCFoundation.Kodi_4n2hpmxwrvr6p/LocalCache/Roaming/Kodi/"),
filepath.Join(os.Getenv("APPDATA"), "/kodi/"),
}, "/userdata/addon_data/"+info.ID)
if path != "" {
info.Path = strings.Replace(info.Path, info.Home, "", 1)
info.Profile = strings.Replace(info.Profile, info.Home, "", 1)
info.TempPath = strings.Replace(info.TempPath, info.Home, "", 1)
info.Icon = strings.Replace(info.Icon, info.Home, "", 1)
info.Path = filepath.Join(path, info.Path)
info.Profile = filepath.Join(path, info.Profile)
info.TempPath = filepath.Join(path, info.TempPath)
info.Icon = filepath.Join(path, info.Icon)
info.Home = path
}
}
os.RemoveAll(info.TempPath)
if err := os.MkdirAll(info.TempPath, 0777); err != nil {
log.Infof("Could not create temporary directory: %#v", err)
}
if platform.OS == "android" {
legacyPath := strings.Replace(info.Path, "/storage/emulated/0", "/storage/emulated/legacy", 1)
if _, err := os.Stat(legacyPath); err == nil {
info.Path = legacyPath
info.Profile = strings.Replace(info.Profile, "/storage/emulated/0", "/storage/emulated/legacy", 1)
log.Info("Using /storage/emulated/legacy path.")
}
}
if !PathExists(info.Profile) {
log.Infof("Profile path does not exist, creating it at: %s", info.Profile)
if err := os.MkdirAll(info.Profile, 0777); err != nil {
log.Errorf("Could not create profile directory: %#v", err)
}
}
if !PathExists(filepath.Join(info.Profile, "libtorrent.config")) {
filePath := filepath.Join(info.Profile, "libtorrent.config")
log.Infof("Creating libtorrent.config to further usage at: %s", filePath)
if _, err := os.Create(filePath); err == nil {
os.Chmod(filePath, 0666)
}
}
downloadPath := TranslatePath(xbmc.GetSettingString("download_path"))
libraryPath := TranslatePath(xbmc.GetSettingString("library_path"))
torrentsPath := TranslatePath(xbmc.GetSettingString("torrents_path"))
downloadStorage := xbmc.GetSettingInt("download_storage")
if downloadStorage > 1 {
downloadStorage = 1
}
log.Noticef("Paths translated by Kodi: Download = %s , Library = %s , Torrents = %s , Storage = %d", downloadPath, libraryPath, torrentsPath, downloadStorage)
if downloadStorage != 1 {
if downloadPath == "." {
log.Warningf("Can't continue because download path is empty")
settingsWarning = "LOCALIZE[30113]"
panic(settingsWarning)
} else if err := IsWritablePath(downloadPath); err != nil {
log.Errorf("Cannot write to download location '%s': %#v", downloadPath, err)
settingsWarning = err.Error()
panic(settingsWarning)
}
}
log.Infof("Using download path: %s", downloadPath)
if libraryPath == "." {
log.Errorf("Cannot use library location '%s'", libraryPath)
settingsWarning = "LOCALIZE[30220]"
panic(settingsWarning)
} else if strings.Contains(libraryPath, "projectx_library") {
if err := os.MkdirAll(libraryPath, 0777); err != nil {
log.Errorf("Could not create temporary library directory: %#v", err)
settingsWarning = err.Error()
panic(settingsWarning)
}
}
if err := IsWritablePath(libraryPath); err != nil {
log.Errorf("Cannot write to library location '%s': %#v", libraryPath, err)
settingsWarning = err.Error()
panic(settingsWarning)
}
log.Infof("Using library path: %s", libraryPath)
if torrentsPath == "." {
torrentsPath = filepath.Join(downloadPath, "Torrents")
} else if strings.Contains(torrentsPath, "projectx_torrents") {
if err := os.MkdirAll(torrentsPath, 0777); err != nil {
log.Errorf("Could not create temporary torrents directory: %#v", err)
settingsWarning = err.Error()
panic(settingsWarning)
}
}
if err := IsWritablePath(torrentsPath); err != nil {
log.Errorf("Cannot write to location '%s': %#v", torrentsPath, err)
settingsWarning = err.Error()
panic(settingsWarning)
}
log.Infof("Using torrents path: %s", torrentsPath)
xbmcSettings := xbmc.GetAllSettings()
settings := make(map[string]interface{})
for _, setting := range xbmcSettings {
switch setting.Type {
case "enum":
fallthrough
case "number":
value, _ := strconv.Atoi(setting.Value)
settings[setting.Key] = value
case "slider":
var valueInt int
var valueFloat float32
switch setting.Option {
case "percent":
fallthrough
case "int":
floated, _ := strconv.ParseFloat(setting.Value, 32)
valueInt = int(floated)
case "float":
floated, _ := strconv.ParseFloat(setting.Value, 32)
valueFloat = float32(floated)
}
if valueFloat > 0 {
settings[setting.Key] = valueFloat
} else {
settings[setting.Key] = valueInt
}
case "bool":
settings[setting.Key] = (setting.Value == "true")
default:
settings[setting.Key] = setting.Value
}
}
newConfig := Configuration{
DownloadPath: downloadPath,
LibraryPath: libraryPath,
TorrentsPath: torrentsPath,
Info: info,
Platform: platform,
Language: xbmc.GetLanguageISO639_1(),
TemporaryPath: info.TempPath,
ProfilePath: info.Profile,
HomePath: info.Home,
XbmcPath: info.Xbmc,
DownloadStorage: settings["download_storage"].(int),
SkipBurstSearch: settings["skip_burst_search"].(bool),
AutoMemorySize: settings["auto_memory_size"].(bool),
AutoAdjustMemorySize: settings["auto_adjust_memory_size"].(bool),
AutoMemorySizeStrategy: settings["auto_memory_size_strategy"].(int),
MemorySize: settings["memory_size"].(int) * 1024 * 1024,
AutoKodiBufferSize: settings["auto_kodi_buffer_size"].(bool),
AutoAdjustBufferSize: settings["auto_adjust_buffer_size"].(bool),
MinCandidateSize: int64(settings["min_candidate_size"].(int) * 1024 * 1024),
MinCandidateShowSize: int64(settings["min_candidate_show_size"].(int) * 1024 * 1024),
BufferTimeout: settings["buffer_timeout"].(int),
BufferSize: settings["buffer_size"].(int) * 1024 * 1024,
EndBufferSize: settings["end_buffer_size"].(int) * 1024 * 1024,
UploadRateLimit: settings["max_upload_rate"].(int) * 1024,
DownloadRateLimit: settings["max_download_rate"].(int) * 1024,
AutoloadTorrents: settings["autoload_torrents"].(bool),
AutoloadTorrentsPaused: settings["autoload_torrents_paused"].(bool),
SpoofUserAgent: settings["spoof_user_agent"].(int),
LimitAfterBuffering: settings["limit_after_buffering"].(bool),
KeepDownloading: settings["keep_downloading"].(int),
KeepFilesPlaying: settings["keep_files_playing"].(int),
KeepFilesFinished: settings["keep_files_finished"].(int),
UseTorrentHistory: settings["use_torrent_history"].(bool),
TorrentHistorySize: settings["torrent_history_size"].(int),
UseFanartTv: settings["use_fanart_tv"].(bool),
DisableBgProgress: settings["disable_bg_progress"].(bool),
DisableBgProgressPlayback: settings["disable_bg_progress_playback"].(bool),
ForceUseTrakt: settings["force_use_trakt"].(bool),
UseCacheSelection: settings["use_cache_selection"].(bool),
UseCacheSearch: settings["use_cache_search"].(bool),
UseCacheTorrents: settings["use_cache_torrents"].(bool),
CacheSearchDuration: settings["cache_search_duration"].(int),
ResultsPerPage: settings["results_per_page"].(int),
ShowFilesWatched: settings["show_files_watched"].(bool),
GreetingEnabled: settings["greeting_enabled"].(bool),
EnableOverlayStatus: settings["enable_overlay_status"].(bool),
SilentStreamStart: settings["silent_stream_start"].(bool),
AutoYesEnabled: settings["autoyes_enabled"].(bool),
AutoYesTimeout: settings["autoyes_timeout"].(int),
ChooseStreamAutoMovie: settings["choose_stream_auto_movie"].(bool),
ChooseStreamAutoShow: settings["choose_stream_auto_show"].(bool),
ChooseStreamAutoSearch: settings["choose_stream_auto_search"].(bool),
ForceLinkType: settings["force_link_type"].(bool),
UseOriginalTitle: settings["use_original_title"].(bool),
UseAnimeEnTitle: settings["use_anime_en_title"].(bool),
UseLowestReleaseDate: settings["use_lowest_release_date"].(bool),
AddSpecials: settings["add_specials"].(bool),
AddEpisodeNumbers: settings["add_episode_numbers"].(bool),
ShowUnairedSeasons: settings["unaired_seasons"].(bool),
ShowUnairedEpisodes: settings["unaired_episodes"].(bool),
ShowSeasonsAll: settings["seasons_all"].(bool),
ShowSeasonsOrder: settings["seasons_order"].(int),
PlaybackPercent: settings["playback_percent"].(int),
SmartEpisodeStart: settings["smart_episode_start"].(bool),
SmartEpisodeMatch: settings["smart_episode_match"].(bool),
SmartEpisodeChoose: settings["smart_episode_choose"].(bool),
LibraryEnabled: settings["library_enabled"].(bool),
LibrarySyncEnabled: settings["library_sync_enabled"].(bool),
LibrarySyncPlaybackEnabled: settings["library_sync_playback_enabled"].(bool),
LibraryUpdate: settings["library_update"].(int),
StrmLanguage: settings["strm_language"].(string),
LibraryNFOMovies: settings["library_nfo_movies"].(bool),
LibraryNFOShows: settings["library_nfo_shows"].(bool),
SeedForever: settings["seed_forever"].(bool),
ShareRatioLimit: settings["share_ratio_limit"].(int),
SeedTimeRatioLimit: settings["seed_time_ratio_limit"].(int),
SeedTimeLimit: settings["seed_time_limit"].(int) * 3600,
DisableUpload: settings["disable_upload"].(bool),
DisableDHT: settings["disable_dht"].(bool),
DisableTCP: settings["disable_tcp"].(bool),
DisableUTP: settings["disable_utp"].(bool),
DisableUPNP: settings["disable_upnp"].(bool),
EncryptionPolicy: settings["encryption_policy"].(int),
ListenPortMin: settings["listen_port_min"].(int),
ListenPortMax: settings["listen_port_max"].(int),
ListenInterfaces: settings["listen_interfaces"].(string),
ListenAutoDetectIP: settings["listen_autodetect_ip"].(bool),
ListenAutoDetectPort: settings["listen_autodetect_port"].(bool),
OutgoingInterfaces: settings["outgoing_interfaces"].(string),
TunedStorage: settings["tuned_storage"].(bool),
DiskCacheSize: settings["disk_cache_size"].(int) * 1024 * 1024,
UseLibtorrentConfig: settings["use_libtorrent_config"].(bool),
UseLibtorrentLogging: settings["use_libtorrent_logging"].(bool),
UseLibtorrentDeadlines: settings["use_libtorrent_deadline"].(bool),
UseLibtorrentPauseResume: settings["use_libtorrent_pauseresume"].(bool),
LibtorrentProfile: settings["libtorrent_profile"].(int),
MagnetTrackers: settings["magnet_trackers"].(int),
MagnetResolveTimeout: settings["magnet_resolve_timeout"].(int),
ConnectionsLimit: settings["connections_limit"].(int),
ConnTrackerLimit: settings["conntracker_limit"].(int),
ConnTrackerLimitAuto: settings["conntracker_limit_auto"].(bool),
SessionSave: settings["session_save"].(int),
Scrobble: settings["trakt_scrobble"].(bool),
AutoScrapeEnabled: settings["autoscrape_is_enabled"].(bool),
AutoScrapeLibraryEnabled: settings["autoscrape_library_enabled"].(bool),
AutoScrapeStrategy: settings["autoscrape_strategy"].(int),
AutoScrapeStrategyExpect: settings["autoscrape_strategy_expect"].(int),
AutoScrapePerHours: settings["autoscrape_per_hours"].(int),
AutoScrapeLimitMovies: settings["autoscrape_limit_movies"].(int),
AutoScrapeInterval: settings["autoscrape_interval"].(int),
TraktClientID: settings["trakt_client_id"].(string),
TraktClientSecret: settings["trakt_client_secret"].(string),
TraktUsername: settings["trakt_username"].(string),
TraktToken: settings["trakt_token"].(string),
TraktRefreshToken: settings["trakt_refresh_token"].(string),
TraktTokenExpiry: settings["trakt_token_expiry"].(int),
TraktSyncEnabled: settings["trakt_sync_enabled"].(bool),
TraktSyncPlaybackEnabled: settings["trakt_sync_playback_enabled"].(bool),
TraktSyncFrequencyMin: settings["trakt_sync_frequency_min"].(int),
TraktSyncCollections: settings["trakt_sync_collections"].(bool),
TraktSyncWatchlist: settings["trakt_sync_watchlist"].(bool),
TraktSyncUserlists: settings["trakt_sync_userlists"].(bool),
TraktSyncPlaybackProgress: settings["trakt_sync_playback_progress"].(bool),
TraktSyncHidden: settings["trakt_sync_hidden"].(bool),
TraktSyncWatched: settings["trakt_sync_watched"].(bool),
TraktSyncWatchedSingle: settings["trakt_sync_watched_single"].(bool),
TraktSyncWatchedBack: settings["trakt_sync_watchedback"].(bool),
TraktSyncAddedMovies: settings["trakt_sync_added_movies"].(bool),
TraktSyncAddedMoviesLocation: settings["trakt_sync_added_movies_location"].(int),
TraktSyncAddedMoviesList: settings["trakt_sync_added_movies_list"].(int),
TraktSyncAddedShows: settings["trakt_sync_added_shows"].(bool),
TraktSyncAddedShowsLocation: settings["trakt_sync_added_shows_location"].(int),
TraktSyncAddedShowsList: settings["trakt_sync_added_shows_list"].(int),
TraktSyncRemovedMovies: settings["trakt_sync_removed_movies"].(bool),
TraktSyncRemovedMoviesLocation: settings["trakt_sync_removed_movies_location"].(int),
TraktSyncRemovedMoviesList: settings["trakt_sync_removed_movies_list"].(int),
TraktSyncRemovedShows: settings["trakt_sync_removed_shows"].(bool),
TraktSyncRemovedShowsLocation: settings["trakt_sync_removed_shows_location"].(int),
TraktSyncRemovedShowsList: settings["trakt_sync_removed_shows_list"].(int),
TraktProgressUnaired: settings["trakt_progress_unaired"].(bool),
TraktProgressSort: settings["trakt_progress_sort"].(int),
TraktProgressDateFormat: settings["trakt_progress_date_format"].(string),
TraktProgressColorDate: settings["trakt_progress_color_date"].(string),
TraktProgressColorShow: settings["trakt_progress_color_show"].(string),
TraktProgressColorEpisode: settings["trakt_progress_color_episode"].(string),
TraktProgressColorUnaired: settings["trakt_progress_color_unaired"].(string),
TraktCalendarsDateFormat: settings["trakt_calendars_date_format"].(string),
TraktCalendarsColorDate: settings["trakt_calendars_color_date"].(string),
TraktCalendarsColorShow: settings["trakt_calendars_color_show"].(string),
TraktCalendarsColorEpisode: settings["trakt_calendars_color_episode"].(string),
TraktCalendarsColorUnaired: settings["trakt_calendars_color_unaired"].(string),
UpdateFrequency: settings["library_update_frequency"].(int),
UpdateDelay: settings["library_update_delay"].(int),
UpdateAutoScan: settings["library_auto_scan"].(bool),
PlayResume: settings["play_resume"].(bool),
PlayResumeBack: settings["play_resume_back"].(int),
StoreResume: settings["store_resume"].(bool),
StoreResumeAction: settings["store_resume_action"].(int),
TMDBApiKey: settings["tmdb_api_key"].(string),
OSDBUser: settings["osdb_user"].(string),
OSDBPass: settings["osdb_pass"].(string),
OSDBLanguage: settings["osdb_language"].(string),
OSDBAutoLanguage: settings["osdb_auto_language"].(bool),
OSDBAutoLoad: settings["osdb_auto_load"].(bool),
OSDBAutoLoadCount: settings["osdb_auto_load_count"].(int),
OSDBAutoLoadDelete: settings["osdb_auto_load_delete"].(bool),
OSDBAutoLoadSkipExists: settings["osdb_auto_load_skipexists"].(bool),
OSDBIncludedEnabled: settings["osdb_included_enabled"].(bool),
OSDBIncludedSkipExists: settings["osdb_included_skipexists"].(bool),
SortingModeMovies: settings["sorting_mode_movies"].(int),
SortingModeShows: settings["sorting_mode_shows"].(int),
ResolutionPreferenceMovies: settings["resolution_preference_movies"].(int),
ResolutionPreferenceShows: settings["resolution_preference_shows"].(int),
PercentageAdditionalSeeders: settings["percentage_additional_seeders"].(int),
CustomProviderTimeoutEnabled: settings["custom_provider_timeout_enabled"].(bool),
CustomProviderTimeout: settings["custom_provider_timeout"].(int),
InternalDNSEnabled: settings["internal_dns_enabled"].(bool),
InternalDNSSkipIPv6: settings["internal_dns_skip_ipv6"].(bool),
InternalProxyEnabled: settings["internal_proxy_enabled"].(bool),
InternalProxyLogging: settings["internal_proxy_logging"].(bool),
InternalProxyLoggingBody: settings["internal_proxy_logging_body"].(bool),
AntizapretEnabled: settings["antizapret_enabled"].(bool),
ProxyType: settings["proxy_type"].(int),
ProxyEnabled: settings["proxy_enabled"].(bool),
ProxyHost: settings["proxy_host"].(string),
ProxyPort: settings["proxy_port"].(int),
ProxyLogin: settings["proxy_login"].(string),
ProxyPassword: settings["proxy_password"].(string),
ProxyUseHTTP: settings["use_proxy_http"].(bool),
ProxyUseTracker: settings["use_proxy_tracker"].(bool),
ProxyUseDownload: settings["use_proxy_download"].(bool),
CompletedMove: settings["completed_move"].(bool),
CompletedMoviesPath: settings["completed_movies_path"].(string),
CompletedShowsPath: settings["completed_shows_path"].(string),
LocalOnlyClient: settings["local_only_client"].(bool),
}
if newConfig.TraktClientID == "" {
newConfig.TraktClientID = "f37e372ec0fb7331c808a613b025ea175b771afee972c13b19ff9d7e583532bd"
}
if newConfig.TraktClientSecret == "" {
newConfig.TraktClientSecret = "4897369f643ce492ffac809cee110ff3563ef9e92e110b84c6cbeb771b583bba"
}
// Fallback for old configuration with additional storage variants
if newConfig.DownloadStorage > 1 {
newConfig.DownloadStorage = 1
}
// For memory storage we are changing configuration
// to stop downloading after playback has stopped and so on
if newConfig.DownloadStorage == 1 {
// TODO: Do we need this?
// newConfig.SeedTimeLimit = 24 * 60 * 60
// newConfig.SeedTimeRatioLimit = 10000
// newConfig.ShareRatioLimit = 10000
// Calculate possible memory size, depending of selected strategy
if newConfig.AutoMemorySize {
if newConfig.AutoMemorySizeStrategy == 0 {
newConfig.MemorySize = 40 * 1024 * 1024
} else {
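// Strategy 1 uses 8% of total system memory, strategy 2 uses 15%; the result is capped at maxMemorySize below.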
pct := uint64(8)
if newConfig.AutoMemorySizeStrategy == 2 {
pct = 15
}
mem := memory.TotalMemory() / 100 * pct
if mem > 0 {
newConfig.MemorySize = int(mem)
}
log.Debugf("Total system memory: %s\n", humanize.Bytes(memory.TotalMemory()))
log.Debugf("Automatically selected memory size: %s\n", humanize.Bytes(uint64(newConfig.MemorySize)))
if newConfig.MemorySize > maxMemorySize {
log.Debugf("Selected memory size (%s) is bigger than maximum for auto-select (%s), so we decrease memory size to maximum allowed: %s", humanize.Bytes(uint64(mem)), humanize.Bytes(uint64(maxMemorySize)), humanize.Bytes(uint64(maxMemorySize)))
newConfig.MemorySize = maxMemorySize
}
}
}
}
// Set default Trakt Frequency
if newConfig.TraktToken != "" && newConfig.TraktSyncFrequencyMin == 0 {
newConfig.TraktSyncFrequencyMin = 5
}
// Setup OSDB language
if newConfig.OSDBAutoLanguage || newConfig.OSDBLanguage == "" {
newConfig.OSDBLanguage = newConfig.Language
}
// Collect proxy settings
if newConfig.ProxyEnabled && newConfig.ProxyHost != "" {
newConfig.ProxyURL = proxyTypes[newConfig.ProxyType] + "://"
if newConfig.ProxyLogin != "" || newConfig.ProxyPassword != "" {
newConfig.ProxyURL += newConfig.ProxyLogin + ":" + newConfig.ProxyPassword + "@"
}
newConfig.ProxyURL += newConfig.ProxyHost + ":" + strconv.Itoa(newConfig.ProxyPort)
}
// Reading Kodi's advancedsettings file for MemorySize variable to avoid waiting for playback
// after projectx's buffer is finished.
newConfig.KodiBufferSize = getKodiBufferSize()
if newConfig.AutoKodiBufferSize && newConfig.KodiBufferSize > newConfig.BufferSize {
newConfig.BufferSize = newConfig.KodiBufferSize
log.Debugf("Adjusting buffer size according to Kodi advancedsettings.xml configuration to %s", humanize.Bytes(uint64(newConfig.BufferSize)))
}
if newConfig.EndBufferSize < 1*1024*1024 {
newConfig.EndBufferSize = 1 * 1024 * 1024
}
// Read Strm Language settings and cut-off ISO value
if strings.Contains(newConfig.StrmLanguage, " | ") {
tokens := strings.Split(newConfig.StrmLanguage, " | ")
if len(tokens) == 2 {
newConfig.StrmLanguage = tokens[1]
} else {
newConfig.StrmLanguage = newConfig.Language
}
} else {
newConfig.StrmLanguage = newConfig.Language
}
if newConfig.SessionSave == 0 {
newConfig.SessionSave = 10
}
if newConfig.DiskCacheSize == 0 {
newConfig.DiskCacheSize = 12 * 1024 * 1024
}
if newConfig.AutoYesEnabled {
xbmc.DialogAutoclose = newConfig.AutoYesTimeout
} else {
xbmc.DialogAutoclose = 1200
}
lock.Lock()
config = &newConfig
lock.Unlock()
go CheckBurst()
// Replacing passwords with asterisks
configOutput := litter.Sdump(config)
configOutput = privacyRegex.ReplaceAllString(configOutput, `$1: "********"`)
log.Debugf("Using configuration: %s", configOutput)
return config
}
// AddonIcon ...
func AddonIcon() string {
return filepath.Join(Get().Info.Path, "icon.png")
}
// AddonResource ...
func AddonResource(args ...string) string {
return filepath.Join(Get().Info.Path, "resources", filepath.Join(args...))
}
// TranslatePath ...
func TranslatePath(path string) string {
// Special case for temporary path in Kodi
if strings.HasPrefix(path, "special://temp/") {
dir := strings.Replace(path, "special://temp/", "", 1)
kodiDir := xbmc.TranslatePath("special://temp")
pathDir := filepath.Join(kodiDir, dir)
if PathExists(pathDir) {
return pathDir
}
if err := os.MkdirAll(pathDir, 0777); err != nil {
log.Errorf("Could not create temporary directory: %#v", err)
return path
}
return pathDir
}
// Do not translate nfs/smb path
// if strings.HasPrefix(path, "nfs:") || strings.HasPrefix(path, "smb:") {
// if !strings.HasSuffix(path, "/") {
// path += "/"
// }
// return path
// }
return filepath.Dir(xbmc.TranslatePath(path))
}
// PathExists returns whether the path exists on the filesystem
func PathExists(path string) bool {
if _, err := os.Stat(path); os.IsNotExist(err) {
return false
}
return true
}
// IsWritablePath ...
func IsWritablePath(path string) error {
if path == "." {
return errors.New("Path not set")
}
// TODO: Review this once test evidence comes in
if strings.HasPrefix(path, "nfs") || strings.HasPrefix(path, "smb") {
return fmt.Errorf("Network paths are not supported, change %s to a locally mounted path by the OS", path)
}
if p, err := os.Stat(path); err != nil || !p.IsDir() {
if err != nil {
return err
}
return fmt.Errorf("%s is not a valid directory", path)
}
writableFile := filepath.Join(path, ".writable")
writable, err := os.Create(writableFile)
if err != nil {
return err
}
writable.Close()
os.Remove(writableFile)
return nil
}
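// waitForSettingsClosed polls Kodi every few seconds and returns once the addon settings dialog has been closed.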
func waitForSettingsClosed() {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if !xbmc.AddonSettingsOpened() {
return
}
}
}
}
// CheckBurst ...
func CheckBurst() {
// Check for enabled providers and projectx Burst
for _, addon := range xbmc.GetAddons("xbmc.python.script", "executable", "all", []string{"name", "version", "enabled"}).Addons {
if strings.HasPrefix(addon.ID, "script.projectx.") {
			if addon.Enabled {
return
}
}
}
time.Sleep(5 * time.Second)
log.Info("Updating Kodi add-on repositories for Burst...")
xbmc.UpdateLocalAddons()
xbmc.UpdateAddonRepos()
if !Get().SkipBurstSearch && xbmc.DialogConfirmFocused("projectx", "LOCALIZE[30271]") {
log.Infof("Triggering Kodi to check for script.projectx.burst plugin")
xbmc.PlayURL("plugin://script.projectx.burst/")
time.Sleep(15 * time.Second)
log.Infof("Checking for existence of script.projectx.burst plugin now")
if xbmc.IsAddonInstalled("script.projectx.burst") {
xbmc.SetAddonEnabled("script.projectx.burst", true)
xbmc.Notify("projectx", "LOCALIZE[30272]", AddonIcon())
} else {
xbmc.Dialog("projectx", "LOCALIZE[30273]")
}
}
}
func findExistingPath(paths []string, addon string) string {
	// Append the addon folder so we do not match a dummy path; only real folders should count.
for _, v := range paths {
p := filepath.Join(v, addon)
if _, err := os.Stat(p); err != nil {
continue
}
return v
}
return ""
}
func getKodiBufferSize() int {
xmlFile, err := os.Open(filepath.Join(xbmc.TranslatePath("special://userdata"), "advancedsettings.xml"))
if err != nil {
return 0
}
defer xmlFile.Close()
b, _ := ioutil.ReadAll(xmlFile)
var as *xbmc.AdvancedSettings
if err = xml.Unmarshal(b, &as); err != nil {
return 0
}
if as.Cache.MemorySizeLegacy > 0 {
return as.Cache.MemorySizeLegacy
} else if as.Cache.MemorySize > 0 {
return as.Cache.MemorySize
}
return 0
}
| ["\"LOCALAPPDATA\"", "\"APPDATA\""] | [] | ["APPDATA", "LOCALAPPDATA"] | [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 |
worker/src/main/java/io/openshift/booster/messaging/Worker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.openshift.booster.messaging;
import io.vertx.core.Future;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import io.vertx.proton.ProtonClient;
import io.vertx.proton.ProtonConnection;
import io.vertx.proton.ProtonReceiver;
import io.vertx.proton.ProtonSender;
import io.vertx.reactivex.config.ConfigRetriever;
import io.vertx.reactivex.core.AbstractVerticle;
import org.apache.qpid.proton.amqp.messaging.AmqpValue;
import org.apache.qpid.proton.amqp.messaging.ApplicationProperties;
import org.apache.qpid.proton.message.Message;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
public class Worker extends AbstractVerticle {
private static final Logger LOGGER = LoggerFactory.getLogger(Worker.class);
private static final String ID = "worker-vertx-" + UUID.randomUUID()
.toString().substring(0, 4);
private static final String AMQ_LOCATION_KEY =
System.getenv().getOrDefault("AMQ_LOCATION_KEY", "burrUnknown");
private static final AtomicInteger requestsProcessed = new AtomicInteger(0);
private static final AtomicInteger processingErrors = new AtomicInteger(0);
@Override
public void start(Future<Void> future) {
ConfigRetriever.create(vertx).rxGetConfig()
.doOnSuccess(json -> {
String amqpHost = json.getString("MESSAGING_SERVICE_HOST", "localhost");
int amqpPort = json.getInteger("MESSAGING_SERVICE_PORT", 5672);
String amqpUser = json.getString("MESSAGING_SERVICE_USER", "work-queue");
String amqpPassword = json.getString("MESSAGING_SERVICE_PASSWORD", "work-queue");
ProtonClient client = ProtonClient.create(vertx.getDelegate());
client.connect(amqpHost, amqpPort, amqpUser, amqpPassword, result -> {
if (result.failed()) {
future.fail(result.cause());
} else {
ProtonConnection conn = result.result();
conn.setContainer(ID);
conn.open();
receiveRequests(conn);
sendUpdates(conn);
future.complete();
}
});
}).flatMap(x -> vertx.createHttpServer().requestHandler(req -> req.response().end("Ready")).rxListen(8080))
.subscribe();
}
private void receiveRequests(ProtonConnection conn) {
// Ordinarily, a sender or receiver is tied to a named message
// source or target. By contrast, a null sender transmits
// messages using an "anonymous" link and routes them to their
// destination using the "to" property of the message.
ProtonSender sender = conn.createSender(null);
ProtonReceiver receiver = conn.createReceiver("work-requests");
receiver.handler((delivery, request) -> {
LOGGER.info("{0}: Receiving request {1}", ID, request);
String responseBody;
try {
responseBody = processRequest(request);
} catch (Exception e) {
LOGGER.error("{0}: Failed processing message: {1}", ID, e.getMessage());
processingErrors.incrementAndGet();
return;
}
Map<String, Object> props = new HashMap<>();
props.put("workerId", conn.getContainer());
props.put("AMQ_LOCATION_KEY",AMQ_LOCATION_KEY);
Message response = Message.Factory.create();
response.setAddress(request.getReplyTo());
response.setCorrelationId(request.getMessageId());
response.setBody(new AmqpValue(responseBody));
response.setApplicationProperties(new ApplicationProperties(props));
sender.send(response);
requestsProcessed.incrementAndGet();
LOGGER.info("{0}: Sent {1}", ID, response);
});
sender.open();
receiver.open();
}
private String processRequest(Message request) {
Map props = request.getApplicationProperties().getValue();
boolean uppercase = (boolean) props.get("uppercase");
boolean reverse = (boolean) props.get("reverse");
String text = (String) ((AmqpValue) request.getBody()).getValue();
if (uppercase) {
text = text.toUpperCase();
}
if (reverse) {
text = new StringBuilder(text).reverse().toString();
}
return "Ola " + text;
// return text;
}
private void sendUpdates(ProtonConnection conn) {
ProtonSender sender = conn.createSender("worker-updates");
vertx.setPeriodic(5000, timer -> {
if (conn.isDisconnected()) {
vertx.cancelTimer(timer);
return;
}
if (sender.sendQueueFull()) {
return;
}
LOGGER.debug("{0}: Sending status update", ID);
Map<String, Object> properties = new HashMap<>();
properties.put("workerId", conn.getContainer());
properties.put("AMQ_LOCATION_KEY",AMQ_LOCATION_KEY);
properties.put("timestamp", System.currentTimeMillis());
properties.put("requestsProcessed", (long) requestsProcessed.get());
properties.put("processingErrors", (long) processingErrors.get());
Message message = Message.Factory.create();
message.setApplicationProperties(new ApplicationProperties(properties));
sender.send(message);
});
sender.open();
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 |
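
For reference, the request/response contract implemented by receiveRequests and processRequest above reduces to a string body plus two application-property maps. The sketch below illustrates it with made-up values; the reply address and message id are assumptions, since the worker simply echoes them back via reply-to and correlation id.

```python
# What a requester would send to the "work-requests" address (illustrative values):
request = {
    "application_properties": {"uppercase": True, "reverse": False},
    "body": "world",               # carried as an AmqpValue string
    "reply_to": "work-responses",  # any address the requester listens on (assumed name)
    "message_id": "req-1",
}

# What the worker above would send back to request["reply_to"]:
response = {
    "application_properties": {
        "workerId": "worker-vertx-ab12",    # connection container id with a random suffix
        "AMQ_LOCATION_KEY": "burrUnknown",  # default when the env var is unset
    },
    "correlation_id": "req-1",
    "body": "Ola WORLD",                    # uppercase applied, reverse skipped
}
```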
pypsi/os/unix.py | #
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
'''
Unix (Cygwin, Linux, etc) specific functions
'''
import os
__all__ = [
'find_bins_in_path',
'is_path_prefix',
'make_ansi_stream',
'UnixAnsiStream'
]
class UnixAnsiStream(object):
def __init__(self, stream, width=None, isatty=None):
self._stream = stream
self.width = width
self._isatty = isatty
def isatty(self):
return self._stream.isatty() if self._isatty is None else self._isatty
def __getattr__(self, attr):
return getattr(self._stream, attr)
def __eq__(self, other):
if isinstance(other, UnixAnsiStream):
return self._stream == other.stream
return self._stream == other
def make_ansi_stream(stream, **kwargs):
'''
Create an ANSI-code compatible file stream. Unix file streams support ANSI
escape codes, so we don't need to do anything special.
'''
if isinstance(stream, UnixAnsiStream):
return stream
return UnixAnsiStream(stream, **kwargs)
def find_bins_in_path():
bins = set()
paths = [x for x in os.environ.get('PATH', '').split(':') if x.strip()]
paths.append('./')
for path in paths:
path = path or './'
try:
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isfile(p) and os.access(p, os.X_OK):
bins.add(entry)
except:
# bare except here because if this fails, tab completion can be entirely broken
pass
return bins
def is_path_prefix(t):
for prefix in ('./', '../', '/'):
if t.startswith(prefix):
return True
return False
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 |
client/src/main/java/org/mvndaemon/mvnd/client/DefaultClient.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mvndaemon.mvnd.client;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileTime;
import java.time.Duration;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.fusesource.jansi.Ansi;
import org.jline.utils.AttributedString;
import org.jline.utils.AttributedStyle;
import org.mvndaemon.mvnd.common.BuildProperties;
import org.mvndaemon.mvnd.common.DaemonException;
import org.mvndaemon.mvnd.common.DaemonInfo;
import org.mvndaemon.mvnd.common.DaemonRegistry;
import org.mvndaemon.mvnd.common.Environment;
import org.mvndaemon.mvnd.common.Message;
import org.mvndaemon.mvnd.common.Message.BuildException;
import org.mvndaemon.mvnd.common.OsUtils;
import org.mvndaemon.mvnd.common.TimeUtils;
import org.mvndaemon.mvnd.common.logging.ClientOutput;
import org.mvndaemon.mvnd.common.logging.TerminalOutput;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.mvndaemon.mvnd.client.DaemonParameters.LOG_EXTENSION;
public class DefaultClient implements Client {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultClient.class);
private final DaemonParameters parameters;
public static void main(String[] argv) throws Exception {
final List<String> args = new ArrayList<>(argv.length);
Path logFile = null;
int i = 0;
boolean batchMode = false;
while (i < argv.length) {
final String arg = argv[i++];
if ("-l".equals(arg) || "--log-file".equals(arg)) {
if (i < argv.length) {
logFile = Paths.get(argv[i++]);
} else {
throw new IllegalArgumentException("-l and --log-file need to be followed by a path");
}
} else {
if (!batchMode && ("-B".equals(arg) || "--batch-mode".equals(arg))) {
batchMode = true;
}
args.add(arg);
}
}
DaemonParameters parameters = new DaemonParameters();
try (TerminalOutput output = new TerminalOutput(batchMode || parameters.noBuffering(), parameters.rollingWindowSize(),
logFile)) {
try {
new DefaultClient(parameters).execute(output, args);
} catch (DaemonException.InterruptedException e) {
final AttributedStyle s = new AttributedStyle().bold().foreground(AttributedStyle.RED);
String str = new AttributedString(System.lineSeparator() + "Canceled by user", s).toAnsi();
output.accept(Message.display(str));
}
}
}
public DefaultClient(DaemonParameters parameters) {
this.parameters = parameters;
}
@Override
public ExecutionResult execute(ClientOutput output, List<String> argv) {
LOGGER.debug("Starting client");
final List<String> args = new ArrayList<>(argv.size());
boolean version = false;
boolean showVersion = false;
boolean debug = false;
for (String arg : argv) {
switch (arg) {
case "-v":
case "-version":
case "--version":
version = true;
args.add(arg);
break;
case "-V":
case "--show-version":
showVersion = true;
args.add(arg);
break;
case "-X":
case "--debug":
debug = true;
args.add(arg);
break;
default:
if (arg.startsWith("-D")) {
final int eqPos = arg.indexOf('=');
if (eqPos >= 0) {
System.setProperty(arg.substring(2, eqPos), arg.substring(eqPos + 1));
} else {
System.setProperty(arg.substring(2), "");
}
}
args.add(arg);
break;
}
}
// Print version if needed
if (version || showVersion || debug) {
// Print mvnd version
BuildProperties buildProperties = BuildProperties.getInstance();
final String nativeSuffix = Environment.isNative() ? " (native)" : "";
final String v = Ansi.ansi().bold().a(
"Maven Daemon "
+ buildProperties.getVersion()
+ "-" + buildProperties.getOsName()
+ "-" + buildProperties.getOsArch()
+ nativeSuffix)
.reset().toString();
output.accept(Message.log(v));
// Print terminal information
output.describeTerminal();
/*
* Do not return, rather pass -v to the server so that the client module does not need to depend on any
* Maven artifacts
*/
}
try (DaemonRegistry registry = new DaemonRegistry(parameters.registry())) {
boolean status = args.remove("--status");
if (status) {
final String template = " %36s %7s %5s %7s %5s %23s %s";
output.accept(Message.log(String.format(template,
"UUID", "PID", "Port", "Status", "RSS", "Last activity", "Java home")));
for (DaemonInfo d : registry.getAll()) {
if (ProcessHandle.of(d.getPid()).isEmpty()) {
/* The process does not exist anymore - remove it from the registry */
registry.remove(d.getUid());
} else {
output.accept(Message.log(String.format(template,
d.getUid(), d.getPid(), d.getAddress(), d.getState(),
OsUtils.kbTohumanReadable(OsUtils.findProcessRssInKb(d.getPid())),
LocalDateTime.ofInstant(
Instant.ofEpochMilli(Math.max(d.getLastIdle(), d.getLastBusy())),
ZoneId.systemDefault()),
d.getJavaHome())));
}
}
return new DefaultResult(argv, null);
}
boolean stop = args.remove("--stop");
if (stop) {
DaemonInfo[] dis = registry.getAll().toArray(new DaemonInfo[0]);
if (dis.length > 0) {
output.accept(Message.display("Stopping " + dis.length + " running daemons"));
for (DaemonInfo di : dis) {
try {
ProcessHandle.of(di.getPid()).ifPresent(ProcessHandle::destroyForcibly);
} catch (Exception t) {
System.out.println("Daemon " + di.getUid() + ": " + t);
} finally {
registry.remove(di.getUid());
}
}
}
return new DefaultResult(argv, null);
}
boolean purge = args.remove("--purge");
if (purge) {
String result = purgeLogs();
output.accept(Message.display(result != null ? result : "Nothing to purge"));
return new DefaultResult(argv, null);
}
if (args.stream().noneMatch(arg -> arg.startsWith("-T") || arg.equals("--threads"))) {
args.add("--threads");
args.add(parameters.threads());
}
if (args.stream().noneMatch(arg -> arg.startsWith("-b") || arg.equals("--builder"))) {
args.add("--builder");
args.add(parameters.builder());
}
final Path settings = parameters.settings();
if (settings != null && args.stream().noneMatch(arg -> arg.equals("-s") || arg.equals("--settings"))) {
args.add("--settings");
args.add(settings.toString());
}
final Path localMavenRepository = parameters.mavenRepoLocal();
if (localMavenRepository != null && args.stream().noneMatch(arg -> arg.startsWith("-Dmaven.repo.local="))) {
args.add("-Dmaven.repo.local=" + localMavenRepository.toString());
}
final DaemonConnector connector = new DaemonConnector(parameters, registry);
try (DaemonClientConnection daemon = connector.connect(output)) {
output.setDaemonDispatch(daemon::dispatch);
output.setDaemonReceive(daemon::enqueue);
output.accept(Message.buildStatus("Connected to daemon"));
daemon.dispatch(new Message.BuildRequest(
args,
parameters.userDir().toString(),
parameters.multiModuleProjectDirectory().toString(),
System.getenv()));
output.accept(Message.buildStatus("Build request sent"));
// We've sent the request, so it gives us a bit of time to purge the logs
AtomicReference<String> purgeMessage = new AtomicReference<>();
Thread purgeLog = new Thread(() -> {
purgeMessage.set(purgeLogs());
}, "Log purge");
purgeLog.setDaemon(true);
purgeLog.start();
try {
while (true) {
final List<Message> messages = daemon.receive();
output.accept(messages);
for (Message m : messages) {
switch (m.getType()) {
case Message.CANCEL_BUILD:
return new DefaultResult(argv,
new InterruptedException("The build was canceled"));
case Message.BUILD_EXCEPTION:
final BuildException e = (BuildException) m;
return new DefaultResult(argv,
new Exception(e.getClassName() + ": " + e.getMessage() + "\n" + e.getStackTrace()));
case Message.BUILD_STOPPED:
return new DefaultResult(argv, null);
}
}
}
} finally {
String msg = purgeMessage.get();
if (msg != null) {
output.accept(Message.display(msg));
}
}
}
}
}
private String purgeLogs() {
Path storage = parameters.daemonStorage();
Duration purgeLogPeriod = parameters.purgeLogPeriod();
if (!Files.isDirectory(storage) || !TimeUtils.isPositive(purgeLogPeriod)) {
return null;
}
String date = DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneId.systemDefault()).format(Instant.now());
Path log = storage.resolve("purge-" + date + ".log");
List<Path> deleted = new ArrayList<>();
List<Throwable> exceptions = new ArrayList<>();
FileTime limit = FileTime.from(Instant.now().minus(purgeLogPeriod));
try {
Files.list(storage)
.filter(p -> p.getFileName().toString().endsWith(LOG_EXTENSION))
.filter(p -> !log.equals(p))
.filter(p -> {
try {
FileTime lmt = Files.getLastModifiedTime(p);
return lmt.compareTo(limit) < 0;
} catch (IOException e) {
exceptions.add(e);
return false;
}
})
.forEach(p -> {
try {
Files.delete(p);
deleted.add(p);
} catch (IOException e) {
exceptions.add(e);
}
});
} catch (Exception e) {
exceptions.add(e);
}
if (exceptions.isEmpty() && deleted.isEmpty()) {
return null;
}
String logMessage;
try (PrintWriter w = new PrintWriter(Files.newBufferedWriter(log,
StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE))) {
w.printf("Purge executed at %s%n", Instant.now().toString());
if (deleted.isEmpty()) {
w.printf("No files deleted.%n");
} else {
w.printf("Deleted files:%n");
for (Path p : deleted) {
w.printf(" %s%n", p.toString());
}
}
if (!exceptions.isEmpty()) {
w.printf("%d exception(s) occurred during the purge", exceptions.size());
for (Throwable t : exceptions) {
t.printStackTrace(w);
}
}
char[] buf = new char[80];
Arrays.fill(buf, '=');
w.printf("%s%n", new String(buf));
logMessage = "log available in " + log.toString();
} catch (IOException e) {
logMessage = "an exception occurred when writing log to " + log.toString() + ": " + e.toString();
}
if (exceptions.isEmpty()) {
return String.format("Purged %d log files (%s)", deleted.size(), logMessage);
} else {
return String.format("Purged %d log files with %d exceptions (%s)", deleted.size(), exceptions.size(), logMessage);
}
}
private static class DefaultResult implements ExecutionResult {
private final Exception exception;
private final List<String> args;
private DefaultResult(List<String> args, Exception exception) {
super();
this.args = args;
this.exception = exception;
}
@Override
public ExecutionResult assertSuccess() {
if (exception != null) {
throw new AssertionError(appendCommand(new StringBuilder("Build failed: ")).toString(), exception);
}
return this;
}
@Override
public ExecutionResult assertFailure() {
if (exception == null) {
throw new AssertionError(appendCommand(new StringBuilder("Build did not fail: ")));
}
return this;
}
@Override
public boolean isSuccess() {
return exception == null;
}
StringBuilder appendCommand(StringBuilder sb) {
sb.append("mvnd");
for (String arg : args) {
sb.append(" \"").append(arg).append('"');
}
return sb;
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 |
modules/V1-vilt.py | # Code for "ActionCLIP: ActionCLIP: A New Paradigm for Action Recognition"
# arXiv:
# Mengmeng Wang, Jiazheng Xing, Yong Liu
from re import S
import torch
from torch import nn
from collections import OrderedDict
from torch.nn.modules import dropout
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from einops import rearrange, repeat
import math
import torch.nn.functional as F
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def trunc_normal_(x, mean=0., std=1.):
# From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12
return x.normal_().fmod_(2).mul_(std).add_(mean)
class TAggregate(nn.Module):
def __init__(self, clip_length=None, embed_dim=2048, n_layers=6):
super(TAggregate, self).__init__()
self.clip_length = clip_length
drop_rate = 0.
enc_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=8)
self.transformer_enc = nn.TransformerEncoder(enc_layer, num_layers=n_layers, norm=nn.LayerNorm(
embed_dim))
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, clip_length + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
with torch.no_grad():
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
with torch.no_grad():
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
nvids = x.shape[0]
cls_tokens = self.cls_token.expand(nvids, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
x.transpose_(1, 0)
o = self.transformer_enc(x)
return o[0]
class TemporalTransformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks((x))
class VideoAttText(nn.Module):
def __init__(self, d_model: int, n_head: int, drop_out: float, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
# ("drop1", nn.Dropout(drop_out)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model)),
# ("drop1", nn.Dropout(drop_out))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor, y: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.attention(self.ln_1(x), self.ln_1(y))
x = x + self.mlp(self.ln_2(x))
return x
class visual_prompt(nn.Module):
def __init__(self, sim_head, clip_state_dict, T):
super().__init__()
self.sim_header = sim_head
self.T = T
assert sim_head in ["meanP", "LSTM", "Transf", "Conv_1D", "Transf_cls", "Transf_att"]
if self.sim_header == "LSTM" or self.sim_header == "Transf_att" or self.sim_header == "Transf" or self.sim_header == "Transf_cls" or self.sim_header == "Conv_1D" :
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
self.frame_position_embeddings = nn.Embedding(context_length, embed_dim)
self.text_position_embeddings = nn.Embedding(context_length, embed_dim)
self.frame_type_embeddings = nn.Embedding(context_length, embed_dim)
self.text_type_embeddings = nn.Embedding(context_length, embed_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
with torch.no_grad():
trunc_normal_(self.cls_token, std=.02)
if self.sim_header == "Transf" or self.sim_header == "Transf_att":
self.transformer = TemporalTransformer(width=embed_dim, layers=6, heads=transformer_heads)
self.itm_head = nn.Linear(embed_dim, 2)
print('layer=6')
# self.norm = PreNorm(embed_dim, VideoAttText(dim=embed_dim, heads = 4, dim_head = 128, dropout = 0.5))
self.apply(self.init_weights)
if self.sim_header == "Transf_cls":
self.transformer = TAggregate(clip_length=self.T, embed_dim=embed_dim, n_layers=6)
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, LayerNorm):
if 'beta' in dir(module) and 'gamma' in dir(module):
module.beta.data.zero_()
module.gamma.data.fill_(1.0)
else:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, x, t_x, list_id, logits_per_img, logits_per_text):
b, t, c = x.size()
x = x.contiguous()
t_x = t_x.contiguous()
        x_original = x  # (48, 8, dimension=512) in the original setup
t_x_original = t_x
seq_length = t #t-frames
text_length = t_x_original.shape[1]
position_ids = torch.arange(seq_length, dtype=torch.long, device=x.device) #tensor([0, 1, 2, 3, 4, 5, 6, 7])
position_ids = position_ids.unsqueeze(0).expand(x.size(0), -1) #(batch_size, T)
frame_position_embeddings = self.frame_position_embeddings(position_ids)#(48,8,512)
frame_type_ids = torch.full_like(position_ids,1,device=x.device)
frame_type_embeddings = self.frame_type_embeddings(frame_type_ids)
text_type_ids = torch.zeros(t_x.shape[0],t_x.shape[1],dtype=torch.long, device=t_x.device)
text_type_embeddings = self.text_type_embeddings(text_type_ids)
text_position_ids = torch.arange(text_length, dtype=torch.long, device=x.device) #tensor([0, 1, 2, 3, 4, 5, 6, 7])
text_position_ids = text_position_ids.unsqueeze(0).expand(x.size(0), -1) #(batch_size, T)
text_position_embeddings = self.text_position_embeddings(text_position_ids)#(48,8,512)
cls_tokens = self.cls_token.expand(b, -1, -1)
cls_tokens_neg = self.cls_token.expand(b*2, -1, -1)
with torch.no_grad():
idx = list_id.reshape(1,-1)
mask = torch.eq(idx, idx.T)
weights_i2t = F.softmax(logits_per_img[:,:b]+1e-4,dim=1)
weights_t2i = F.softmax(logits_per_text[:,:b]+1e-4,dim=1)
weights_i2t.masked_fill_(mask, 0)
weights_t2i.masked_fill_(mask, 0)
x = x + frame_position_embeddings + frame_type_embeddings
t_x = t_x + text_type_embeddings+ text_position_embeddings
# select a negative image for each text
image_embeds_neg = []
        for bs in range(b):  # take negative samples, one per text (64 in the original setup)
neg_idx = torch.multinomial(weights_t2i[bs], 1).item()
image_embeds_neg.append(x[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg,dim=0)
# select a negative text for each image
text_embeds_neg = []
for bs in range(b):
neg_idx = torch.multinomial(weights_i2t[bs], 1).item()
text_embeds_neg.append(t_x[neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg,dim=0)
        text_embeds_all = torch.cat([t_x, text_embeds_neg], dim=0)  # note: the ordering here is deliberately flipped
        image_embeds_all = torch.cat([image_embeds_neg, x], dim=0)  # note: the ordering here is deliberately flipped
# positive_forward
x = torch.cat((cls_tokens, x), dim=1)
co_x = torch.cat((x,t_x), dim=1)
co_x = co_x.permute(1,0,2)
co_x = self.transformer(co_x)
#out_put
        out_img_feat = co_x[1:9]  # frame tokens; hard-coded slice assumes T == 8 (positions 1..8 after the CLS token)
        out_text_feat = co_x[9:]  # text tokens
# negative_forward
image_embeds_all = torch.cat((cls_tokens_neg, image_embeds_all), dim=1)
co_neg_all = torch.cat((image_embeds_all, text_embeds_all), dim=1)
co_neg_all = co_neg_all.permute(1,0,2)
co_neg_all = self.transformer(co_neg_all)
vl_embeddings = torch.cat([co_x[0], co_neg_all[0]],dim=0)
vl_output = self.itm_head(vl_embeddings)
return vl_output, out_img_feat, out_text_feat
| [] | [] | [] | [] | [] | python | null | null |
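
The fusion head above expects fairly specific shapes: frame features (b, T, d) with T effectively fixed at 8 by the hard-coded co_x[1:9] slice, token-level text features (b, L, d), integer label ids of length b, and similarity logits whose first b columns align with the batch. Below is a shape-check sketch with random tensors and a fake CLIP state dict; all sizes are assumptions for illustration, and it presumes the visual_prompt class above is in scope (the hyphenated filename means you would load it via importlib or run the snippet in the same file).

```python
import torch

d, T, L, b = 512, 8, 16, 4
fake_clip_sd = {
    "text_projection": torch.empty(d, d),
    "positional_embedding": torch.empty(77, d),
    "token_embedding.weight": torch.empty(49408, d),
    "ln_final.weight": torch.empty(d),
}
# The resblock keys only need to exist so the layer count can be inferred.
fake_clip_sd.update({f"transformer.resblocks.{i}.attn.in_proj_weight": torch.empty(1) for i in range(12)})

head = visual_prompt("Transf", fake_clip_sd, T)

x = torch.randn(b, T, d)       # frame features from the visual encoder
t_x = torch.randn(b, L, d)     # token-level text features
list_id = torch.arange(b)      # distinct label ids, so only the diagonal gets masked
logits_per_img = torch.randn(b, b)
logits_per_text = torch.randn(b, b)

vl_output, img_feat, text_feat = head(x, t_x, list_id, logits_per_img, logits_per_text)
print(vl_output.shape)  # (3*b, 2): ITM logits for b positive pairs followed by 2*b negative pairs
print(img_feat.shape)   # (8, b, d): fused frame tokens
print(text_feat.shape)  # (L, b, d): fused text tokens
```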
src/test/java/UserAPI11Steps.java |
import com.aut.BaseClass;
import com.aut.DatabaseFactory;
import com.aut.EncryptionServiceImpl;
import com.aut.HttpMethodsFactory;
import com.thoughtworks.gauge.Step;
import io.restassured.path.json.JsonPath;
import org.junit.Assert;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
public class UserAPI11Steps extends BaseClass {
@Step("User enter User API view Picture by id </api/><version></user/><id></picture>")
public void enter_api(String arg0, String arg1, String arg2, long arg3, String arg4) throws Exception {
this.api = System.getenv("URI") + arg0 + arg1 + arg2 + EncryptionServiceImpl.encryptToString(arg3) + arg4;
System.out.println("API: " + api);
}
@Step("User call the User API view Picture by id")
public void call_api() {
Map<String, String> header = new HashMap();
header.put("headername", "Authorization");
header.put("headervalue", "bearer " + LogInAPISteps.token);
this.response = HttpMethodsFactory.getMethod(this.api, header);
this.setJsonPath(new JsonPath(this.response.getBody().asString()));
}
@Step("Get data from kraydel database <userid>")
public void get_db_data(String userid) throws SQLException, ClassNotFoundException {
String sql = "select person.picture from main.person where id=" + userid + "";
System.out.println(sql);
setResults(DatabaseFactory.getDBData(sql));
}
@Step("Validate view Picture by id API")
public void validate_picture_details() throws SQLException {
getResults().next();
Assert.assertEquals("Validate image DB:API", (getResults().getString("picture") == null), (getJsonPath().getString("content.picture") == null));
}
}
| ["\"URI\""] | [] | ["URI"] | [] | ["URI"] | java | 1 | 0 |
google/cloud/retail_v2/services/prediction_service/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.retail_v2.types import prediction_service
from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import PredictionServiceGrpcTransport
from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
class PredictionServiceClientMeta(type):
"""Metaclass for the PredictionService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[PredictionServiceTransport]]
_transport_registry["grpc"] = PredictionServiceGrpcTransport
_transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[PredictionServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
"""Service for making recommendation prediction."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "retail.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PredictionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PredictionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> PredictionServiceTransport:
"""Returns the transport used by the client instance.
Returns:
PredictionServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def product_path(
project: str, location: str, catalog: str, branch: str, product: str,
) -> str:
"""Returns a fully-qualified product string."""
return "projects/{project}/locations/{location}/catalogs/{catalog}/branches/{branch}/products/{product}".format(
project=project,
location=location,
catalog=catalog,
branch=branch,
product=product,
)
@staticmethod
def parse_product_path(path: str) -> Dict[str, str]:
"""Parses a product path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)/branches/(?P<branch>.+?)/products/(?P<product>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, PredictionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the prediction service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, PredictionServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, PredictionServiceTransport):
# transport is a PredictionServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def predict(
self,
request: Union[prediction_service.PredictRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> prediction_service.PredictResponse:
r"""Makes a recommendation prediction.
Args:
request (Union[google.cloud.retail_v2.types.PredictRequest, dict]):
The request object. Request message for Predict method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.retail_v2.types.PredictResponse:
Response message for predict method.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a prediction_service.PredictRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, prediction_service.PredictRequest):
request = prediction_service.PredictRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.predict]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("placement", request.placement),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-retail",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("PredictionServiceClient",)
| [] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 |
src/test/java/com/afx/web/receiptorganizer/test/webappcontext/WebAppContextTests.java | package com.afx.web.receiptorganizer.test.webappcontext;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import javax.naming.NamingException;
import com.afx.web.receiptorganizer.config.ApplicationConfig;
import com.afx.web.receiptorganizer.dao.model.label.Label;
import com.afx.web.receiptorganizer.dao.model.user.User;
import com.afx.web.receiptorganizer.service.label.LabelService;
import com.afx.web.receiptorganizer.test.utils.TestUtils;
import com.microsoft.sqlserver.jdbc.SQLServerDataSource;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.MessageSource;
import org.springframework.context.annotation.PropertySource;
import org.springframework.dao.DataAccessException;
import org.springframework.mock.jndi.SimpleNamingContextBuilder;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.web.WebAppConfiguration;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.web.context.WebApplicationContext;
@ActiveProfiles("test")
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes = {TestContext.class, ApplicationConfig.class})
@PropertySource("classpath:application.properties")
@WebAppConfiguration
public class WebAppContextTests {
private static final String LABEL_CREATE_PATH = "/labels/create";
private static final String LABEL_INDEX_PATH = "/labels/";
private static final String RESP_RESULT = "$.success";
private static final String RESP_MSG = "$.message";
private MockMvc mockMvc;
@Autowired
private LabelService labelServiceMock;
@Autowired
private MessageSource messageSource;
@Autowired
private WebApplicationContext webApplicationContext;
private User user = new User();
@BeforeClass
public static void classSetup() throws NamingException {
SimpleNamingContextBuilder builder = new SimpleNamingContextBuilder();
SQLServerDataSource ds = new SQLServerDataSource();
ds.setUser(System.getenv("USERNAME"));
ds.setPassword(System.getenv("PASSWORD"));
ds.setServerName(System.getenv("SERVER_NAME"));
ds.setPortNumber(Integer.valueOf(System.getenv("SERVER_PORT")));
ds.setDatabaseName(System.getenv("DATABASE_NAME"));
builder.bind("java:/MSSQLDS", ds);
builder.activate();
}
@Before
public void setUp() {
Mockito.reset(labelServiceMock);
mockMvc = MockMvcBuilders.webAppContextSetup(webApplicationContext).build();
this.user.setUsername("TestUsername");
}
@Test
public void createLabel_ShouldCreateNewLabelWithInputName() throws Exception {
//Setup
Label label = new Label();
String labelName = "Test";
label.setName(labelName);
String json = TestUtils.serialize(label);
//Test
mockMvc.perform((post(LABEL_CREATE_PATH)
.contentType(TestUtils.APPLICATION_JSON_UTF8)
.content(json)
.sessionAttr("user", this.user))
)
.andExpect(status().isOk())
.andExpect(content().contentType(TestUtils.APPLICATION_JSON_UTF8))
.andExpect(jsonPath(RESP_RESULT, is(true)))
.andExpect(jsonPath(RESP_MSG, is(messageSource.getMessage("label.create.success",
null,
Locale.US))));
//Assert
ArgumentCaptor<Label> labelArgumentCaptor = ArgumentCaptor.forClass(Label.class);
ArgumentCaptor<String> usernameArgumentCaptor = ArgumentCaptor.forClass(String.class);
verify(labelServiceMock, times(1)).addLabel(usernameArgumentCaptor.capture(), labelArgumentCaptor.capture());
verifyNoMoreInteractions(labelServiceMock);
Label formObject = labelArgumentCaptor.getValue();
assertThat(formObject.getName(), is(labelName));
String formUsername = usernameArgumentCaptor.getValue();
assert(formUsername.equals(this.user.getUsername()));
}
@Test
public void createLabel_ShouldSendNotUniqueResponseWhenDataAccessExceptionThrown() throws Exception {
//Setup
Label label = new Label();
String labelName = "Test";
label.setName(labelName);
String json = TestUtils.serialize(label);
doThrow(new DataAccessException("Dao Exception...") {})
.when(labelServiceMock)
.addLabel(anyString(), any(Label.class));
mockMvc.perform((post(LABEL_CREATE_PATH)
.contentType(TestUtils.APPLICATION_JSON_UTF8)
.content(json)
.sessionAttr("user", this.user))
)
.andExpect(status().isOk())
.andExpect(content().contentType(TestUtils.APPLICATION_JSON_UTF8))
.andExpect(jsonPath(RESP_RESULT, is(false)))
.andExpect(jsonPath(RESP_MSG, is(messageSource.getMessage("label.create.failure.notunique",
null,
Locale.US))));
ArgumentCaptor<Label> labelArgumentCaptor = ArgumentCaptor.forClass(Label.class);
ArgumentCaptor<String> usernameArgumentCaptor = ArgumentCaptor.forClass(String.class);
verify(labelServiceMock, times(1)).addLabel(usernameArgumentCaptor.capture(), labelArgumentCaptor.capture());
verifyNoMoreInteractions(labelServiceMock);
Label formObject = labelArgumentCaptor.getValue();
assertThat(formObject.getName(), is(labelName));
String formUsername = usernameArgumentCaptor.getValue();
assert(formUsername.equals(this.user.getUsername()));
}
@Test
public void createLabel_EmptyNameShouldReturnInvalidNameResponse() throws Exception {
//Setup
Label label = new Label();
String labelName = "";
label.setName(labelName);
String json = TestUtils.serialize(label);
mockMvc.perform((post(LABEL_CREATE_PATH)
.contentType(TestUtils.APPLICATION_JSON_UTF8)
.content(json)
.sessionAttr("user", this.user))
)
.andExpect(status().isOk())
.andExpect(content().contentType(TestUtils.APPLICATION_JSON_UTF8))
.andExpect(jsonPath(RESP_RESULT, is(false)))
.andExpect(jsonPath(RESP_MSG, is(messageSource.getMessage("label.create.failure.invalid",
null,
Locale.US))));
verify(labelServiceMock, never()).addLabel(anyString(), any());
}
@Test
public void createLabel_BlankNameShouldReturnInvalidNameResponse() throws Exception {
//Setup
Label label = new Label();
String labelName = " ";
label.setName(labelName);
String json = TestUtils.serialize(label);
mockMvc.perform((post(LABEL_CREATE_PATH)
.contentType(TestUtils.APPLICATION_JSON_UTF8)
.content(json)
.sessionAttr("user", this.user))
)
.andExpect(status().isOk())
.andExpect(content().contentType(TestUtils.APPLICATION_JSON_UTF8))
.andExpect(jsonPath(RESP_RESULT, is(false)))
.andExpect(jsonPath(RESP_MSG, is(messageSource.getMessage("label.create.failure.invalid",
null,
Locale.US))));
verify(labelServiceMock, never()).addLabel(anyString(), any());
}
//TODO: Check if I need throws exception
@Test
public void getLabels_shouldReturnAllUserLabels() throws Exception {
//Setup
String labelName = "Test";
Label label = new Label();
List<Label> labelList = new ArrayList<Label>();
label.setName(labelName);
labelList.add(label);
when(labelServiceMock.getAllLabels(this.user.getUsername()))
.thenReturn(labelList);
//Perform
mockMvc.perform((get(LABEL_INDEX_PATH)
.contentType(TestUtils.APPLICATION_JSON_UTF8)
.sessionAttr("user", this.user))
)
.andExpect(status().isOk())
.andExpect(content().contentType(TestUtils.APPLICATION_JSON_UTF8))
.andExpect(jsonPath(RESP_RESULT, is(true)))
.andExpect(jsonPath(RESP_MSG, is(messageSource.getMessage("label.index.success",
null,
Locale.US))))
.andExpect(jsonPath("$.labels[*]", hasSize(labelList.size())))
.andExpect(jsonPath("$.labels[0].name", is(labelName)));
//Assert
ArgumentCaptor<String> usernameArgumentCaptor = ArgumentCaptor.forClass(String.class);
verify(labelServiceMock, times(1)).getAllLabels(usernameArgumentCaptor.capture());
verifyNoMoreInteractions(labelServiceMock);
String formUsername = usernameArgumentCaptor.getValue();
assert(formUsername.equals(this.user.getUsername()));
}
}
| ["\"USERNAME\"", "\"PASSWORD\"", "\"SERVER_NAME\"", "\"SERVER_PORT\"", "\"DATABASE_NAME\""] | [] | ["USERNAME", "SERVER_PORT", "PASSWORD", "DATABASE_NAME", "SERVER_NAME"] | [] | ["USERNAME", "SERVER_PORT", "PASSWORD", "DATABASE_NAME", "SERVER_NAME"] | java | 5 | 0 |
lambda/function_test.go | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package lambda
import (
"context"
"encoding/json"
"errors"
"os"
"testing"
"time"
"github.com/aws/aws-lambda-go/lambda/messages"
"github.com/aws/aws-lambda-go/lambdacontext"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapperHandler func(ctx context.Context, input []byte) (interface{}, error)
func (h testWrapperHandler) Invoke(ctx context.Context, payload []byte) ([]byte, error) {
response, err := h(ctx, payload)
if err != nil {
return nil, err
}
responseBytes, err := json.Marshal(response)
if err != nil {
return nil, err
}
return responseBytes, nil
}
// verify testWrapperHandler implements Handler
var _ Handler = (testWrapperHandler)(nil)
func TestInvoke(t *testing.T) {
srv := &Function{handler: testWrapperHandler(
func(ctx context.Context, input []byte) (interface{}, error) {
if deadline, ok := ctx.Deadline(); ok {
return deadline.UnixNano(), nil
}
return nil, errors.New("!?!?!?!?!")
},
)}
deadline := time.Now()
var response messages.InvokeResponse
err := srv.Invoke(&messages.InvokeRequest{
Deadline: messages.InvokeRequest_Timestamp{
Seconds: deadline.Unix(),
Nanos: int64(deadline.Nanosecond()),
}}, &response)
assert.NoError(t, err)
var responseValue int64
assert.NoError(t, json.Unmarshal(response.Payload, &responseValue))
assert.Equal(t, deadline.UnixNano(), responseValue)
}
func TestInvokeWithContext(t *testing.T) {
key := struct{}{}
srv := NewFunction(testWrapperHandler(
func(ctx context.Context, input []byte) (interface{}, error) {
assert.Equal(t, "dummy", ctx.Value(key))
if deadline, ok := ctx.Deadline(); ok {
return deadline.UnixNano(), nil
}
return nil, errors.New("!?!?!?!?!")
}))
srv = srv.withContext(context.WithValue(context.Background(), key, "dummy"))
deadline := time.Now()
var response messages.InvokeResponse
err := srv.Invoke(&messages.InvokeRequest{
Deadline: messages.InvokeRequest_Timestamp{
Seconds: deadline.Unix(),
Nanos: int64(deadline.Nanosecond()),
}}, &response)
assert.NoError(t, err)
var responseValue int64
assert.NoError(t, json.Unmarshal(response.Payload, &responseValue))
assert.Equal(t, deadline.UnixNano(), responseValue)
}
type CustomError struct{}
func (e CustomError) Error() string { return "Something bad happened!" }
func TestCustomError(t *testing.T) {
srv := &Function{handler: testWrapperHandler(
func(ctx context.Context, input []byte) (interface{}, error) {
return nil, CustomError{}
},
)}
var response messages.InvokeResponse
err := srv.Invoke(&messages.InvokeRequest{}, &response)
assert.NoError(t, err)
assert.Nil(t, response.Payload)
assert.Equal(t, "Something bad happened!", response.Error.Message)
assert.Equal(t, "CustomError", response.Error.Type)
}
type CustomError2 struct{}
func (e *CustomError2) Error() string { return "Something bad happened!" }
func TestCustomErrorRef(t *testing.T) {
srv := &Function{handler: testWrapperHandler(
func(ctx context.Context, input []byte) (interface{}, error) {
return nil, &CustomError2{}
},
)}
var response messages.InvokeResponse
err := srv.Invoke(&messages.InvokeRequest{}, &response)
assert.NoError(t, err)
assert.Nil(t, response.Payload)
assert.Equal(t, "Something bad happened!", response.Error.Message)
assert.Equal(t, "CustomError2", response.Error.Type)
}
func TestContextPlumbing(t *testing.T) {
srv := &Function{handler: testWrapperHandler(
func(ctx context.Context, input []byte) (interface{}, error) {
lc, _ := lambdacontext.FromContext(ctx)
return lc, nil
},
)}
var response messages.InvokeResponse
err := srv.Invoke(&messages.InvokeRequest{
CognitoIdentityId: "dummyident",
CognitoIdentityPoolId: "dummypool",
ClientContext: []byte(`{
"Client": {
"app_title": "dummytitle",
"installation_id": "dummyinstallid",
"app_version_code": "dummycode",
"app_package_name": "dummyname"
}
}`),
RequestId: "dummyid",
InvokedFunctionArn: "dummyarn",
}, &response)
assert.NoError(t, err)
assert.NotNil(t, response.Payload)
expected := `
{
"AwsRequestID": "dummyid",
"InvokedFunctionArn": "dummyarn",
"Identity": {
"CognitoIdentityID": "dummyident",
"CognitoIdentityPoolID": "dummypool"
},
"ClientContext": {
"Client": {
"installation_id": "dummyinstallid",
"app_title": "dummytitle",
"app_version_code": "dummycode",
"app_package_name": "dummyname"
},
"env": null,
"custom": null
}
}
`
assert.JSONEq(t, expected, string(response.Payload))
}
func TestXAmznTraceID(t *testing.T) {
type XRayResponse struct {
Env string
Ctx string
}
srv := &Function{handler: testWrapperHandler(
func(ctx context.Context, input []byte) (interface{}, error) {
return &XRayResponse{
Env: os.Getenv("_X_AMZN_TRACE_ID"),
Ctx: ctx.Value("x-amzn-trace-id").(string),
}, nil
},
)}
sequence := []struct {
Input string
Expected string
}{
{
"",
`{"Env": "", "Ctx": ""}`,
},
{
"dummyid",
`{"Env": "dummyid", "Ctx": "dummyid"}`,
},
{
"",
`{"Env": "", "Ctx": ""}`,
},
{
"123dummyid",
`{"Env": "123dummyid", "Ctx": "123dummyid"}`,
},
{
"",
`{"Env": "", "Ctx": ""}`,
},
{
"",
`{"Env": "", "Ctx": ""}`,
},
{
"567",
`{"Env": "567", "Ctx": "567"}`,
},
{
"hihihi",
`{"Env": "hihihi", "Ctx": "hihihi"}`,
},
}
for i, test := range sequence {
var response messages.InvokeResponse
err := srv.Invoke(&messages.InvokeRequest{XAmznTraceId: test.Input}, &response)
require.NoError(t, err, "failed test sequence[%d]", i)
assert.JSONEq(t, test.Expected, string(response.Payload), "failed test sequence[%d]", i)
}
}
| [
"\"_X_AMZN_TRACE_ID\""
]
| []
| [
"_X_AMZN_TRACE_ID"
]
| [] | ["_X_AMZN_TRACE_ID"] | go | 1 | 0 | |
internal/extsvc/github/client_test.go | package github
import (
"context"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"github.com/dnaeon/go-vcr/cassette"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/testutil"
)
func TestUnmarshal(t *testing.T) {
type result struct {
FieldA string
FieldB string
}
cases := map[string]string{
// Valid
`[]`: "",
`[{"FieldA": "hi"}]`: "",
`[{"FieldA": "hi", "FieldB": "bye"}]`: "",
// Error
`[[]]`: `graphql: cannot unmarshal at offset 2: before "[["; after "]]": json: cannot unmarshal array into Go value of type github.result`,
`[{"FieldA": 1}]`: `graphql: cannot unmarshal at offset 13: before "[{\"FieldA\": 1"; after "}]": json: cannot unmarshal number`,
}
// Large body
repeated := strings.Repeat(`{"FieldA": "hi", "FieldB": "bye"},`, 100)
cases[fmt.Sprintf(`[%s {"FieldA": 1}, %s]`, repeated, repeated[:len(repeated)-1])] = `graphql: cannot unmarshal at offset 3414: before ", \"FieldB\": \"bye\"},{\"FieldA\": \"hi\", \"FieldB\": \"bye\"},{\"FieldA\": \"hi\", \"FieldB\": \"bye\"}, {\"FieldA\": 1"; after "}, {\"FieldA\": \"hi\", \"FieldB\": \"bye\"},{\"FieldA\": \"hi\", \"FieldB\": \"bye\"},{\"FieldA\": \"hi\", \"FieldB\": \"b": json: cannot unmarshal number`
for data, errStr := range cases {
var a []result
var b []result
errA := json.Unmarshal([]byte(data), &a)
errB := unmarshal([]byte(data), &b)
if len(data) > 50 {
data = data[:50] + "..."
}
if !reflect.DeepEqual(a, b) {
t.Errorf("Expected the same result unmarshalling %v\na: %v\nb: %v", data, a, b)
}
if !reflect.DeepEqual(errA, errors.Cause(errB)) {
t.Errorf("Expected the same underlying error unmarshalling %v\na: %v\nb: %v", data, errA, errB)
}
got := ""
if errB != nil {
got = errB.Error()
}
if !strings.HasPrefix(got, errStr) {
t.Errorf("Unexpected error message %v\ngot: %s\nwant: %s", data, got, errStr)
}
}
}
func Test_newRepoCache(t *testing.T) {
cmpOpts := cmp.AllowUnexported(rcache.Cache{})
t.Run("GitHub.com", func(t *testing.T) {
url, _ := url.Parse("https://www.github.com")
token := "asdf"
// github.com caches should:
// (1) use githubProxyURL for the prefix hash rather than the given url
// (2) have a TTL of 10 minutes
key := sha256.Sum256([]byte(token + ":" + githubProxyURL.String()))
prefix := "gh_repo:" + base64.URLEncoding.EncodeToString(key[:])
got := newRepoCache(url, token)
want := rcache.NewWithTTL(prefix, 600)
if diff := cmp.Diff(want, got, cmpOpts); diff != "" {
t.Fatal(diff)
}
})
t.Run("GitHub Enterprise", func(t *testing.T) {
url, _ := url.Parse("https://www.sourcegraph.com")
token := "asdf"
// GitHub Enterprise caches should:
// (1) use the given URL for the prefix hash
// (2) have a TTL of 30 seconds
key := sha256.Sum256([]byte(token + ":" + url.String()))
prefix := "gh_repo:" + base64.URLEncoding.EncodeToString(key[:])
got := newRepoCache(url, token)
want := rcache.NewWithTTL(prefix, 30)
if diff := cmp.Diff(want, got, cmpOpts); diff != "" {
t.Fatal(diff)
}
})
}
var updateRegex = flag.String("update", "", "Update testdata of tests matching the given regex")
func update(name string) bool {
if updateRegex == nil || *updateRegex == "" {
return false
}
return regexp.MustCompile(*updateRegex).MatchString(name)
}
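// Illustrative usage of the -update mechanism above (the exact command is an
// assumption, not part of this file): from the repository root, something like
//
//	go test ./internal/extsvc/github -run TestClient_LoadPullRequests -update=LoadPullRequests
//
// re-records the VCR cassettes and golden files for every test whose name
// matches the given regex. A real GITHUB_TOKEN is typically needed while
// re-recording, since live API calls are made (see newClient below).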
func TestClient_WithToken(t *testing.T) {
uri, err := url.Parse("https://github.com")
if err != nil {
t.Fatal(err)
}
old := &Client{
apiURL: uri,
token: "old_token",
}
newToken := "new_token"
new := old.WithToken(newToken)
if old == new {
t.Fatal("both clients have the same address")
}
if new.token != newToken {
t.Fatalf("token: want %q but got %q", newToken, new.token)
}
}
func TestClient_LoadPullRequests(t *testing.T) {
cli, save := newClient(t, "LoadPullRequests")
defer save()
for i, tc := range []struct {
name string
ctx context.Context
prs []*PullRequest
err string
}{
{
name: "non-existing-repo",
prs: []*PullRequest{{RepoWithOwner: "whoisthis/sourcegraph", Number: 5550}},
err: "error in GraphQL response: Could not resolve to a Repository with the name 'sourcegraph'.",
},
{
name: "non-existing-pr",
prs: []*PullRequest{{RepoWithOwner: "sourcegraph/sourcegraph", Number: 0}},
err: "error in GraphQL response: Could not resolve to a PullRequest with the number of 0.",
},
{
name: "success",
prs: []*PullRequest{
{RepoWithOwner: "sourcegraph/sourcegraph", Number: 5550},
{RepoWithOwner: "sourcegraph/sourcegraph", Number: 5834},
{RepoWithOwner: "tsenart/vegeta", Number: 50},
{RepoWithOwner: "sourcegraph/sourcegraph", Number: 7352},
},
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
err := cli.LoadPullRequests(tc.ctx, tc.prs...)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
testutil.AssertGolden(t,
"testdata/golden/LoadPullRequests-"+strconv.Itoa(i),
update("LoadPullRequests"),
tc.prs,
)
})
}
}
func TestClient_CreatePullRequest(t *testing.T) {
cli, save := newClient(t, "CreatePullRequest")
defer save()
// Repository used: sourcegraph/automation-testing
// The requests here cannot be easily rerun with `-update` since you can
// only open a pull request once.
// In order to update specific tests, comment out the other ones and then
// run with -update.
for i, tc := range []struct {
name string
ctx context.Context
input *CreatePullRequestInput
err string
}{
{
name: "success",
input: &CreatePullRequestInput{
RepositoryID: "MDEwOlJlcG9zaXRvcnkyMjExNDc1MTM=",
BaseRefName: "master",
HeadRefName: "test-pr-3",
Title: "This is a test PR, feel free to ignore",
Body: "I'm opening this PR to test something. Please ignore.",
},
},
{
name: "already-existing-pr",
input: &CreatePullRequestInput{
RepositoryID: "MDEwOlJlcG9zaXRvcnkyMjExNDc1MTM=",
BaseRefName: "master",
HeadRefName: "always-open-pr",
Title: "This is a test PR that is always open",
Body: "Feel free to ignore this. This is a test PR that is always open.",
},
err: ErrPullRequestAlreadyExists.Error(),
},
{
name: "invalid-head-ref",
input: &CreatePullRequestInput{
RepositoryID: "MDEwOlJlcG9zaXRvcnkyMjExNDc1MTM=",
BaseRefName: "master",
HeadRefName: "this-head-ref-should-not-exist",
Title: "Test",
},
err: "error in GraphQL response: Head sha can't be blank, Base sha can't be blank, No commits between master and this-head-ref-should-not-exist, Head ref must be a branch",
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
pr, err := cli.CreatePullRequest(tc.ctx, tc.input)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
testutil.AssertGolden(t,
"testdata/golden/CreatePullRequest-"+strconv.Itoa(i),
update("CreatePullRequest"),
pr,
)
})
}
}
func TestClient_ClosePullRequest(t *testing.T) {
cli, save := newClient(t, "ClosePullRequest")
defer save()
// Repository used: sourcegraph/automation-testing
// The requests here cannot be easily rerun with `-update` since you can
// only close a pull request once.
// In order to update specific tests, comment out the other ones and then
// run with -update.
for i, tc := range []struct {
name string
ctx context.Context
pr *PullRequest
err string
}{
{
name: "success",
// github.com/sourcegraph/automation-testing/pull/44
pr: &PullRequest{ID: "MDExOlB1bGxSZXF1ZXN0MzQxMDU5OTY5"},
},
{
name: "already closed",
// github.com/sourcegraph/automation-testing/pull/29
pr: &PullRequest{ID: "MDExOlB1bGxSZXF1ZXN0MzQxMDU5OTY5"},
// Doesn't return an error
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
err := cli.ClosePullRequest(tc.ctx, tc.pr)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
testutil.AssertGolden(t,
"testdata/golden/ClosePullRequest-"+strconv.Itoa(i),
update("ClosePullRequest"),
tc.pr,
)
})
}
}
func TestClient_GetAuthenticatedUserOrgs(t *testing.T) {
cli, save := newClient(t, "GetAuthenticatedUserOrgs")
defer save()
ctx := context.Background()
orgs, err := cli.GetAuthenticatedUserOrgs(ctx)
if err != nil {
t.Fatal(err)
}
testutil.AssertGolden(t,
"testdata/golden/GetAuthenticatedUserOrgs",
update("GetAuthenticatedUserOrgs"),
orgs,
)
}
func newClient(t testing.TB, name string) (*Client, func()) {
t.Helper()
cassete := filepath.Join("testdata/vcr/", strings.Replace(name, " ", "-", -1))
rec, err := httptestutil.NewRecorder(cassete, update(name), func(i *cassette.Interaction) error {
return nil
})
if err != nil {
t.Fatal(err)
}
mw := httpcli.NewMiddleware(githubProxyRedirectMiddleware)
hc, err := httpcli.NewFactory(mw, httptestutil.NewRecorderOpt(rec)).Doer()
if err != nil {
t.Fatal(err)
}
uri, err := url.Parse("https://github.com")
if err != nil {
t.Fatal(err)
}
cli := NewClient(
uri,
os.Getenv("GITHUB_TOKEN"),
hc,
)
return cli, func() {
if err := rec.Stop(); err != nil {
t.Errorf("failed to update test data: %s", err)
}
}
}
func githubProxyRedirectMiddleware(cli httpcli.Doer) httpcli.Doer {
return httpcli.DoerFunc(func(req *http.Request) (*http.Response, error) {
if req.URL.Hostname() == "github-proxy" {
req.URL.Host = "api.github.com"
req.URL.Scheme = "https"
}
return cli.Do(req)
})
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
pkg/program/daemon/sys_windows.go | package daemon
import (
"os"
"syscall"
)
func setUmask() {
// NOP on windows.
}
// A subset of possible process creation flags, value taken from
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
const (
CREATE_BREAKAWAY_FROM_JOB = 0x01000000
CREATE_NEW_PROCESS_GROUP = 0x00000200
DETACHED_PROCESS = 0x00000008
DaemonCreationFlags = CREATE_BREAKAWAY_FROM_JOB | CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS
)
func procAttrForSpawn() *os.ProcAttr {
return &os.ProcAttr{
Dir: `C:\`,
Env: []string{"SystemRoot=" + os.Getenv("SystemRoot")}, // SystemRoot is needed for net.Listen for some reason
Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
Sys: &syscall.SysProcAttr{CreationFlags: DaemonCreationFlags},
}
}
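// Illustrative sketch only (not part of the original file): procAttrForSpawn
// is presumably handed to os.StartProcess when the daemon re-spawns itself.
// The function below is an assumed example call site; the binary path and
// argument handling are illustrative, not the package's real spawn logic.
func spawnDetachedExample(binPath string, args []string) (*os.Process, error) {
	// DaemonCreationFlags detaches the child from the current console, job
	// object and process group, so it keeps running after the parent exits.
	return os.StartProcess(binPath, append([]string{binPath}, args...), procAttrForSpawn())
}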
| [
"\"SystemRoot\""
]
| []
| [
"SystemRoot"
]
| [] | ["SystemRoot"] | go | 1 | 0 | |
providers/naver/naver_test.go | package naver_test
import (
"fmt"
"os"
"testing"
"github.com/macuenca/goth/v3"
"github.com/macuenca/goth/v3/providers/naver"
"github.com/stretchr/testify/assert"
)
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("NAVER_KEY"))
a.Equal(p.Secret, os.Getenv("NAVER_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*naver.Session)
a.NoError(err)
a.Contains(s.AuthURL, "https://nid.naver.com/oauth2.0/authorize")
a.Contains(s.AuthURL, fmt.Sprintf("client_id=%s", os.Getenv("NAVER_KEY")))
a.Contains(s.AuthURL, "state=test_state")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
	session, err := p.UnmarshalSession(`{"AuthURL":"https://nid.naver.com/oauth2.0/authorize","AccessToken":"1234567890"}`)
	a.NoError(err)
	s := session.(*naver.Session)
	a.Equal(s.AuthURL, "https://nid.naver.com/oauth2.0/authorize")
a.Equal(s.AccessToken, "1234567890")
}
func provider() *naver.Provider {
return naver.New(os.Getenv("NAVER_KEY"), os.Getenv("NAVER_SECRET"), "/foo")
}
| [
"\"NAVER_KEY\"",
"\"NAVER_SECRET\"",
"\"NAVER_KEY\"",
"\"NAVER_KEY\"",
"\"NAVER_SECRET\""
]
| []
| [
"NAVER_KEY",
"NAVER_SECRET"
]
| [] | ["NAVER_KEY", "NAVER_SECRET"] | go | 2 | 0 | |
Solutions/Lookout/Data Connectors/LookoutAPISentinelConnector/__init__.py | import requests
import json
import datetime
import azure.functions as func
import base64
import hmac
import hashlib
import os
import logging
import re
import threading
from .mes_request import MESRequest
# Azure Workspace credentials; a KeyError is raised if they are not set
customer_id = os.environ['WorkspaceID']
shared_key = os.environ['WorkspaceKey']
#Azure Secret client setting
keyVaultName = str(os.environ['KeyVaultName'])
KVUri = "https://" + keyVaultName + ".vault.azure.net/"
#RISK MES API credentials
lookout_mes_uri = "https://api.lookout.com"
ent_name = os.environ.get('EnterpriseName')
api_key = os.environ.get('ApiKey')
log_type = 'Lookout'
logAnalyticsUri = os.environ.get('logAnalyticsUri')
if ((logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace())):
logAnalyticsUri = 'https://' + customer_id + '.ods.opinsights.azure.com'
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"
match = re.match(pattern,str(logAnalyticsUri))
if(not match):
raise Exception("Invalid Log Analytics Uri.")
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
'''
Build API Signature required to send events to Sentinel
- Returns auth signature
'''
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id,encoded_hash)
return authorization
def post_data(body):
'''
Method to send data to sentinel via POST HTTP Request
'''
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = logAnalyticsUri + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri,data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
return response.status_code
else:
        logging.warning("Events are not processed into Azure. Response code: {}".format(response.status_code))
return None
def single_ent_events(KVUri= None, ent_name= None, api_key= None, lookout_mes_uri= None, ent_index= 0):
'''
Fetching events for an ENT and syncing fetched events into sentinel
'''
    logging.info("Fetching events for ent_name %s..." % str(ent_name))
mes = MESRequest(lookout_mes_uri, ent_name, api_key, KVUri, ent_index)
events = mes.get_events()
if events and len(events) > 0:
logging.info("Got events")
logging.info("Processing {} events".format(len(events)))
post_status_code = post_data(json.dumps(events))
if post_status_code is not None:
logging.info("Events processed to Sentinel successfully")
else:
logging.info("Failed to Post Events to Sentinel")
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info("Application starting")
#Check for MES credentials and fetch events using RISK API
if api_key and ent_name:
logging.info("Fetching RISK API Events")
        # For now we pass a hardcoded ent index of 0
        # A threading mechanism is not used because it is not supported in Azure Functions
        single_ent_events(KVUri, ent_name, api_key, lookout_mes_uri, 0)
| []
| []
| [
"logAnalyticsUri",
"WorkspaceID",
"KeyVaultName",
"EnterpriseName",
"ApiKey",
"WorkspaceKey"
]
| [] | ["logAnalyticsUri", "WorkspaceID", "KeyVaultName", "EnterpriseName", "ApiKey", "WorkspaceKey"] | python | 6 | 0 | |
userbot/__init__.py | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
import re
import time
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
StartTime = time.time()
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get(
"CONSOLE_LOGGER_VERBOSE") or "False")
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if (version_info[0], version_info[1]) < (3, 8):
LOGS.info(
"You MUST have a python version of at least 3.8."
"Multiple features depend on this. Bot quitting."
)
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, it's the 'virginity check' for the config file ;)
CONFIG_CHECK = (os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________") or None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY") or None
API_HASH = os.environ.get("API_HASH") or None
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION") or None
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN") or None
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG") or "False")
if BOTLOG:
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER") or "False")
else:
LOGSPAMMER = False
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN") or "False")
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ") or "False")
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME") or None
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY") or None
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME") or None
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN") or None
# Custom (forked) repo URL and BRANCH for updater.
UPSTREAM_REPO_URL = (os.environ.get("UPSTREAM_REPO_URL")
or "https://github.com/BL4CKID/uSeRBeGo.git")
UPSTREAM_REPO_BRANCH = os.environ.get("UPSTREAM_REPO_BRANCH") or "master"
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get(
"CONSOLE_LOGGER_VERBOSE") or "False")
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL") or None
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY") or None
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY") or None
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER") or None
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN") or None
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID") or None
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY") or None
WEATHER_DEFLANG = os.environ.get("WEATHER_DEFLANG") or None
# Genius lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN") or None
# Wolfram Alpha API
WOLFRAM_ID = os.environ.get("WOLFRAM_ID") or None
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT") or "False")
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT") or "False")
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME") or None
# Default .alive logo
ALIVE_LOGO = os.environ.get("ALIVE_LOGO") or None
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY") or "")
TZ_NUMBER = int(os.environ.get("TZ_NUMBER") or 1)
# Version of One4uBot
USERBOT_VERSION = os.environ.get("USERBOT_VERSION") or "3.7"
# User Terminal alias
USER_TERM_ALIAS = os.environ.get("USER_TERM_ALIAS") or "One4uBot"
# Updater alias
UPDATER_ALIAS = os.environ.get("UPDATER_ALIAS") or "One4uBot"
# Zipfile module
ZIP_DOWNLOAD_DIRECTORY = os.environ.get("ZIP_DOWNLOAD_DIRECTORY") or "./zips"
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME") or "True")
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX") or None
DEFAULT_BIO = os.environ.get("DEFAULT_BIO") or None
LASTFM_API = os.environ.get("LASTFM_API") or None
LASTFM_SECRET = os.environ.get("LASTFM_SECRET") or None
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME") or None
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD") or None
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API is not None:
lastfm = LastFMNetwork(
api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS,
)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA") or None
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID") or None
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET") or None
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA") or None
G_DRIVE_FOLDER_ID = os.environ.get("G_DRIVE_FOLDER_ID") or None
TEMP_DOWNLOAD_DIRECTORY = os.environ.get(
"TMP_DOWNLOAD_DIRECTORY") or "./downloads"
# Uptobox
USR_TOKEN = os.environ.get("USR_TOKEN_UPTOBOX", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists("bin"):
os.mkdir("bin")
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown": "bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py": "bin/cmrudl",
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except BaseException:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file."
)
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ZALG_LIST = {}
ISAFK = False
AFKREASON = None
| []
| []
| [
"GOOGLE_CHROME_BIN",
"G_DRIVE_CLIENT_SECRET",
"COUNTRY",
"LASTFM_API",
"USERBOT_VERSION",
"ANTI_SPAMBOT_SHOUT",
"UPSTREAM_REPO_URL",
"OCR_SPACE_API_KEY",
"BIO_PREFIX",
"LOGSPAMMER",
"TZ_NUMBER",
"UPDATER_ALIAS",
"G_DRIVE_FOLDER_ID",
"LASTFM_PASSWORD",
"DATABASE_URL",
"HEROKU_APP_NAME",
"___________PLOX_______REMOVE_____THIS_____LINE__________",
"ALIVE_LOGO",
"GIT_REPO_NAME",
"HEROKU_API_KEY",
"DEEZER_ARL_TOKEN",
"WEATHER_DEFLANG",
"CHROME_DRIVER",
"WOLFRAM_ID",
"LASTFM_USERNAME",
"HEROKU_MEMEZ",
"G_DRIVE_CLIENT_ID",
"API_KEY",
"USER_TERM_ALIAS",
"PM_AUTO_BAN",
"DEFAULT_BIO",
"ANTI_SPAMBOT",
"OPEN_WEATHER_MAP_APPID",
"USR_TOKEN_UPTOBOX",
"LASTFM_SECRET",
"G_DRIVE_AUTH_TOKEN_DATA",
"ZIP_DOWNLOAD_DIRECTORY",
"UPSTREAM_REPO_BRANCH",
"WEATHER_DEFCITY",
"STRING_SESSION",
"CONSOLE_LOGGER_VERBOSE",
"GITHUB_ACCESS_TOKEN",
"GENIUS_ACCESS_TOKEN",
"BOTLOG_CHATID",
"ALIVE_NAME",
"CLEAN_WELCOME",
"TMP_DOWNLOAD_DIRECTORY",
"G_DRIVE_DATA",
"REM_BG_API_KEY",
"BOTLOG",
"API_HASH"
]
| [] | ["GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "USERBOT_VERSION", "ANTI_SPAMBOT_SHOUT", "UPSTREAM_REPO_URL", "OCR_SPACE_API_KEY", "BIO_PREFIX", "LOGSPAMMER", "TZ_NUMBER", "UPDATER_ALIAS", "G_DRIVE_FOLDER_ID", "LASTFM_PASSWORD", "DATABASE_URL", "HEROKU_APP_NAME", "___________PLOX_______REMOVE_____THIS_____LINE__________", "ALIVE_LOGO", "GIT_REPO_NAME", "HEROKU_API_KEY", "DEEZER_ARL_TOKEN", "WEATHER_DEFLANG", "CHROME_DRIVER", "WOLFRAM_ID", "LASTFM_USERNAME", "HEROKU_MEMEZ", "G_DRIVE_CLIENT_ID", "API_KEY", "USER_TERM_ALIAS", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "USR_TOKEN_UPTOBOX", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "ZIP_DOWNLOAD_DIRECTORY", "UPSTREAM_REPO_BRANCH", "WEATHER_DEFCITY", "STRING_SESSION", "CONSOLE_LOGGER_VERBOSE", "GITHUB_ACCESS_TOKEN", "GENIUS_ACCESS_TOKEN", "BOTLOG_CHATID", "ALIVE_NAME", "CLEAN_WELCOME", "TMP_DOWNLOAD_DIRECTORY", "G_DRIVE_DATA", "REM_BG_API_KEY", "BOTLOG", "API_HASH"] | python | 51 | 0 | |
stdlib/bench_test.go | package stdlib_test
import (
"database/sql"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"
)
func getSelectRowsCounts(b *testing.B) []int64 {
var rowCounts []int64
{
s := os.Getenv("PGX_BENCH_SELECT_ROWS_COUNTS")
if s != "" {
for _, p := range strings.Split(s, " ") {
n, err := strconv.ParseInt(p, 10, 64)
if err != nil {
b.Fatalf("Bad PGX_BENCH_SELECT_ROWS_COUNTS value: %v", err)
}
rowCounts = append(rowCounts, n)
}
}
}
if len(rowCounts) == 0 {
rowCounts = []int64{1, 10, 100, 1000}
}
return rowCounts
}
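// Illustrative invocation (hypothetical, not part of the original file):
//
//	PGX_BENCH_SELECT_ROWS_COUNTS="1 50 5000" go test -bench=BenchmarkSelectRows ./stdlib
//
// overrides the default row counts of 1, 10, 100 and 1000 with the
// space-separated list given in the environment variable.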
type BenchRowSimple struct {
ID int32
FirstName string
LastName string
Sex string
BirthDate time.Time
Weight int32
Height int32
UpdateTime time.Time
}
func BenchmarkSelectRowsScanSimple(b *testing.B) {
db := openDB(b)
defer closeDB(b, db)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
br := &BenchRowSimple{}
for i := 0; i < b.N; i++ {
rows, err := db.Query("select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(1, $1) n", rowCount)
if err != nil {
b.Fatal(err)
}
for rows.Next() {
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
}
if rows.Err() != nil {
b.Fatal(rows.Err())
}
}
})
}
}
type BenchRowNull struct {
ID sql.NullInt32
FirstName sql.NullString
LastName sql.NullString
Sex sql.NullString
BirthDate sql.NullTime
Weight sql.NullInt32
Height sql.NullInt32
UpdateTime sql.NullTime
}
func BenchmarkSelectRowsScanNull(b *testing.B) {
db := openDB(b)
defer closeDB(b, db)
rowCounts := getSelectRowsCounts(b)
for _, rowCount := range rowCounts {
b.Run(fmt.Sprintf("%d rows", rowCount), func(b *testing.B) {
			br := &BenchRowNull{}
for i := 0; i < b.N; i++ {
rows, err := db.Query("select n, 'Adam', 'Smith ' || n, 'male', '1952-06-16'::date, 258, 72, '2001-01-28 01:02:03-05'::timestamptz from generate_series(100000, 100000 + $1) n", rowCount)
if err != nil {
b.Fatal(err)
}
for rows.Next() {
rows.Scan(&br.ID, &br.FirstName, &br.LastName, &br.Sex, &br.BirthDate, &br.Weight, &br.Height, &br.UpdateTime)
}
if rows.Err() != nil {
b.Fatal(rows.Err())
}
}
})
}
}
| [
"\"PGX_BENCH_SELECT_ROWS_COUNTS\""
]
| []
| [
"PGX_BENCH_SELECT_ROWS_COUNTS"
]
| [] | ["PGX_BENCH_SELECT_ROWS_COUNTS"] | go | 1 | 0 | |
src/runtime/export_unix_test.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
package runtime
import "unsafe"
var NonblockingPipe = nonblockingPipe
var Closeonexec = closeonexec
func sigismember(mask *sigset, i int) bool {
clear := *mask
sigdelset(&clear, i)
return clear != *mask
}
func Sigisblocked(i int) bool {
var sigmask sigset
sigprocmask(_SIG_SETMASK, nil, &sigmask)
return sigismember(&sigmask, i)
}
type M = m
var waitForSigusr1 struct {
rdpipe int32
wrpipe int32
mID int64
}
// WaitForSigusr1 blocks until a SIGUSR1 is received. It calls ready
// when it is set up to receive SIGUSR1. The ready function should
// cause a SIGUSR1 to be sent. The r and w arguments are a pipe that
// the signal handler can use to report when the signal is received.
//
// Once SIGUSR1 is received, it returns the ID of the current M and
// the ID of the M the SIGUSR1 was received on. If the caller writes
// a non-zero byte to w, WaitForSigusr1 returns immediately with -1, -1.
func WaitForSigusr1(r, w int32, ready func(mp *M)) (int64, int64) {
lockOSThread()
// Make sure we can receive SIGUSR1.
unblocksig(_SIGUSR1)
waitForSigusr1.rdpipe = r
waitForSigusr1.wrpipe = w
mp := getg().m
testSigusr1 = waitForSigusr1Callback
ready(mp)
// Wait for the signal. We use a pipe rather than a note
// because write is always async-signal-safe.
entersyscallblock()
var b byte
read(waitForSigusr1.rdpipe, noescape(unsafe.Pointer(&b)), 1)
exitsyscall()
gotM := waitForSigusr1.mID
testSigusr1 = nil
unlockOSThread()
if b != 0 {
// timeout signal from caller
return -1, -1
}
return mp.id, gotM
}
// waitForSigusr1Callback is called from the signal handler during
// WaitForSigusr1. It must not have write barriers because there may
// not be a P.
//
//go:nowritebarrierrec
func waitForSigusr1Callback(gp *g) bool {
if gp == nil || gp.m == nil {
waitForSigusr1.mID = -1
} else {
waitForSigusr1.mID = gp.m.id
}
b := byte(0)
write(uintptr(waitForSigusr1.wrpipe), noescape(unsafe.Pointer(&b)), 1)
return true
}
// SendSigusr1 sends SIGUSR1 to mp.
func SendSigusr1(mp *M) {
signalM(mp, _SIGUSR1)
}
| []
| []
| []
| [] | [] | go | null | null | null |
main.py | from urllib.parse import urlparse
from dotenv import load_dotenv
import requests
import os
import argparse
def shorten_link(token, url):
response = requests.post(
"https://api-ssl.bitly.com/v4/bitlinks",
headers={"Authorization": "Bearer {}".format(token)},
json={"long_url": url})
response.raise_for_status()
return response.json()["link"]
def count_clicks(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}/clicks/summary"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
response.raise_for_status()
return response.json()["total_clicks"]
def is_bitlink(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
return response.ok
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Программа для сокращения ссылок или "
"подсчёта количества переходов для bitlink")
parser.add_argument("url", help="Введите URL или bitlink")
args = parser.parse_args()
link = args.url
parsed_bitlink = urlparse(link)
load_dotenv()
token = os.environ["BITLY_TOKEN"]
try:
if is_bitlink(token, parsed_bitlink):
clicks_count = count_clicks(token, parsed_bitlink)
print("Количество переходов по вашей ссылке: ", clicks_count)
else:
bitlink = shorten_link(token, link)
print("Сокращенная ссылка: ", bitlink)
    except requests.exceptions.RequestException:
        print("You entered an invalid link")
| []
| []
| [
"BITLY_TOKEN"
]
| [] | ["BITLY_TOKEN"] | python | 1 | 0 | |
km/api/request.go | package api
import (
"encoding/json"
"github.com/pkg/errors"
)
type Request struct {
Type string `json:"type"`
Payload interface{} `json:"payload"`
}
type DiscoveryRequest struct {}
type DiscoveryResponse struct {}
type ConfigRequest struct {
}
type ConfigResponse struct {
Version string `json:"version"`
Config ConfigPublic `json:"config"`
}
type DirectSamlAuthRequest struct {
RequestedRole string `json:"requested_role"`
SAMLResponse string `json:"saml_response"`
SigAlg string `json:"sig_alg"`
Signature string `json:"signature"`
RelayState *string `json:"relay_state,omitempty"`
}
type DirectOidcAuthRequest struct {
}
type DirectAuthResponse struct {
Credentials map[string][]byte `json:"result"`
}
type WorkflowStartRequest struct {
}
type WorkflowStartResponse struct {
IssuingNonce string `json:"issuing_nonce"`
IdpNonce string `json:"idp_nonce"`
}
type WorkflowAuthRequest struct {
Username string `json:"username"` // TODO: remove?
Role string `json:"role"`
IssuingNonce string `json:"issuing_nonce"`
IdpNonce string `json:"idp_nonce"`
Assertions []string `json:"assertions"`
}
type WorkflowAuthResponse struct {
Credentials []Cred `json:"credentials"`
}
func (c *Request) UnmarshalJSON(data []byte) error {
var t struct {
Type string `json:"type"`
Payload json.RawMessage `json:"payload"`
}
err := json.Unmarshal(data, &t)
if err != nil {
return err
}
c.Type = t.Type
var payload interface{}
switch c.Type {
case "discovery":
payload = &DiscoveryRequest{}
case "config":
payload = &ConfigRequest{}
case "direct_saml_auth":
payload = &DirectSamlAuthRequest{}
case "direct_oidc_auth":
payload = &DirectOidcAuthRequest{}
case "workflow_start":
payload = &WorkflowStartRequest{}
case "workflow_auth":
payload = &WorkflowAuthRequest{}
default:
return errors.New("unknown operation type: " + c.Type)
}
err = json.Unmarshal(t.Payload, payload)
if err != nil {
return err
}
c.Payload = payload
return nil
}
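// exampleDecodeRequest is an illustrative sketch, not part of the original API:
// it shows how the custom UnmarshalJSON above picks a concrete payload type
// from the "type" field. The literal JSON below is an assumption used only for
// illustration.
func exampleDecodeRequest() (*Request, error) {
	raw := []byte(`{"type": "workflow_start", "payload": {}}`)
	var req Request
	if err := json.Unmarshal(raw, &req); err != nil {
		return nil, err
	}
	// req.Payload now holds a *WorkflowStartRequest.
	return &req, nil
}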
| []
| []
| []
| [] | [] | go | null | null | null |
processing.py |
# Use audio loader explicitly for loading audio waveform :
from spleeter.separator import Separator
import ipdb
import glob
import time
import random
import warnings
import argparse
import sys
import traceback
warnings.filterwarnings('ignore')
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
def check_exist(folder):
if not os.path.exists(folder):
os.mkdir(folder)
def check_done_or_not(filepath, output_path):
new_filepath = os.path.join(output_path, filepath.split('/')[-1])[:-4]
if os.path.exists(new_filepath):
PROCESS = False
else:
PROCESS = True
return PROCESS
def main(file_path, output_dir, use_MWF):
st = time.time()
separator = Separator('spleeter:4stems', MWF=use_MWF)
# List of input to process.
audio_descriptors = glob.glob(os.path.join(file_path, '*.mp3'))
print('total {} songs to process'.format(len(audio_descriptors)))
random.shuffle(audio_descriptors)
# Batch separation export.
count = 0
for i in audio_descriptors:
print('='*100)
print(i)
PROCESS = check_done_or_not(i, output_dir)
print('PROCESS:', PROCESS)
if PROCESS:
try:
separator.separate_to_file(i, output_dir, synchronous=True)
except Exception as e:
# print(e)
                error_class = e.__class__.__name__  # get the error type
                detail = e.args[0]  # get the error details
                cl, exc, tb = sys.exc_info()  # get the call stack
                lastCallStack = traceback.extract_tb(tb)[-1]  # get the last entry of the call stack
                fileName = lastCallStack[0]  # get the file name where the error occurred
                lineNum = lastCallStack[1]  # get the line number where the error occurred
                funcName = lastCallStack[2]  # get the function name where the error occurred
errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(fileName, lineNum, funcName, error_class, detail)
print(errMsg)
count += 1
else:
print('Already processed:', i)
ed = time.time()
# Wait for batch to finish.
separator.join()
print('total spend {}s for {} song. average: {}s'.format(ed - st, count, (ed-st)/count))
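# Illustrative command line (paths are the argparse defaults below; the GPU
# index is an assumption):
#   python processing.py --CUDA 0 --fp /volume/youtube-audios/mp3/ --op /volume/youtube-audios/mp3_sep/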
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='spleeter')
parser.add_argument('--CUDA', default = '0', help='GPU')
parser.add_argument('--MWF', default = True, help='<<True>> for better but slower model')
parser.add_argument('--fp', default = '/volume/youtube-audios/mp3/')
parser.add_argument('--op', default = '/volume/youtube-audios/mp3_sep/')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.CUDA
file_path = args.fp
output_dir = args.op
use_MWF = args.MWF
check_exist(output_dir)
main(file_path, output_dir, use_MWF)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
soracom/generated/cmd/stats_air_sims_get.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// StatsAirSimsGetCmdPeriod holds value of 'period' option
var StatsAirSimsGetCmdPeriod string
// StatsAirSimsGetCmdSimId holds value of 'simId' option
var StatsAirSimsGetCmdSimId string
// StatsAirSimsGetCmdFrom holds value of 'from' option
var StatsAirSimsGetCmdFrom int64
// StatsAirSimsGetCmdTo holds value of 'to' option
var StatsAirSimsGetCmdTo int64
func init() {
StatsAirSimsGetCmd.Flags().StringVar(&StatsAirSimsGetCmdPeriod, "period", "", TRAPI("Units of aggregate data. For minutes, the interval is around 5 minutes."))
StatsAirSimsGetCmd.Flags().StringVar(&StatsAirSimsGetCmdSimId, "sim-id", "", TRAPI("SIM ID"))
StatsAirSimsGetCmd.Flags().Int64Var(&StatsAirSimsGetCmdFrom, "from", 0, TRAPI("Start time in unixtime for the aggregate data."))
StatsAirSimsGetCmd.Flags().Int64Var(&StatsAirSimsGetCmdTo, "to", 0, TRAPI("End time in unixtime for the aggregate data."))
StatsAirSimsCmd.AddCommand(StatsAirSimsGetCmd)
}
// StatsAirSimsGetCmd defines 'get' subcommand
var StatsAirSimsGetCmd = &cobra.Command{
Use: "get",
Short: TRAPI("/stats/air/sims/{simId}:get:summary"),
Long: TRAPI(`/stats/air/sims/{simId}:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectStatsAirSimsGetCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectStatsAirSimsGetCmdParams(ac *apiClient) (*apiParams, error) {
if StatsAirSimsGetCmdPeriod == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "period")
}
if StatsAirSimsGetCmdSimId == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "sim-id")
}
if StatsAirSimsGetCmdFrom == 0 {
return nil, fmt.Errorf("required parameter '%s' is not specified", "from")
}
if StatsAirSimsGetCmdTo == 0 {
return nil, fmt.Errorf("required parameter '%s' is not specified", "to")
}
return &apiParams{
method: "GET",
path: buildPathForStatsAirSimsGetCmd("/stats/air/sims/{simId}"),
query: buildQueryForStatsAirSimsGetCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForStatsAirSimsGetCmd(path string) string {
escapedSimId := url.PathEscape(StatsAirSimsGetCmdSimId)
path = strReplace(path, "{"+"simId"+"}", escapedSimId, -1)
return path
}
func buildQueryForStatsAirSimsGetCmd() url.Values {
result := url.Values{}
if StatsAirSimsGetCmdPeriod != "" {
result.Add("period", StatsAirSimsGetCmdPeriod)
}
if StatsAirSimsGetCmdFrom != 0 {
result.Add("from", sprintf("%d", StatsAirSimsGetCmdFrom))
}
if StatsAirSimsGetCmdTo != 0 {
result.Add("to", sprintf("%d", StatsAirSimsGetCmdTo))
}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
test/performance/benchmarks/deployment-probe/continuous/main.go | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
"github.com/google/mako/go/quickstore"
"sigs.k8s.io/yaml"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/ptr"
"knative.dev/pkg/signals"
netv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
networkingclient "knative.dev/networking/pkg/client/injection/client"
"knative.dev/pkg/test/mako"
autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1"
v1 "knative.dev/serving/pkg/apis/serving/v1"
servingclient "knative.dev/serving/pkg/client/injection/client"
)
var (
template = flag.String("template", "", "The service template to load from kodata/")
duration = flag.Duration("duration", 25*time.Minute, "The duration of the benchmark to run.")
frequency = flag.Duration("frequency", 5*time.Second, "The frequency at which to create services.")
)
func readTemplate() (*v1.Service, error) {
path := filepath.Join(os.Getenv("KO_DATA_PATH"), *template+"-template.yaml")
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
svc := &v1.Service{}
if err := yaml.Unmarshal(b, svc); err != nil {
return nil, err
}
svc.OwnerReferences = []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Pod",
Name: os.Getenv("POD_NAME"),
UID: types.UID(os.Getenv("POD_UID")),
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
}}
return svc, nil
}
func handle(q *quickstore.Quickstore, svc kmeta.Accessor, status duckv1.Status,
seen sets.String, metric string) {
if seen.Has(svc.GetName()) {
return
}
cc := status.GetCondition(apis.ConditionReady)
if cc == nil || cc.Status == corev1.ConditionUnknown {
return
}
seen.Insert(svc.GetName())
created := svc.GetCreationTimestamp().Time
ready := cc.LastTransitionTime.Inner.Time
elapsed := ready.Sub(created)
if cc.Status == corev1.ConditionTrue {
q.AddSamplePoint(mako.XTime(created), map[string]float64{
metric: elapsed.Seconds(),
})
log.Print("Ready: ", svc.GetName())
} else if cc.Status == corev1.ConditionFalse {
q.AddError(mako.XTime(created), cc.Message)
log.Printf("Not Ready: %s; %s: %s", svc.GetName(), cc.Reason, cc.Message)
}
}
func main() {
flag.Parse()
// We want this for properly handling Kubernetes container lifecycle events.
ctx := signals.NewContext()
tmpl, err := readTemplate()
if err != nil {
log.Fatalf("Unable to read template %s: %v", *template, err)
}
// We cron every 30 minutes, so make sure that we don't severely overrun to
// limit how noisy a neighbor we can be.
ctx, cancel := context.WithTimeout(ctx, *duration)
defer cancel()
// Tag this run with the various flag values.
tags := []string{
"template=" + *template,
"duration=" + duration.String(),
"frequency=" + frequency.String(),
}
mc, err := mako.Setup(ctx, tags...)
if err != nil {
log.Fatal("Failed to setup mako: ", err)
}
q, qclose, ctx := mc.Quickstore, mc.ShutDownFunc, mc.Context
// Use a fresh context here so that our RPC to terminate the sidecar
// isn't subject to our timeout (or we won't shut it down when we time out)
defer qclose(context.Background())
sc := servingclient.Get(ctx)
cleanup := func() error {
return sc.ServingV1().Services(tmpl.Namespace).DeleteCollection(
context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{})
}
defer cleanup()
// Wrap fatalf in a helper or our sidecar will live forever.
fatalf := func(f string, args ...interface{}) {
qclose(context.Background())
cleanup()
log.Fatalf(f, args...)
}
// Set up the threshold analyzers for the selected benchmark. This will
// cause Mako/Quickstore to analyze the results we are storing and flag
// things that are outside of expected bounds.
q.Input.ThresholdInputs = append(q.Input.ThresholdInputs,
newDeploy95PercentileLatency(tags...),
newReadyDeploymentCount(tags...),
)
if err := cleanup(); err != nil {
fatalf("Error cleaning up services: %v", err)
}
lo := metav1.ListOptions{TimeoutSeconds: ptr.Int64(int64(duration.Seconds()))}
// TODO(mattmoor): We could maybe use a duckv1.KResource to eliminate this boilerplate.
serviceWI, err := sc.ServingV1().Services(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch services: %v", err)
}
defer serviceWI.Stop()
serviceSeen := sets.String{}
configurationWI, err := sc.ServingV1().Configurations(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch configurations: %v", err)
}
defer configurationWI.Stop()
configurationSeen := sets.String{}
routeWI, err := sc.ServingV1().Routes(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch routes: %v", err)
}
defer routeWI.Stop()
routeSeen := sets.String{}
revisionWI, err := sc.ServingV1().Revisions(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch revisions: %v", err)
}
defer revisionWI.Stop()
revisionSeen := sets.String{}
nc := networkingclient.Get(ctx)
ingressWI, err := nc.NetworkingV1alpha1().Ingresses(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch ingresss: %v", err)
}
defer ingressWI.Stop()
ingressSeen := sets.String{}
sksWI, err := nc.NetworkingV1alpha1().ServerlessServices(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch skss: %v", err)
}
defer sksWI.Stop()
sksSeen := sets.String{}
paWI, err := sc.AutoscalingV1alpha1().PodAutoscalers(tmpl.Namespace).Watch(ctx, lo)
if err != nil {
fatalf("Unable to watch pas: %v", err)
}
defer paWI.Stop()
paSeen := sets.String{}
tick := time.NewTicker(*frequency)
func() {
for {
select {
case <-ctx.Done():
// If we timeout or the pod gets shutdown via SIGTERM then start to
// clean thing up.
return
case ts := <-tick.C:
svc, err := sc.ServingV1().Services(tmpl.Namespace).Create(ctx, tmpl, metav1.CreateOptions{})
if err != nil {
q.AddError(mako.XTime(ts), err.Error())
log.Println("Error creating service:", err)
break
}
log.Println("Created:", svc.Name)
case event := <-serviceWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
svc := event.Object.(*v1.Service)
handle(q, svc, svc.Status.Status, serviceSeen, "dl")
case event := <-configurationWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
cfg := event.Object.(*v1.Configuration)
handle(q, cfg, cfg.Status.Status, configurationSeen, "cl")
case event := <-routeWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
rt := event.Object.(*v1.Route)
handle(q, rt, rt.Status.Status, routeSeen, "rl")
case event := <-revisionWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
rev := event.Object.(*v1.Revision)
handle(q, rev, rev.Status.Status, revisionSeen, "rvl")
case event := <-ingressWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
ing := event.Object.(*netv1alpha1.Ingress)
handle(q, ing, ing.Status.Status, ingressSeen, "il")
case event := <-sksWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
ing := event.Object.(*netv1alpha1.ServerlessService)
handle(q, ing, ing.Status.Status, sksSeen, "sksl")
case event := <-paWI.ResultChan():
if event.Type != watch.Modified {
// Skip events other than modifications
break
}
pa := event.Object.(*autoscalingv1alpha1.PodAutoscaler)
handle(q, pa, pa.Status.Status, paSeen, "pal")
}
}
}()
// Commit this benchmark run to Mako!
out, err := q.Store()
if err != nil {
fatalf("q.Store error: %v: %v", out, err)
}
log.Printf("Done! Run: %s\n", out.GetRunChartLink())
}
| [
"\"KO_DATA_PATH\"",
"\"POD_NAME\"",
"\"POD_UID\""
]
| []
| [
"POD_UID",
"POD_NAME",
"KO_DATA_PATH"
]
| [] | ["POD_UID", "POD_NAME", "KO_DATA_PATH"] | go | 3 | 0 | |
federated_learning/FedaGrac/param_server.py | import time, os, json, time
import numpy as np
import torch
from torch._C import device
import torch.distributed as dist
from torch.autograd import Variable
def test_model(model, test_data, dev):
correct, total = 0, 0
model.eval()
with torch.no_grad():
for data, target in test_data:
data, target = Variable(data).cuda(dev), Variable(target).cuda(dev)
output = model(data)
# get the index of the max log-probability
_, predictions = output.max(1)
total += predictions.size(0)
correct += torch.sum(predictions == target.data).float()
acc = correct / total
return acc.item()
def update_model(model, global_mu, size, cpu, gpu, args):
# all_param = model.state_dict()
# receive the parameter variance from workers
for param in model.parameters():
tensor = torch.zeros_like(param.data, device=cpu)
gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]
dist.gather(tensor=tensor, gather_list=gather_list, dst=0)
param.data = torch.zeros_like(param.data, device=gpu)
for w in range(size):
# Suppose the model received from clients are well processed
param.data = param.data + gather_list[w].clone().detach().to(gpu)
# receive averaged K from workers
avg_k_list = [torch.tensor(0.0) for _ in range(size)]
dist.gather(tensor=torch.tensor(0.0), gather_list=avg_k_list, dst=0)
avg_k = sum(avg_k_list)
print('Averaged K:', avg_k)
# send averaged K to workers
avg_k_list = [avg_k if args.avg_k==-1 else torch.tensor(float(args.avg_k)) for _ in range(size)]
dist.scatter(tensor=avg_k, scatter_list=avg_k_list)
# receive the mu from clients
for idx, param in enumerate(global_mu):
tensor = torch.zeros_like(param.data, device=cpu)
gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]
dist.gather(tensor=tensor, gather_list=gather_list, dst=0)
global_mu[idx] = torch.zeros_like(param.data, device=gpu)
for w in range(size):
# Suppose the model received from clients are well processed
global_mu[idx] = global_mu[idx] + gather_list[w].clone().detach().to(gpu)
# send the parameters to workers
for param in model.parameters():
tmp_p = param.clone().detach().to(cpu)
scatter_p_list = [tmp_p for _ in range(size)]
dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)
if torch.sum(torch.isnan(tmp_p)) > 0:
print("NaN occurs. Terminate. ")
exit(-1)
# send global_mu to workers
for param in global_mu:
tmp_p = param.clone().detach().to(cpu)
scatter_p_list = [tmp_p for _ in range(size)]
dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)
# model.load_state_dict(all_param)
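# Hypothetical worker-side counterpart of update_model(), included only to make
# the gather/scatter choreography above easier to follow. It is NOT part of the
# original module; names, weighting and tensor handling are assumptions.
def _example_worker_round(model, local_mu, local_avg_k, cpu):
    # 1) send this worker's (already weighted) parameters to rank 0
    for param in model.parameters():
        dist.gather(tensor=param.data.clone().detach().to(cpu), dst=0)
    # 2) send the locally averaged K, then receive the global average back
    dist.gather(tensor=torch.tensor(float(local_avg_k)), dst=0)
    global_k = torch.tensor(0.0)
    dist.scatter(tensor=global_k)
    # 3) send the local mu terms to rank 0
    for mu in local_mu:
        dist.gather(tensor=mu.clone().detach().to(cpu), dst=0)
    # 4) receive the aggregated parameters and global mu from rank 0
    for param in model.parameters():
        buf = torch.zeros_like(param.data, device=cpu)
        dist.scatter(tensor=buf)
        param.data = buf.clone()
    for idx, mu in enumerate(local_mu):
        buf = torch.zeros_like(mu, device=cpu)
        dist.scatter(tensor=buf)
        local_mu[idx] = buf.clone()
    return global_k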
def run(size, model, args, test_data, f_result, cpu, gpu):
# Receive the weights from all clients
temp_w = torch.tensor([0.0 for _ in range(args.num_workers+1)])
weights = [torch.tensor([0.0 for _ in range(args.num_workers+1)]) for _ in range(size)]
dist.gather(tensor=temp_w, gather_list=weights, dst=0)
weights = sum(weights)
weights = weights / torch.sum(weights)
print('weights:', weights)
# send weights to clients
weights_list = [weights.clone().detach().to(cpu) for _ in range(size)]
dist.scatter(tensor=temp_w, scatter_list=weights_list)
start = time.time()
model = model.cuda(gpu)
for p in model.parameters():
tmp_p = p.clone().detach().to(cpu)
scatter_p_list = [tmp_p for _ in range(size)]
# dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list, group=group)
dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)
global_mu = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]
print('Model has sent to all nodes! ')
print('Begin!')
np.random.seed(42)
for t in range(args.T):
model.train()
# send participants to all clients
participants = np.random.choice(np.arange(len(weights)), size=args.num_part, replace=True, p=weights.numpy()) if args.partial else np.arange(len(weights))
print('Participants list:', list(participants))
participants = torch.tensor(participants).to(cpu)
part_list = [participants for _ in range(size)]
dist.scatter(tensor=participants, scatter_list=part_list)
# receive the list of train loss from workers
info_list = [torch.tensor(0.0) for _ in range(size)]
# dist.gather(tensor=torch.tensor([0.0]), gather_list=info_list, group=group)
dist.gather(tensor=torch.tensor(0.0), gather_list=info_list, dst=0)
# info_list = np.concatenate([list(a) for a in info_list])
# train_loss = sum(info_list).item() / args.num_part if args.partial else sum(info_list * weights).item()
train_loss = sum(info_list).item()
# if args.partial:
# update_model_partial(model, size, cpu, gpu, args.num_part)
# else:
# update_model_full(model, size, cpu, gpu, weights)
update_model(model, global_mu, size, cpu, gpu, args)
timestamp = time.time() - start
test_acc = test_model(model, test_data, gpu)
print("Epoch: {}\t\tLoss: {}\t\tAccuracy: {}".format(t, train_loss, test_acc))
f_result.write(str(t) + "\t" + str(timestamp) + "\t" + str(train_loss) + "\t" + str(test_acc) + "\n")
f_result.flush()
def init_processes(rank, size, model, args, test_data, cpu, gpu, backend='mpi'):
if backend == 'mpi':
dist.init_process_group(backend)
elif backend == 'gloo':
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
if not os.path.exists(args.result):
os.makedirs(args.result)
result_file = os.path.join(args.result, '{}.txt'.format(len(os.listdir(args.result))))
f_result = open(result_file, 'w')
f_result.write(json.dumps(vars(args)) + '\n')
run(size, model, args, test_data, f_result, cpu, gpu) | []
| []
| [
"MASTER_ADDR",
"MASTER_PORT"
]
| [] | ["MASTER_ADDR", "MASTER_PORT"] | python | 2 | 0 | |
notebooks/utils/data_cube_utilities/dask.py | import os
import psutil
import dask
from datacube.utils.dask import start_local_dask
from datacube.utils.rio import configure_s3_access
def create_local_dask_cluster(spare_mem='3Gb',
aws_unsigned= True,
display_client=True,
start_local_dask_kwargs=None,
configure_s3_access_kwargs=None):
"""
Credit belongs to Digital Earth Africa:
https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/blob/master/Scripts/deafrica_dask.py
Using the datacube utils function 'start_local_dask', generate
a local dask cluster.
Example use :
import sys
sys.path.append("../Scripts")
from deafrica_dask import create_local_dask_cluster
create_local_dask_cluster(spare_mem='4Gb')
Parameters
----------
spare_mem : String, optional
The amount of memory, in Gb, to leave for the notebook to run.
This memory will not be used by the cluster. e.g '3Gb'
aws_unsigned : Bool, optional
This parameter determines if credentials for S3 access are required and
passes them on to processing threads, either local or on dask cluster.
Set to True if working with publicly available datasets, and False if
        working with private data, e.g. if loading Landsat C2 provisional data,
        set this to aws_unsigned=False.
display_client : Bool, optional
An optional boolean indicating whether to display a summary of
the dask client, including a link to monitor progress of the
analysis. Set to False to hide this display.
start_local_dask_kwargs: dict, optional
Keyword arguments for the function `datacube.utils.dask.start_local_dask`, which
creates the Dask client.
Some settings to configure include the number of workers, number of threads per worker, and the memory limit.
configure_s3_access_kwargs: dict, optional
Keyword arguments for the function `datacube.utils.rio.configure_s3_access`, which
configures the Dask to access S3.
"""
start_local_dask_kwargs = {} if start_local_dask_kwargs is None else start_local_dask_kwargs
configure_s3_access_kwargs = {} if configure_s3_access_kwargs is None else configure_s3_access_kwargs
# configure dashboard link to go over proxy
dask.config.set({"distributed.dashboard.link":
os.environ.get('JUPYTERHUB_SERVICE_PREFIX', '/')+"proxy/{port}/status"})
# start up a local cluster
num_physical_cpu = psutil.cpu_count(logical=False)
num_logical_cpu = psutil.cpu_count(logical=True)
start_local_dask_kwargs['n_workers'] = num_physical_cpu - 1
start_local_dask_kwargs['threads_per_worker'] = int(num_logical_cpu / num_physical_cpu)
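    # Worked example (illustrative): on a machine with 16 physical cores and
    # 32 logical CPUs this yields n_workers=15 and threads_per_worker=2,
    # leaving one physical core (plus `spare_mem`) free for the notebook itself.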
client = start_local_dask(mem_safety_margin=spare_mem, **start_local_dask_kwargs)
## Configure GDAL for s3 access
configure_s3_access(aws_unsigned=aws_unsigned,
client=client, **configure_s3_access_kwargs)
return client | []
| []
| [
"JUPYTERHUB_SERVICE_PREFIX"
]
| [] | ["JUPYTERHUB_SERVICE_PREFIX"] | python | 1 | 0 | |
build/builder.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// package build implements a more convenient interface for building
// zoekt indices.
package build
import (
"crypto/sha1"
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"runtime/pprof"
"sort"
"strings"
"sync"
"github.com/google/zoekt"
"github.com/google/zoekt/ctags"
)
var DefaultDir = filepath.Join(os.Getenv("HOME"), ".zoekt")
// Branch describes a single branch version.
type Branch struct {
Name string
Version string
}
// Options sets options for the index building.
type Options struct {
// IndexDir is a directory that holds *.zoekt index files.
IndexDir string
// SizeMax is the maximum file size
SizeMax int
// Parallelism is the maximum number of shards to index in parallel
Parallelism int
// ShardMax sets the maximum corpus size for a single shard
ShardMax int
// RepositoryDescription holds names and URLs for the repository.
RepositoryDescription zoekt.Repository
// SubRepositories is a path => sub repository map.
SubRepositories map[string]*zoekt.Repository
// Path to exuberant ctags binary to run
CTags string
// If set, ctags must succeed.
CTagsMustSucceed bool
// Write memory profiles to this file.
MemProfile string
}
// Builder manages (parallel) creation of uniformly sized shards. The
// builder buffers up documents until it has collected enough of them, and
// then builds a shard and writes it out.
type Builder struct {
opts Options
throttle chan int
nextShardNum int
todo []*zoekt.Document
size int
parser ctags.Parser
building sync.WaitGroup
errMu sync.Mutex
buildError error
// temp name => final name for finished shards. We only rename
	// them once all shards succeed to avoid Frankenstein corpuses.
finishedShards map[string]string
}
type finishedShard struct {
temp, final string
}
// SetDefaults sets reasonable default options.
func (o *Options) SetDefaults() {
if o.CTags == "" {
ctags, err := exec.LookPath("universal-ctags")
if err == nil {
o.CTags = ctags
}
}
if o.CTags == "" {
ctags, err := exec.LookPath("ctags-exuberant")
if err == nil {
o.CTags = ctags
}
}
if o.Parallelism == 0 {
o.Parallelism = 1
}
if o.SizeMax == 0 {
o.SizeMax = 128 << 10
}
if o.ShardMax == 0 {
o.ShardMax = 128 << 20
}
if o.RepositoryDescription.Name == "" && o.RepositoryDescription.URL != "" {
parsed, _ := url.Parse(o.RepositoryDescription.URL)
if parsed != nil {
o.RepositoryDescription.Name = filepath.Join(parsed.Host, parsed.Path)
}
}
}
func hashString(s string) string {
h := sha1.New()
io.WriteString(h, s)
return fmt.Sprintf("%x", h.Sum(nil))
}
// shardName returns the name of the given index shard.
func (o *Options) shardName(n int) string {
abs := url.QueryEscape(o.RepositoryDescription.Name)
if len(abs) > 200 {
abs = abs[:200] + hashString(abs)[:8]
}
return filepath.Join(o.IndexDir,
fmt.Sprintf("%s_v%d.%05d.zoekt", abs, zoekt.IndexFormatVersion, n))
}
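// For example (illustrative; the exact version digit depends on zoekt.IndexFormatVersion):
// a repository named "github.com/google/zoekt" produces shards such as
// github.com%2Fgoogle%2Fzoekt_v16.00000.zoekt under IndexDir.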
// IndexVersions returns the versions as present in the index, for
// implementing incremental indexing.
func (o *Options) IndexVersions() []zoekt.RepositoryBranch {
fn := o.shardName(0)
f, err := os.Open(fn)
if err != nil {
return nil
}
iFile, err := zoekt.NewIndexFile(f)
if err != nil {
return nil
}
defer iFile.Close()
repo, index, err := zoekt.ReadMetadata(iFile)
if err != nil {
return nil
}
if index.IndexFeatureVersion != zoekt.FeatureVersion {
return nil
}
return repo.Branches
}
// NewBuilder creates a new Builder instance.
func NewBuilder(opts Options) (*Builder, error) {
opts.SetDefaults()
if opts.RepositoryDescription.Name == "" {
return nil, fmt.Errorf("builder: must set Name")
}
b := &Builder{
opts: opts,
throttle: make(chan int, opts.Parallelism),
finishedShards: map[string]string{},
}
if b.opts.CTags == "" && b.opts.CTagsMustSucceed {
return nil, fmt.Errorf("ctags binary not found, but CTagsMustSucceed set")
}
if strings.Contains(opts.CTags, "universal-ctags") {
parser, err := ctags.NewParser(opts.CTags)
if err != nil && opts.CTagsMustSucceed {
return nil, fmt.Errorf("ctags.NewParser: %v", err)
}
b.parser = parser
}
if _, err := b.newShardBuilder(); err != nil {
return nil, err
}
return b, nil
}
// AddFile is a convenience wrapper for the Add method
func (b *Builder) AddFile(name string, content []byte) error {
return b.Add(zoekt.Document{Name: name, Content: content})
}
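// Illustrative usage sketch (not part of the original file; field values are
// placeholders, not recommendations):
//
//	opts := Options{
//		IndexDir:              "/data/index",
//		RepositoryDescription: zoekt.Repository{Name: "example/repo"},
//	}
//	b, err := NewBuilder(opts)
//	if err != nil {
//		// handle error
//	}
//	_ = b.AddFile("main.go", []byte("package main"))
//	err = b.Finish()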
func (b *Builder) Add(doc zoekt.Document) error {
// We could pass the document on to the shardbuilder, but if
// we pass through a part of the source tree with binary/large
// files, the corresponding shard would be mostly empty, so
// insert a reason here too.
if len(doc.Content) > b.opts.SizeMax {
doc.SkipReason = fmt.Sprintf("document size %d larger than limit %d", len(doc.Content), b.opts.SizeMax)
} else if err := zoekt.CheckText(doc.Content); err != nil {
doc.SkipReason = err.Error()
doc.Language = "binary"
}
b.todo = append(b.todo, &doc)
b.size += len(doc.Name) + len(doc.Content)
if b.size > b.opts.ShardMax {
return b.flush()
}
return nil
}
// Finish creates a last shard from the buffered documents, and clears
// stale shards from previous runs
func (b *Builder) Finish() error {
b.flush()
b.building.Wait()
if b.buildError != nil {
for tmp := range b.finishedShards {
os.Remove(tmp)
}
return b.buildError
}
for tmp, final := range b.finishedShards {
if err := os.Rename(tmp, final); err != nil {
b.buildError = err
}
}
if b.nextShardNum > 0 {
b.deleteRemainingShards()
}
return b.buildError
}
func (b *Builder) deleteRemainingShards() {
for {
shard := b.nextShardNum
b.nextShardNum++
name := b.opts.shardName(shard)
if err := os.Remove(name); os.IsNotExist(err) {
break
}
}
}
func (b *Builder) flush() error {
todo := b.todo
b.todo = nil
b.size = 0
b.errMu.Lock()
defer b.errMu.Unlock()
if b.buildError != nil {
return b.buildError
}
hasShard := b.nextShardNum > 0
if len(todo) == 0 && hasShard {
return nil
}
shard := b.nextShardNum
b.nextShardNum++
if b.opts.Parallelism > 1 {
b.building.Add(1)
go func() {
b.throttle <- 1
done, err := b.buildShard(todo, shard)
<-b.throttle
b.errMu.Lock()
defer b.errMu.Unlock()
if err != nil && b.buildError == nil {
b.buildError = err
}
if err == nil {
b.finishedShards[done.temp] = done.final
}
b.building.Done()
}()
} else {
// No goroutines when we're not parallel. This
// simplifies memory profiling.
done, err := b.buildShard(todo, shard)
b.buildError = err
if err == nil {
b.finishedShards[done.temp] = done.final
}
if b.opts.MemProfile != "" {
// drop memory, and profile.
todo = nil
b.writeMemProfile(b.opts.MemProfile)
}
return b.buildError
}
return nil
}
var profileNumber int
func (b *Builder) writeMemProfile(name string) {
nm := fmt.Sprintf("%s.%d", name, profileNumber)
profileNumber++
f, err := os.Create(nm)
if err != nil {
log.Fatal("could not create memory profile: ", err)
}
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatal("could not write memory profile: ", err)
}
f.Close()
log.Printf("wrote mem profile %q", nm)
}
// map [0,inf) to [0,1) monotonically
func squashRange(j int) float64 {
x := float64(j)
return x / (1 + x)
}
var testRe = regexp.MustCompile("test")
type rankedDoc struct {
*zoekt.Document
rank []float64
}
func rank(d *zoekt.Document, origIdx int) []float64 {
test := 0.0
if testRe.MatchString(d.Name) {
test = 1.0
}
// Smaller is earlier (=better).
return []float64{
// Prefer docs that are not tests
test,
// With many symbols
1.0 - squashRange(len(d.Symbols)),
// With short content
squashRange(len(d.Content)),
// With short names
squashRange(len(d.Name)),
// That is present is as many branches as possible
1.0 - squashRange(len(d.Branches)),
// Preserve original ordering.
squashRange(origIdx),
}
}
func sortDocuments(todo []*zoekt.Document) {
rs := make([]rankedDoc, 0, len(todo))
for i, t := range todo {
rd := rankedDoc{t, rank(t, i)}
rs = append(rs, rd)
}
sort.Slice(rs, func(i, j int) bool {
r1 := rs[i].rank
r2 := rs[j].rank
for i := range r1 {
if r1[i] < r2[i] {
return true
}
if r1[i] > r2[i] {
return false
}
}
return false
})
for i := range todo {
todo[i] = rs[i].Document
}
}
func (b *Builder) buildShard(todo []*zoekt.Document, nextShardNum int) (*finishedShard, error) {
if b.opts.CTags != "" {
err := ctagsAddSymbols(todo, b.parser, b.opts.CTags)
if b.opts.CTagsMustSucceed && err != nil {
return nil, err
}
if err != nil {
log.Printf("ignoring %s error: %v", b.opts.CTags, err)
}
}
name := b.opts.shardName(nextShardNum)
shardBuilder, err := b.newShardBuilder()
if err != nil {
return nil, err
}
sortDocuments(todo)
for _, t := range todo {
if err := shardBuilder.Add(*t); err != nil {
return nil, err
}
}
return b.writeShard(name, shardBuilder)
}
func (b *Builder) newShardBuilder() (*zoekt.IndexBuilder, error) {
desc := b.opts.RepositoryDescription
desc.SubRepoMap = b.opts.SubRepositories
shardBuilder, err := zoekt.NewIndexBuilder(&desc)
if err != nil {
return nil, err
}
return shardBuilder, nil
}
func (b *Builder) writeShard(fn string, ib *zoekt.IndexBuilder) (*finishedShard, error) {
dir := filepath.Dir(fn)
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
f, err := ioutil.TempFile(dir, filepath.Base(fn))
if err != nil {
return nil, err
}
if runtime.GOOS != "windows" {
if err := f.Chmod(0666 &^ umask); err != nil {
return nil, err
}
}
defer f.Close()
if err := ib.Write(f); err != nil {
return nil, err
}
fi, err := f.Stat()
if err != nil {
return nil, err
}
if err := f.Close(); err != nil {
return nil, err
}
log.Printf("finished %s: %d index bytes (overhead %3.1f)", fn, fi.Size(),
float64(fi.Size())/float64(ib.ContentSize()+1))
return &finishedShard{f.Name(), fn}, nil
}
// umask holds the Umask of the current process
var umask os.FileMode
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
charts/doogie-0.7.8/src/build.go | package main
import (
"archive/tar"
"archive/zip"
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
func main() {
if err := start(); err != nil {
fmt.Printf("Error: %v\n", err)
os.Exit(2)
}
}
var debugEnabled = false
func start() error {
if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
return fmt.Errorf("Unsupported OS '%v'", runtime.GOOS)
}
if runtime.GOARCH != "amd64" {
return fmt.Errorf("Unsupported OS '%v' or arch '%v'", runtime.GOOS, runtime.GOARCH)
}
if len(os.Args) < 2 {
return fmt.Errorf("No command provided")
}
switch os.Args[1] {
case "rerun":
err := clean()
if err == nil {
err = run()
}
return err
case "run":
return run()
case "clean":
return clean()
case "rebuild":
err := clean()
if err == nil {
err = build()
}
return err
case "build":
return build()
case "package":
return pkg()
case "build-cef":
return buildCef()
case "lint":
return lint()
case "unit-test":
return unitTest()
case "benchmark":
return benchmark()
default:
return fmt.Errorf("Unrecognized command '%v'", os.Args[1])
}
}
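// Illustrative invocations (assuming the script is launched as `go run build.go <command> [target]`,
// which is an assumption about the surrounding repository, not stated in this file):
//
//	go run build.go build release   // qmake + (n)make/jom a release build
//	go run build.go run debug       // build, copy resources, then launch the debug binary
//	go run build.go package release // produce doogie.zip / doogie.tar.gz under release/package
//	go run build.go lint            // run cpplint via DEPOT_TOOLS_DIR
//	go run build.go build-cef       // build the CEF wrapper libraries via CEF_DIR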
func run(extraQmakeArgs ...string) error {
if err := build(extraQmakeArgs...); err != nil {
return err
}
target, err := target()
if err != nil {
return err
}
return execCmd(filepath.Join(target, exeExt("doogie")), extraArgs()...)
}
func clean() error {
err := os.RemoveAll("debug")
if err == nil {
err = os.RemoveAll("release")
}
return err
}
func build(extraQmakeArgs ...string) error {
target, err := target()
if err != nil {
return err
}
// Get qmake path
qmakePath, err := exec.LookPath(exeExt("qmake"))
if err != nil {
return err
}
// Make the dir for the target
if err := os.MkdirAll(target, 0755); err != nil {
return err
}
// Run qmake TODO: put behind flag
qmakeArgs := extraQmakeArgs
if target == "debug" {
qmakeArgs = append(qmakeArgs, "CONFIG+=debug")
} else {
qmakeArgs = append(qmakeArgs, "CONFIG+=release", "CONFIG-=debug")
}
qmakeArgs = append(qmakeArgs, "doogie.pro")
if err := execCmd(qmakePath, qmakeArgs...); err != nil {
return fmt.Errorf("QMake failed: %v", err)
}
// Run nmake if windows, make if linux
makeExe := "make"
makeArgs := []string{}
if runtime.GOOS == "windows" {
makeExe = "nmake.exe"
// Use jom instead if it's on the path
if _, err = exec.LookPath("jom.exe"); err == nil {
makeExe = "jom.exe"
}
// This version takes the target name unlike the Linux one
makeArgs = []string{target, "/NOLOGO"}
}
if err := execCmd(makeExe, makeArgs...); err != nil {
return fmt.Errorf("NMake failed: %v", err)
}
// Chmod on linux
if runtime.GOOS == "linux" {
if err = os.Chmod(filepath.Join(target, "doogie"), 0755); err != nil {
return err
}
}
// Copy over resources
if err := copyResources(qmakePath, target); err != nil {
return err
}
return nil
}
func pkg() error {
target, err := target()
if err != nil {
return err
}
// Just move over the files that matter to a new deploy dir and zip em up
deployDir := filepath.Join(target, "package", "doogie")
if err = os.MkdirAll(deployDir, 0755); err != nil {
return err
}
// Get all base-dir items to copy, excluding only some
filesToCopy := []string{}
dirFiles, err := ioutil.ReadDir(target)
if err != nil {
return err
}
for _, file := range dirFiles {
if !file.IsDir() {
switch filepath.Ext(file.Name()) {
case ".cpp", ".h", ".obj", ".res", ".manifest", ".log", ".o":
// No-op
default:
filesToCopy = append(filesToCopy, file.Name())
}
}
}
if err = copyEachToDirIfNotPresent(target, deployDir, filesToCopy...); err != nil {
return err
}
// And other dirs if present in folder
subDirs := []string{"imageformats", "locales", "platforms", "sqldrivers", "styles"}
for _, subDir := range subDirs {
srcDir := filepath.Join(target, subDir)
if _, err = os.Stat(srcDir); err == nil {
if err = copyDirIfNotPresent(srcDir, filepath.Join(deployDir, subDir)); err != nil {
return fmt.Errorf("Unable to copy %v: %v", subDir, err)
}
}
}
// Now create a zip or tar file with all the goods
if runtime.GOOS == "windows" {
err = createSingleDirZipFile(deployDir, filepath.Join(target, "package", "doogie.zip"))
} else {
err = createSingleDirTarGzFile(deployDir, filepath.Join(target, "package", "doogie.tar.gz"))
}
if err != nil {
return err
}
return os.RemoveAll(deployDir)
}
func buildCef() error {
if runtime.GOOS == "windows" {
return buildCefWindows()
}
return buildCefLinux()
}
func buildCefLinux() error {
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// We have to run separate make runs for different target types
makeLib := func(target string) error {
if err := execCmdInDir(cefDir, "cmake", "-DCMAKE_BUILD_TYPE="+target, "."); err != nil {
return fmt.Errorf("CMake failed: %v", err)
}
wrapperDir := filepath.Join(cefDir, "libcef_dll_wrapper")
if err := execCmdInDir(wrapperDir, "make"); err != nil {
return fmt.Errorf("Make failed: %v", err)
}
if err := os.Rename(filepath.Join(wrapperDir, "libcef_dll_wrapper.a"),
filepath.Join(wrapperDir, "libcef_dll_wrapper_"+target+".a")); err != nil {
return fmt.Errorf("Unable to rename .a file: %v", err)
}
// We also need to run strip on the Release libcef.so per:
// https://bitbucket.org/chromiumembedded/cef/issues/1979
if target == "Release" {
// Back it up first
err := copyIfNotPresent(filepath.Join(cefDir, "Release/libcef.so"),
filepath.Join(cefDir, "Release/libcef.fullsym.so"))
if err != nil {
return fmt.Errorf("Release libcef backup failed: %v", err)
}
if err = execCmdInDir(cefDir, "strip", "--strip-all", "Release/libcef.so"); err != nil {
return fmt.Errorf("Failed stripping symbols: %v", err)
}
}
return nil
}
if err := makeLib("Debug"); err != nil {
return err
}
return makeLib("Release")
}
func buildCefWindows() error {
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// Build the make files
if err := execCmdInDir(cefDir, "cmake", "-G", "Visual Studio 14 Win64", "."); err != nil {
return fmt.Errorf("CMake failed: %v", err)
}
// Replace a couple of strings in VC proj file on windows
dllWrapperDir := filepath.Join(cefDir, "libcef_dll_wrapper")
vcProjFile := filepath.Join(dllWrapperDir, "libcef_dll_wrapper.vcxproj")
projXml, err := ioutil.ReadFile(vcProjFile)
if err != nil {
return fmt.Errorf("Unable to read VC proj file: %v", err)
}
// First one is debug, second is release
projXml = bytes.Replace(projXml, []byte("<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>"),
[]byte("<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>"), 1)
projXml = bytes.Replace(projXml, []byte("<RuntimeLibrary>MultiThreaded</RuntimeLibrary>"),
[]byte("<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>"), 1)
if err = ioutil.WriteFile(vcProjFile, projXml, os.ModePerm); err != nil {
return fmt.Errorf("Unable to write VC proj file: %v", err)
}
// Build debug and then build release
if err = execCmdInDir(dllWrapperDir, "msbuild", "libcef_dll_wrapper.vcxproj", "/p:Configuration=Debug"); err != nil {
return fmt.Errorf("Unable to build debug wrapper: %v", err)
}
if err = execCmdInDir(dllWrapperDir, "msbuild", "libcef_dll_wrapper.vcxproj", "/p:Configuration=Release"); err != nil {
return fmt.Errorf("Unable to build release wrapper: %v", err)
}
return nil
}
func lint() error {
toIgnore := []string{
"No copyright message found.",
"#ifndef header guard has wrong style, please use: SRC_",
"#endif line should be \"#endif // SRC_",
"Include the directory when naming .h files",
"Done processing",
"Total errors found",
}
// Run lint on all cc and h files, and trim off any of the toIgnore stuff
depotToolsDir := os.Getenv("DEPOT_TOOLS_DIR")
if depotToolsDir == "" {
return fmt.Errorf("Unable to find DEPOT_TOOLS_DIR env var")
}
args := []string{
filepath.Join(depotToolsDir, "cpplint.py"),
// Can't use, ref: https://github.com/google/styleguide/issues/22
// "--root=doogie\\",
}
integrationTestDir := filepath.Join("tests", "integration")
err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
if !info.IsDir() && !strings.HasPrefix(info.Name(), "moc_") &&
!strings.HasPrefix(path, integrationTestDir) &&
(strings.HasSuffix(path, ".cc") || strings.HasSuffix(path, ".h")) {
args = append(args, path)
}
return nil
})
if err != nil {
return err
}
pycmd := "python"
if runtime.GOOS == "linux" {
// python by itself may refer to python3 or python2 depending on the distro,
// so invoke python2 explicitly.
pycmd = "python2"
}
cmd := exec.Command(pycmd, args...)
out, err := cmd.CombinedOutput()
if err != nil && len(out) == 0 {
return fmt.Errorf("Unable to run cpplint: %v", err)
}
scanner := bufio.NewScanner(bytes.NewReader(out))
foundAny := false
for scanner.Scan() {
// If after the trimmed string after the second colon starts w/ any toIgnore, we ignore it
ignore := false
origLine := scanner.Text()
checkLine := origLine
if firstColon := strings.Index(origLine, ":"); firstColon != -1 {
if secondColon := strings.Index(origLine[firstColon+1:], ":"); secondColon != -1 {
checkLine = strings.TrimSpace(origLine[firstColon+secondColon+2:])
}
}
for _, toCheck := range toIgnore {
if strings.HasPrefix(checkLine, toCheck) {
ignore = true
break
}
}
if !ignore {
fmt.Println(origLine)
foundAny = true
}
}
if foundAny {
return fmt.Errorf("Lint check returned one or more errors")
}
return nil
}
func unitTest() error {
if err := build("CONFIG+=test"); err != nil {
return err
}
target, err := target()
if err != nil {
return err
}
return execCmd(filepath.Join(target, exeExt("doogie-test")))
}
func benchmark() error {
if err := build("CONFIG+=benchmark"); err != nil {
return err
}
target, err := target()
if err != nil {
return err
}
return execCmd(filepath.Join(target, exeExt("doogie-benchmark")))
}
func target() (string, error) {
target := "debug"
if len(os.Args) >= 3 && !strings.HasPrefix(os.Args[2], "--") {
if os.Args[2] != "release" && os.Args[2] != "debug" {
return "", fmt.Errorf("Unknown target '%v'", os.Args[2])
}
target = os.Args[2]
}
return target, nil
}
func extraArgs() []string {
argStartIndex := 1
if len(os.Args) >= 2 {
argStartIndex = 2
if len(os.Args) > 2 && (os.Args[2] == "release" || os.Args[2] == "debug") {
argStartIndex = 3
}
}
return os.Args[argStartIndex:]
}
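// Worked example (illustrative): when os.Args is [builder, "run", "release", "--example-flag"],
// target() returns "release" and extraArgs() returns ["--example-flag"]; without an explicit
// target, "debug" is assumed and everything after the command is passed straight through
// to the doogie binary.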
func exeExt(baseName string) string {
if runtime.GOOS == "windows" {
return baseName + ".exe"
}
return baseName
}
func execCmd(name string, args ...string) error {
return execCmdInDir("", name, args...)
}
func execCmdInDir(dir string, name string, args ...string) error {
cmd := exec.Command(name, args...)
cmd.Dir = dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func copyResources(qmakePath string, target string) error {
if runtime.GOOS == "windows" {
return copyResourcesWindows(qmakePath, target)
}
return copyResourcesLinux(qmakePath, target)
}
func copyResourcesLinux(qmakePath string, target string) error {
if _, err := exec.LookPath("chrpath"); err != nil {
return fmt.Errorf("Unable to find chrpath on the PATH: %v", err)
}
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// Everything read only except by owner
// Copy over crash reporter cfg
err := copyAndChmodEachToDirIfNotPresent(0644, ".", target, "crash_reporter.cfg")
if err != nil {
return err
}
// Copy over some Qt DLLs
err = copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(filepath.Dir(qmakePath), "../lib"), target,
"libQt5Core.so.5",
"libQt5Gui.so.5",
"libQt5Sql.so.5",
"libQt5Widgets.so.5",
// TODO: See https://bugreports.qt.io/browse/QTBUG-53865
"libicui18n.so.54",
"libicuuc.so.54",
"libicudata.so.54",
// Needed for libqxcb platform
"libQt5XcbQpa.so.5",
"libQt5DBus.so.5",
)
if err != nil {
return err
}
// Some DLLs are needed in debug only
if target == "debug" {
err := copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(filepath.Dir(qmakePath), "../lib"), target,
"libQt5Network.so.5",
"libQt5Test.so.5",
"libQt5WebSockets.so.5",
)
if err != nil {
return err
}
}
// Need some plugins
// Before that, record whether the xcb plugin is there yet
hadXcbPlugin := true
xcbPluginPath := filepath.Join(target, "platforms", "libqxcb.so")
if _, err = os.Stat(xcbPluginPath); os.IsNotExist(err) {
hadXcbPlugin = false
}
copyPlugins(qmakePath, target, "imageformats", "qgif")
copyPlugins(qmakePath, target, "platforms", "qxcb")
copyPlugins(qmakePath, target, "sqldrivers", "qsqlite")
// If the xcb plugin wasn't there (but is now), change the rpath
if !hadXcbPlugin {
if err = execCmd("chrpath", "-r", "$ORIGIN/..", xcbPluginPath); err != nil {
return fmt.Errorf("Unable to run chrpath: %v", err)
}
}
// Copy over CEF libs
err = copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(cefDir, strings.Title(target)), target,
"libcef.so",
"snapshot_blob.bin",
"v8_context_snapshot.bin",
)
if err != nil {
return err
}
// Copy over CEF resources
cefResDir := filepath.Join(cefDir, "Resources")
err = copyAndChmodEachToDirIfNotPresent(0644, cefResDir, target,
"icudtl.dat",
"cef.pak",
"cef_100_percent.pak",
"cef_200_percent.pak",
"cef_extensions.pak",
"devtools_resources.pak",
)
if err != nil {
return err
}
// And CEF locales
targetLocaleDir := filepath.Join(target, "locales")
if err = os.MkdirAll(targetLocaleDir, 0744); err != nil {
return err
}
err = copyAndChmodEachToDirIfNotPresent(0644, filepath.Join(cefResDir, "locales"), targetLocaleDir, "en-US.pak")
return err
}
func copyResourcesWindows(qmakePath string, target string) error {
cefDir := os.Getenv("CEF_DIR")
if cefDir == "" {
return fmt.Errorf("Unable to find CEF_DIR env var")
}
// Copy over crash reporter cfg
err := copyEachToDirIfNotPresent(".", target, "crash_reporter.cfg")
if err != nil {
return err
}
// Copy over some Qt DLLs
qtDlls := []string{
"Qt5Core.dll",
"Qt5Gui.dll",
"Qt5Sql.dll",
"Qt5Widgets.dll",
}
// Debug libs are d.dll
if target == "debug" {
// Only need web sockets during debug
qtDlls = append(qtDlls, "Qt5WebSockets.dll", "Qt5Network.dll", "Qt5Test.dll")
for i := range qtDlls {
qtDlls[i] = strings.Replace(qtDlls[i], ".dll", "d.dll", -1)
}
// Also want the PDB files if they are there
for _, dll := range qtDlls {
qtDlls = append(qtDlls, strings.Replace(dll, ".dll", ".pdb", -1))
}
}
err = copyEachToDirIfNotPresent(filepath.Dir(qmakePath), target, qtDlls...)
if err != nil {
return err
}
// Need special ucrtbased.dll for debug builds
if target == "debug" {
err = copyEachToDirIfNotPresent("C:\\Program Files (x86)\\Windows Kits\\10\\bin\\x64\\ucrt",
target, "ucrtbased.dll")
if err != nil {
return err
}
}
// TODO: statically compile this, ref: https://github.com/cretz/doogie/issues/46
// Need some plugins
copyPlugins(qmakePath, target, "imageformats", "qgif")
copyPlugins(qmakePath, target, "platforms", "qwindows")
copyPlugins(qmakePath, target, "sqldrivers", "qsqlite")
copyPlugins(qmakePath, target, "styles", "qwindowsvistastyle")
// Copy over CEF libs
err = copyEachToDirIfNotPresent(filepath.Join(cefDir, strings.Title(target)), target,
"libcef.dll",
"chrome_elf.dll",
"snapshot_blob.bin",
"v8_context_snapshot.bin",
"d3dcompiler_47.dll",
"libEGL.dll",
"libGLESv2.dll",
)
if err != nil {
return err
}
// Copy over CEF resources
cefResDir := filepath.Join(cefDir, "Resources")
err = copyEachToDirIfNotPresent(cefResDir, target,
"icudtl.dat",
"cef.pak",
"cef_100_percent.pak",
"cef_200_percent.pak",
"cef_extensions.pak",
"devtools_resources.pak",
)
if err != nil {
return err
}
// And CEF locales
targetLocaleDir := filepath.Join(target, "locales")
if err = os.MkdirAll(targetLocaleDir, 0755); err != nil {
return err
}
err = copyEachToDirIfNotPresent(filepath.Join(cefResDir, "locales"), targetLocaleDir, "en-US.pak")
return err
}
func chmodEachInDir(mode os.FileMode, dir string, filenames ...string) error {
for _, filename := range filenames {
if err := os.Chmod(filepath.Join(dir, filename), mode); err != nil {
return err
}
}
return nil
}
func copyPlugins(qmakePath string, target string, dir string, plugins ...string) error {
srcDir := filepath.Join(qmakePath, "../../plugins", dir)
	if _, err := os.Stat(srcDir); err != nil {
return fmt.Errorf("Unable to find Qt plugins dir %v: %v", dir, err)
}
destDir := filepath.Join(target, dir)
if err := os.MkdirAll(destDir, 0755); err != nil {
return fmt.Errorf("Unable to create dir: %v", err)
}
for _, plugin := range plugins {
var fileName string
if runtime.GOOS == "linux" {
fileName = "lib" + plugin + ".so"
} else if target == "debug" {
fileName = plugin + "d.dll"
} else {
fileName = plugin + ".dll"
}
if err := copyAndChmodEachToDirIfNotPresent(0644, srcDir, destDir, fileName); err != nil {
return err
}
}
return nil
}
func copyDirIfNotPresent(srcDir string, destDir string) error {
// Note, this is not recursive, but it does preserve permissions
srcFi, err := os.Stat(srcDir)
if err != nil {
return fmt.Errorf("Unable to find src dir: %v", err)
}
if err = os.MkdirAll(destDir, srcFi.Mode()); err != nil {
return fmt.Errorf("Unable to create dest dir: %v", err)
}
files, err := ioutil.ReadDir(srcDir)
if err != nil {
return fmt.Errorf("Unable to read src dir: %v", err)
}
for _, file := range files {
srcFile := filepath.Join(srcDir, file.Name())
if err = copyToDirIfNotPresent(srcFile, destDir); err != nil {
return fmt.Errorf("Error copying file: %v", err)
}
if err = os.Chmod(srcFile, file.Mode()); err != nil {
return fmt.Errorf("Unable to chmod file: %v", err)
}
}
return nil
}
func copyAndChmodEachToDirIfNotPresent(mode os.FileMode, srcDir string, destDir string, srcFilenames ...string) error {
if err := copyEachToDirIfNotPresent(srcDir, destDir, srcFilenames...); err != nil {
return err
}
return chmodEachInDir(mode, destDir, srcFilenames...)
}
func copyEachToDirIfNotPresent(srcDir string, destDir string, srcFilenames ...string) error {
for _, srcFilename := range srcFilenames {
if err := copyToDirIfNotPresent(filepath.Join(srcDir, srcFilename), destDir); err != nil {
return err
}
}
return nil
}
func copyToDirIfNotPresent(src string, destDir string) error {
return copyIfNotPresent(src, filepath.Join(destDir, filepath.Base(src)))
}
func copyIfNotPresent(src string, dest string) error {
	if _, err := os.Stat(dest); err == nil {
		debugLogf("Skipping copying '%v' to '%v' because it already exists\n", src, dest)
		return nil
}
debugLogf("Copying %v to %v\n", src, dest)
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
inStat, err := in.Stat()
if err != nil {
return err
}
out, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE|os.O_TRUNC, inStat.Mode())
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
cerr := out.Close()
if err != nil {
return err
}
return cerr
}
func debugLogf(format string, v ...interface{}) {
if debugEnabled {
log.Printf(format, v...)
}
}
func createSingleDirTarGzFile(dir string, tarFilename string) error {
tarFile, err := os.Create(tarFilename)
if err != nil {
return err
}
defer tarFile.Close()
gw := gzip.NewWriter(tarFile)
defer gw.Close()
w := tar.NewWriter(gw)
defer w.Close()
return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
rel, err := filepath.Rel(dir, path)
if err != nil {
return err
}
tarPath := filepath.ToSlash(filepath.Join(filepath.Base(dir), rel))
srcPath := filepath.Join(dir, rel)
header, err := tar.FileInfoHeader(info, "")
if err != nil {
return err
}
header.Name = tarPath
// Remove owner info
header.Uname = ""
header.Gname = ""
header.Uid = 0
header.Gid = 0
if err := w.WriteHeader(header); err != nil {
return err
}
src, err := os.Open(srcPath)
if err != nil {
return err
}
defer src.Close()
_, err = io.Copy(w, src)
return err
})
}
func createSingleDirZipFile(dir string, zipFilename string) error {
zipFile, err := os.Create(zipFilename)
if err != nil {
return err
}
defer zipFile.Close()
w := zip.NewWriter(zipFile)
defer w.Close()
return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
rel, err := filepath.Rel(dir, path)
if err != nil {
return err
}
zipPath := filepath.ToSlash(filepath.Join(filepath.Base(dir), rel))
srcPath := filepath.Join(dir, rel)
dest, err := w.Create(zipPath)
if err != nil {
return err
}
src, err := os.Open(srcPath)
if err != nil {
return err
}
defer src.Close()
_, err = io.Copy(dest, src)
return err
})
}
| [
"\"CEF_DIR\"",
"\"CEF_DIR\"",
"\"DEPOT_TOOLS_DIR\"",
"\"CEF_DIR\"",
"\"CEF_DIR\""
]
| []
| [
"CEF_DIR",
"DEPOT_TOOLS_DIR"
]
| [] | ["CEF_DIR", "DEPOT_TOOLS_DIR"] | go | 2 | 0 | |
tests/validation/tests/v3_api/test_hosted_eks_cluster.py | import os
from .common import get_user_client
from .common import random_test_name
from .common import validate_cluster
from .common import wait_for_cluster_delete
from .test_create_ha import resource_prefix
from lib.aws import AmazonWebServices
import pytest
EKS_ACCESS_KEY = os.environ.get('RANCHER_EKS_ACCESS_KEY', "")
EKS_SECRET_KEY = os.environ.get('RANCHER_EKS_SECRET_KEY', "")
EKS_REGION = os.environ.get('RANCHER_EKS_REGION', "us-east-2")
EKS_K8S_VERSION = os.environ.get('RANCHER_EKS_K8S_VERSION', "1.17")
EKS_NODESIZE = os.environ.get('RANCHER_EKS_NODESIZE', 2)
KMS_KEY = os.environ.get('RANCHER_KMS_KEY', None)
SECRET_ENCRYPTION = os.environ.get('RANCHER_SECRET_ENCRYPTION', False)
LOGGING_TYPES = os.environ.get('RANCHER_LOGGING_TYPES', None)
EKS_SERVICE_ROLE = os.environ.get('RANCHER_EKS_SERVICE_ROLE', None)
EKS_SUBNETS = os.environ.get('RANCHER_EKS_SUBNETS', None)
EKS_SECURITYGROUP = os.environ.get('RANCHER_EKS_SECURITYGROUP', None)
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
EKS_PUBLIC_ACCESS_SOURCES = \
os.environ.get('RANCHER_EKS_PUBLIC_ACCESS_SOURCES', None)
ekscredential = pytest.mark.skipif(not (EKS_ACCESS_KEY and EKS_SECRET_KEY),
reason='EKS Credentials not provided, '
'cannot create cluster')
DEFAULT_TIMEOUT_EKS = 1200
IMPORTED_EKS_CLUSTERS = []
cluster_details = {}
eks_config = {
"imported": False,
"kubernetesVersion": EKS_K8S_VERSION,
"privateAccess": False,
"publicAccess": True,
"publicAccessSources": [],
"securityGroups": [],
"serviceRole": "",
"subnets": [],
"tags": {},
"loggingTypes": [],
"secretsEncryption": False,
"kmsKey": "",
"region": EKS_REGION,
"type": "eksclusterconfigspec",
"nodeGroups": [{
"version": EKS_K8S_VERSION,
"desiredSize": EKS_NODESIZE,
"diskSize": 20,
"gpu": False,
"instanceType": "t3.medium",
"maxSize": EKS_NODESIZE,
"minSize": EKS_NODESIZE,
"nodegroupName": random_test_name("test-ng"),
"type": "nodeGroup",
"subnets": [],
"tags": {},
"labels": {},
"ec2SshKey": ""
}]
}
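# Illustrative overrides (values are placeholders): the defaults above are driven by the
# RANCHER_* environment variables read at the top of this module, so a run can be
# re-targeted without editing the file, e.g.
#   export RANCHER_EKS_ACCESS_KEY=<key>      # required, otherwise ekscredential skips the tests
#   export RANCHER_EKS_SECRET_KEY=<secret>
#   export RANCHER_EKS_REGION=us-west-2
#   export RANCHER_EKS_NODESIZE=3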
@ekscredential
def test_eks_v2_hosted_cluster_create_basic():
"""
Create a hosted EKS v2 cluster with all default values from the UI
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
create_and_validate_eks_cluster(cluster_config)
# validate cluster created
validate_eks_cluster(cluster_name, eks_config_temp)
# validate nodegroups created
validate_nodegroup(eks_config_temp["nodeGroups"], cluster_name)
@ekscredential
def test_eks_v2_hosted_cluster_create_all():
"""
Create a hosted EKS v2 cluster by giving in value of
every param of eks config from UI
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_all(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
create_and_validate_eks_cluster(cluster_config)
# validate cluster created
validate_eks_cluster(cluster_name, eks_config_temp)
# validate nodegroups created
validate_nodegroup(eks_config_temp["nodeGroups"], cluster_name)
@ekscredential
def test_eks_v2_hosted_cluster_edit():
"""
Create a hosted EKS v2 cluster.
Edit the following input fields:
cluster level tags, add node groups,
add/delete logging types, add new cloud cred
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
client, cluster = create_and_validate_eks_cluster(cluster_config)
# edit cluster
cluster = edit_eks_cluster(cluster, eks_config_temp)
# validate cluster created
validate_eks_cluster(cluster_name, eks_config_temp)
# validate nodegroups created
validate_nodegroup(eks_config_temp["nodeGroups"], cluster_name)
@ekscredential
def test_eks_v2_hosted_cluster_delete():
"""
Delete a created hosted EKS v2 cluster and verify it is deleted in the backend
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
client, cluster = create_and_validate_eks_cluster(cluster_config)
# delete cluster
client.delete(cluster)
wait_for_cluster_delete(client, cluster)
AmazonWebServices().wait_for_delete_eks_cluster(cluster_name)
@ekscredential
def test_eks_v2_create_import_cluster():
ec2_cloud_credential = get_aws_cloud_credential()
display_name = create_resources_eks()
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = {
"amazonCredentialSecret": ec2_cloud_credential.id,
"displayName": display_name,
"imported": True,
"region": EKS_REGION,
"type": "eksclusterconfigspec"
}
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
create_and_validate_eks_cluster(cluster_config,
imported=True)
def create_resources_eks():
cluster_name = resource_prefix + "-ekscluster"
AmazonWebServices().create_eks_cluster(cluster_name)
IMPORTED_EKS_CLUSTERS.append(cluster_name)
AmazonWebServices().wait_for_eks_cluster_state(cluster_name, "ACTIVE")
return cluster_name
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
def fin():
client = get_user_client()
for name, cluster in cluster_details.items():
if len(client.list_cluster(name=name).data) > 0:
client.delete(cluster)
for display_name in IMPORTED_EKS_CLUSTERS:
AmazonWebServices().delete_eks_cluster(cluster_name=display_name)
request.addfinalizer(fin)
def create_and_validate_eks_cluster(cluster_config, imported=False):
"""
Create and validate EKS cluster
:param cluster_config: config of the cluster
:param imported: imported is true when user creates an imported cluster
:return: client, cluster
"""
client = get_user_client()
print("Creating EKS cluster")
print("\nEKS Configuration: {}".format(cluster_config))
cluster = client.create_cluster(cluster_config)
print(cluster)
cluster_details[cluster["name"]] = cluster
intermediate_state = False if imported else True
cluster = validate_cluster(client, cluster,
check_intermediate_state=intermediate_state,
skipIngresscheck=True,
timeout=DEFAULT_TIMEOUT_EKS)
return client, cluster
def get_aws_cloud_credential():
"""
Create an AWS cloud creds
:return: ec2_cloud_credential
"""
client = get_user_client()
ec2_cloud_credential_config = {
"accessKey": EKS_ACCESS_KEY,
"secretKey": EKS_SECRET_KEY
}
ec2_cloud_credential = client.create_cloud_credential(
amazonec2credentialConfig=ec2_cloud_credential_config
)
return ec2_cloud_credential
def get_logging_types():
"""
Split all logging types
:return: logging_types
"""
logging_types = []
if LOGGING_TYPES is not None:
temp = LOGGING_TYPES.split(",")
for logging in temp:
logging_types.append(logging)
return logging_types
def get_eks_config_basic(cluster_name):
"""
FIlling in params for a basic EKS v2 cluster
created through UI with default values
:param cluster_name:
:return: eks_config
"""
ec2_cloud_credential = get_aws_cloud_credential()
global eks_config
eks_config_temp = eks_config.copy()
eks_config_temp["displayName"] = cluster_name
eks_config_temp["amazonCredentialSecret"] = ec2_cloud_credential.id
return eks_config_temp
def get_eks_config_all(cluster_name):
"""
FIlling in params for a EKS v2 cluster
created through UI with all values give
:param cluster_name:
:return: eks_config
"""
ec2_cloud_credential = get_aws_cloud_credential()
global eks_config
public_access = [] if EKS_PUBLIC_ACCESS_SOURCES \
is None else EKS_PUBLIC_ACCESS_SOURCES.split(",")
eks_config_temp = eks_config.copy()
eks_config_temp["displayName"] = cluster_name
eks_config_temp["amazonCredentialSecret"] = ec2_cloud_credential.id
if KMS_KEY is not None: eks_config_temp["kmsKey"] = KMS_KEY
if SECRET_ENCRYPTION: eks_config_temp["secretsEncryption"] = \
SECRET_ENCRYPTION
eks_config_temp["subnets"] = [] \
if EKS_SUBNETS is None else EKS_SUBNETS.split(",")
eks_config_temp["securityGroups"] = [] \
if EKS_SECURITYGROUP is None else EKS_SECURITYGROUP.split(",")
eks_config_temp["publicAccessSources"] = public_access
eks_config_temp["tags"] = {"cluster-level": "tag1"}
eks_config_temp["nodeGroups"] = []
eks_config_temp["nodeGroups"].append(get_new_node())
eks_config_temp["nodeGroups"][0]["tags"] = \
{"nodegroup-level": "tag1", "nodegroup-level": "tag2"}
eks_config_temp["nodeGroups"][0]["labels"] = {"label1": "value1"}
eks_config_temp["loggingTypes"] = get_logging_types()
eks_config_temp["serviceRole"] = EKS_SERVICE_ROLE
eks_config_temp["ec2SshKey"] = AWS_SSH_KEY_NAME
return eks_config_temp
def get_new_node():
"""
Create a new node group
:return: new_nodegroup
"""
new_nodegroup = {
"desiredSize": EKS_NODESIZE,
"diskSize": 20,
"gpu": False,
"instanceType": "t3.medium",
"maxSize": EKS_NODESIZE,
"minSize": EKS_NODESIZE,
"nodegroupName": random_test_name("test-ng"),
"ec2SshKey": AWS_SSH_KEY_NAME.split(".pem")[0],
"type": "nodeGroup"
}
return new_nodegroup
def validate_eks_cluster(cluster_name, eks_config_temp):
"""
Validate EKS cluster details
:param cluster_name: cluster name to be validated
:param eks_config_temp: eks_config
:return:
"""
eks_cluster = AmazonWebServices().describe_eks_cluster(cluster_name)
print("\nEKS cluster deployed in EKS Console: {}".
format(eks_cluster["cluster"]))
# check k8s version
assert eks_cluster["cluster"]["version"] == \
eks_config_temp["kubernetesVersion"], "K8s version is incorrect"
# check cluster status
assert eks_cluster["cluster"]["status"] == "ACTIVE", \
"Cluster is NOT in active state"
# verify security groups
    assert sorted(eks_cluster["cluster"]["resourcesVpcConfig"]["securityGroupIds"]) \
        == sorted(eks_config_temp["securityGroups"]), \
        "Mismatch in Security Groups"
# verify subnets
if "subnets" in eks_config_temp.keys():
        assert sorted(eks_cluster["cluster"]["resourcesVpcConfig"]["subnetIds"]) \
            == sorted(eks_config_temp["subnets"]), "Mismatch in Subnets"
# verify logging types
if "loggingTypes" in eks_config_temp.keys():
for logging in eks_cluster["cluster"]["logging"]["clusterLogging"]:
if logging["enabled"]:
assert logging["types"].sort() \
== eks_config_temp["loggingTypes"].sort() , \
"Mismatch in Logging types set"
# verify serviceRole
if "serviceRole" in eks_config_temp.keys():
assert eks_config_temp["serviceRole"] in \
eks_cluster["cluster"]["roleArn"]
# verify publicAccessSources
if "publicAccessSources" in eks_config_temp.keys():
assert eks_config_temp["publicAccessSources"].sort() == \
eks_cluster["cluster"]["resourcesVpcConfig"]["publicAccessCidrs"].sort()
def edit_eks_cluster(cluster, eks_config_temp):
"""
Edit EKS v2 cluster
:param cluster: cluster
:param eks_config_temp: eks_config
:return: cluster
"""
# edit eks_config_temp
# add new cloud cred
ec2_cloud_credential_new = get_aws_cloud_credential()
eks_config_temp["amazonCredentialSecret"] = ec2_cloud_credential_new.id
# add cluster level tags
eks_config_temp["tags"] = {"cluster-level-2": "tag2"}
# add node group
new_nodegroup = get_new_node()
eks_config_temp["nodeGroups"].append(new_nodegroup)
# modify logging
eks_config_temp["loggingTypes"] = ["audit","api","authenticator"]
client = get_user_client()
client.update(cluster, name=cluster.name, eksConfig=eks_config_temp)
cluster = validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=True,
timeout=DEFAULT_TIMEOUT_EKS)
return cluster
def validate_nodegroup(nodegroup_list, cluster_name):
"""
Validate nodegroup details
:param nodegroup_list: list of nodegroups
:param cluster_name: cluster name
:return:
"""
for nodegroup in nodegroup_list:
print("nodegroup:", nodegroup)
eks_nodegroup = AmazonWebServices().describe_eks_nodegroup(
cluster_name, nodegroup["nodegroupName"]
)
print("\nNode Group from EKS console: {}".format(eks_nodegroup))
# k8s version check
eks_cluster = AmazonWebServices().describe_eks_cluster(cluster_name)
assert eks_cluster["cluster"]["version"] == \
eks_nodegroup["nodegroup"]["version"], \
"Mismatch between K8s version of cluster and nodegroup"
# status of nodegroup
assert eks_nodegroup["nodegroup"]["status"] == "ACTIVE", \
"Nodegroups are not in active status"
# check scalingConfig
assert nodegroup["maxSize"] \
== eks_nodegroup["nodegroup"]["scalingConfig"]["maxSize"], \
"maxSize is incorrect on the nodes"
assert nodegroup["minSize"] \
== eks_nodegroup["nodegroup"]["scalingConfig"]["minSize"], \
"minSize is incorrect on the nodes"
assert nodegroup["minSize"] \
== eks_nodegroup["nodegroup"]["scalingConfig"]["minSize"], \
"minSize is incorrect on the nodes"
# check instance type
assert nodegroup["instanceType"] \
== eks_nodegroup["nodegroup"]["instanceTypes"][0], \
"instanceType is incorrect on the nodes"
# check disk size
assert nodegroup["diskSize"] \
== eks_nodegroup["nodegroup"]["diskSize"], \
"diskSize is incorrect on the nodes"
# check ec2SshKey
if "ec2SshKey" in nodegroup.keys() and \
nodegroup["ec2SshKey"] is not "":
assert nodegroup["ec2SshKey"] \
== eks_nodegroup["nodegroup"]["remoteAccess"]["ec2SshKey"], \
"Ssh key is incorrect on the nodes"
| []
| []
| [
"RANCHER_EKS_PUBLIC_ACCESS_SOURCES",
"RANCHER_EKS_SERVICE_ROLE",
"RANCHER_EKS_REGION",
"RANCHER_EKS_SUBNETS",
"RANCHER_EKS_K8S_VERSION",
"AWS_SSH_KEY_NAME",
"RANCHER_EKS_SECRET_KEY",
"RANCHER_SECRET_ENCRYPTION",
"RANCHER_EKS_ACCESS_KEY",
"RANCHER_LOGGING_TYPES",
"RANCHER_EKS_SECURITYGROUP",
"RANCHER_EKS_NODESIZE",
"RANCHER_KMS_KEY"
]
| [] | ["RANCHER_EKS_PUBLIC_ACCESS_SOURCES", "RANCHER_EKS_SERVICE_ROLE", "RANCHER_EKS_REGION", "RANCHER_EKS_SUBNETS", "RANCHER_EKS_K8S_VERSION", "AWS_SSH_KEY_NAME", "RANCHER_EKS_SECRET_KEY", "RANCHER_SECRET_ENCRYPTION", "RANCHER_EKS_ACCESS_KEY", "RANCHER_LOGGING_TYPES", "RANCHER_EKS_SECURITYGROUP", "RANCHER_EKS_NODESIZE", "RANCHER_KMS_KEY"] | python | 13 | 0 | |
test/extended/util/image/image.go | package image
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"os"
"regexp"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
)
func init() {
allowedImages = map[string]int{
// used by jenkins tests
"quay.io/redhat-developer/nfs-server:1.0": -1,
// used by open ldap tests
"docker.io/mrogers950/origin-openldap-test:fedora29": -1,
// used by multicast test, should be moved to publish to quay
"docker.io/openshift/test-multicast:latest": -1,
// used by oc mirror test, should be moved to publish to quay
"docker.io/library/registry:2.7.1": -1,
		// used by build s2i e2e's to verify that builders with USER root are not allowed
// the github.com/openshift/build-test-images repo is built out of github.com/openshift/release
"registry.ci.openshift.org/ocp/4.8:test-build-roots2i": -1,
		// used by the rest of the build s2i e2e tests
"registry.ci.openshift.org/ocp/4.8:test-build-simples2i": -1,
// moved to GCR
"k8s.gcr.io/sig-storage/csi-attacher:v2.2.0": -1,
"k8s.gcr.io/sig-storage/csi-attacher:v3.0.0": -1,
"k8s.gcr.io/sig-storage/csi-node-driver-registrar:v1.2.0": -1,
"k8s.gcr.io/sig-storage/csi-node-driver-registrar:v1.3.0": -1,
"k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0": -1,
"k8s.gcr.io/sig-storage/csi-provisioner:v1.6.0": -1,
"k8s.gcr.io/sig-storage/csi-provisioner:v2.0.0": -1,
"k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0": -1,
"k8s.gcr.io/sig-storage/csi-resizer:v0.4.0": -1,
"k8s.gcr.io/sig-storage/csi-resizer:v0.5.0": -1,
"k8s.gcr.io/sig-storage/csi-resizer:v1.1.0": -1,
"k8s.gcr.io/sig-storage/csi-snapshotter:v2.0.1": -1,
"k8s.gcr.io/sig-storage/csi-snapshotter:v2.1.0": -1,
"k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2": -1,
"k8s.gcr.io/sig-storage/hostpathplugin:v1.4.0": -1,
"k8s.gcr.io/sig-storage/livenessprobe:v1.1.0": -1,
"k8s.gcr.io/sig-storage/mock-driver:v4.0.2": -1,
"k8s.gcr.io/sig-storage/mock-driver:v4.1.0": -1,
"k8s.gcr.io/sig-storage/snapshot-controller:v2.1.1": -1,
"k8s.gcr.io/sig-storage/snapshot-controller:v3.0.2": -1,
// allowed upstream kube images - index and value must match upstream or
// tests will fail (vendor/k8s.io/kubernetes/test/utils/image/manifest.go)
"k8s.gcr.io/e2e-test-images/agnhost:2.32": 1,
"k8s.gcr.io/e2e-test-images/busybox:1.29-1": 7,
"k8s.gcr.io/e2e-test-images/nginx:1.14-1": 23,
"k8s.gcr.io/e2e-test-images/nginx:1.15-1": 24,
"k8s.gcr.io/e2e-test-images/redis:5.0.5-alpine": 34,
}
images = GetMappedImages(allowedImages, os.Getenv("KUBE_TEST_REPO"))
}
var (
images map[string]string
allowedImages map[string]int
)
// ReplaceContents ensures that the provided yaml or json has the
// correct embedded image content.
func ReplaceContents(data []byte) ([]byte, error) {
// exactImageFormat attempts to match a string on word boundaries
const exactImageFormat = `\b%s\b`
patterns := make(map[string]*regexp.Regexp)
for from, to := range images {
pattern := fmt.Sprintf(exactImageFormat, regexp.QuoteMeta(from))
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
patterns[to] = re
}
for to, pattern := range patterns {
data = pattern.ReplaceAll(data, []byte(to))
}
return data, nil
}
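// For example (illustrative): with KUBE_TEST_REPO set to "quay.io/org/repo", a manifest
// that references k8s.gcr.io/e2e-test-images/busybox:1.29-1 is rewritten in place to
// quay.io/org/repo:e2e-7-<mangled-name>-<hash>, following the tag scheme built in
// GetMappedImages below.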
// MustReplaceContents invokes ReplaceContents and panics if any
// replacement error occurs.
func MustReplaceContents(data []byte) []byte {
data, err := ReplaceContents(data)
if err != nil {
panic(err)
}
return data
}
// LocationFor returns the appropriate URL for the provided image.
func LocationFor(image string) string {
pull, ok := images[image]
if !ok {
panic(fmt.Sprintf(`The image %q is not one of the pre-approved test images.
To add a new image to OpenShift tests you must follow the process described in
the test/extended/util/image/README.md file.`, image))
}
return pull
}
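// Illustrative call (not from the original file): tests resolve pull specs through
// this helper so mirrored registries are honored, e.g.
//
//	pullSpec := LocationFor("k8s.gcr.io/e2e-test-images/agnhost:2.32")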
// ShellImage returns a docker pull spec that any pod on the cluster
// has access to that contains bash and standard commandline tools.
// This image should be used for all generic e2e shell scripts. This
// image has oc.
//
// If the script you are running does not have a specific tool available
// that is required, open an issue to github.com/openshift/images in the
// images/tools directory to discuss adding that package. In general, try
// to avoid the need to add packages by using simpler concepts or consider
// extending an existing image.
func ShellImage() string {
return "image-registry.openshift-image-registry.svc:5000/openshift/tools:latest"
}
// LimitedShellImage returns a docker pull spec that any pod on the cluster
// has access to that contains bash and standard commandline tools.
// This image should be used when you only need oc and can't use the shell image.
// This image has oc.
//
// TODO: this will be removed when https://bugzilla.redhat.com/show_bug.cgi?id=1843232
// is fixed
func LimitedShellImage() string {
return "image-registry.openshift-image-registry.svc:5000/openshift/cli:latest"
}
// OpenLDAPTestImage returns the LDAP test image.
func OpenLDAPTestImage() string {
return LocationFor("docker.io/mrogers950/origin-openldap-test:fedora29")
}
// OriginalImages returns a map of the original image names.
func OriginalImages() map[string]int {
images := make(map[string]int)
for k, v := range allowedImages {
images[k] = v
}
return images
}
// Exceptions is a list of images we don't mirror temporarily due to various
// problems. This list should ideally be empty.
var Exceptions = sets.NewString(
"mcr.microsoft.com/windows:1809", // https://issues.redhat.com/browse/PROJQUAY-1874
)
// Images returns a map of all images known to the test package.
func Images() map[string]struct{} {
copied := make(map[string]struct{})
for k := range images {
copied[k] = struct{}{}
}
return copied
}
// GetMappedImages returns the images if they were mapped to the provided
// image repository. The keys of the returned map are the same as the keys
// in originalImages and the values are the equivalent name in the target
// repo.
func GetMappedImages(originalImages map[string]int, repo string) map[string]string {
if len(repo) == 0 {
images := make(map[string]string)
for k := range originalImages {
images[k] = k
}
return images
}
configs := make(map[string]string)
reCharSafe := regexp.MustCompile(`[^\w]`)
reDashes := regexp.MustCompile(`-+`)
h := sha256.New()
const (
// length of hash in base64-url chosen to minimize possible collisions (64^16 possible)
hashLength = 16
// maximum length of a Docker spec image tag
maxTagLength = 127
// when building a tag, there are at most 6 characters in the format (e2e and 3 dashes),
// and we should allow up to 10 digits for the index and additional qualifiers we may add
// in the future
tagFormatCharacters = 6 + 10
)
parts := strings.SplitN(repo, "/", 2)
registry, destination := parts[0], parts[1]
for pullSpec, index := range originalImages {
// Build a new tag with a the index, a hash of the image spec (to be unique) and
// shorten and make the pull spec "safe" so it will fit in the tag
h.Reset()
h.Write([]byte(pullSpec))
hash := base64.RawURLEncoding.EncodeToString(h.Sum(nil))[:hashLength]
shortName := reCharSafe.ReplaceAllLiteralString(pullSpec, "-")
shortName = reDashes.ReplaceAllLiteralString(shortName, "-")
maxLength := maxTagLength - hashLength - tagFormatCharacters
if len(shortName) > maxLength {
shortName = shortName[len(shortName)-maxLength:]
}
var newTag string
if index == -1 {
newTag = fmt.Sprintf("e2e-%s-%s", shortName, hash)
} else {
newTag = fmt.Sprintf("e2e-%d-%s-%s", index, shortName, hash)
}
configs[pullSpec] = fmt.Sprintf("%s/%s:%s", registry, destination, newTag)
}
return configs
}
| [
"\"KUBE_TEST_REPO\""
]
| []
| [
"KUBE_TEST_REPO"
]
| [] | ["KUBE_TEST_REPO"] | go | 1 | 0 | |
vendor/github.com/knative/pkg/test/e2e_flags.go | /*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains logic to encapsulate flags which are needed to specify
// what cluster, etc. to use for e2e tests.
package test
import (
"flag"
"os"
"os/user"
"path"
)
// Flags holds the command line flags or defaults for settings in the user's environment.
// See EnvironmentFlags for a list of supported fields.
var Flags = initializeFlags()
// EnvironmentFlags define the flags that are needed to run the e2e tests.
type EnvironmentFlags struct {
Cluster string // K8s cluster (defaults to $K8S_CLUSTER_OVERRIDE)
Kubeconfig string // Path to kubeconfig (defaults to ./kube/config)
Namespace string // K8s namespace (blank by default, to be overwritten by test suite)
LogVerbose bool // Enable verbose logging
EmitMetrics bool // Emit metrics
}
func initializeFlags() *EnvironmentFlags {
var f EnvironmentFlags
defaultCluster := os.Getenv("K8S_CLUSTER_OVERRIDE")
flag.StringVar(&f.Cluster, "cluster", defaultCluster,
"Provide the cluster to test against. Defaults to $K8S_CLUSTER_OVERRIDE, then current cluster in kubeconfig if $K8S_CLUSTER_OVERRIDE is unset.")
var defaultKubeconfig string
if usr, err := user.Current(); err == nil {
defaultKubeconfig = path.Join(usr.HomeDir, ".kube/config")
}
flag.StringVar(&f.Kubeconfig, "kubeconfig", defaultKubeconfig,
"Provide the path to the `kubeconfig` file you'd like to use for these tests. The `current-context` will be used.")
flag.StringVar(&f.Namespace, "namespace", "",
"Provide the namespace you would like to use for these tests.")
flag.BoolVar(&f.LogVerbose, "logverbose", false,
"Set this flag to true if you would like to see verbose logging.")
flag.BoolVar(&f.EmitMetrics, "emitmetrics", false,
"Set this flag to true if you would like tests to emit metrics, e.g. latency of resources being realized in the system.")
return &f
}
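
// Illustrative sketch (not part of the original file): a test binary importing this
// package would typically read the parsed values after flag.Parse(), e.g.
//
//	flag.Parse()
//	if test.Flags.LogVerbose {
//		// enable verbose test logging
//	}
//	kubeconfig := test.Flags.Kubeconfig
//	_ = kubeconfig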
| ["\"K8S_CLUSTER_OVERRIDE\""] | [] | ["K8S_CLUSTER_OVERRIDE"] | [] | ["K8S_CLUSTER_OVERRIDE"] | go | 1 | 0 | |
bcbio/utils.py | """Helpful utilities for building analysis pipelines.
"""
import glob
import gzip
import os
import tempfile
import time
import shutil
import contextlib
import itertools
import functools
import random
import fnmatch
import subprocess
import sys
import types
import six
import toolz as tz
import yaml
from collections import Mapping, OrderedDict
try:
from concurrent import futures
except ImportError:
try:
import futures
except ImportError:
futures = None
@contextlib.contextmanager
def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
        yield six.moves.map  # itertools.imap does not exist on Python 3; six.moves.map is lazy on both
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown()
def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
def transform_to(ext):
"""
    Decorator to create an output filename from an input filename with
    the specified extension. Changes the extension; in_file is transformed
    to a new type.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
    @transform_to(".bam")
    f("the/input/path/file.sam") ->
    f("the/input/path/file.sam", out_file="the/input/path/file.bam")
    @transform_to(".bam")
    f("the/input/path/file.sam", out_dir="results") ->
    f("the/input/path/file.sam", out_file="results/file.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = replace_suffix(os.path.basename(in_path), ext)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def filter_to(word):
"""
Decorator to create an output filename from an input filename by
adding a word onto the stem. in_file is filtered by the function
and the results are written to out_file. You would want to use
this over transform_to if you don't know the extension of the file
going in. This also memoizes the output file.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
    @filter_to(".foo")
    f("the/input/path/file.sam") ->
    f("the/input/path/file.sam", out_file="the/input/path/file.foo.sam")
    @filter_to(".foo")
    f("the/input/path/file.sam", out_dir="results") ->
    f("the/input/path/file.sam", out_file="results/file.foo.sam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = append_stem(os.path.basename(in_path), word)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def memoize_outfile(ext=None, stem=None):
"""
Memoization decorator.
See docstring for transform_to and filter_to for details.
"""
if ext:
return transform_to(ext)
if stem:
return filter_to(stem)
def to_single_data(input):
"""Convert an input to a single bcbio data/world object.
Handles both single sample cases (CWL) and all sample cases (standard bcbio).
"""
if (isinstance(input, (list, tuple)) and len(input) == 1):
return input[0]
else:
assert isinstance(input, dict), input
return input
def unpack_worlds(items):
"""Handle all the ways we can pass multiple samples for back-compatibility.
"""
# Unpack nested lists of samples grouped together (old IPython style)
if isinstance(items[0], (list, tuple)) and len(items[0]) == 1:
out = []
for d in items:
assert len(d) == 1 and isinstance(d[0], dict), len(d)
out.append(d[0])
# Unpack a single argument with multiple samples (CWL style)
elif isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(items[0], (list, tuple)):
out = items[0]
else:
out = items
return out
def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname
@contextlib.contextmanager
def chdir(new_dir):
"""Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
# On busy filesystems can have issues accessing main directory. Allow retries
num_tries = 0
max_tries = 5
cur_dir = None
while cur_dir is None:
try:
cur_dir = os.getcwd()
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
safe_makedir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
@contextlib.contextmanager
def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname)
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False
def get_size(path):
""" Returns the size in bytes if `path` is a file,
or the size of all files in `path` if it's a directory.
Analogous to `du -s`.
"""
if os.path.isfile(path):
return os.path.getsize(path)
return sum(get_size(os.path.join(path, f)) for f in os.listdir(path))
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
os.path.getmtime(fname) >= os.path.getmtime(cmp_fname))
except OSError:
return False
def create_dirs(config, names=None):
if names is None:
names = config["dir"].keys()
for dname in names:
d = config["dir"][dname]
safe_makedir(d)
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
for ext in ["", ".bai"]:
if os.path.exists(fname + ext):
with open(fname + ext, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason)
def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = six.moves.configparser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config
def add_full_path(dirname, basedir=None):
if basedir is None:
basedir = os.getcwd()
if not dirname.startswith("/"):
dirname = os.path.join(basedir, dirname)
return dirname
def splitext_plus(f):
"""Split on file extensions, allowing for zipped extensions.
"""
base, ext = os.path.splitext(f)
if ext in [".gz", ".bz2", ".zip"]:
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
return base, ext
def remove_safe(f):
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except OSError:
pass
def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin
if file_exists(target):
return target
shutil.move(origin, target)
return target
def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname]
def remove_plus(orig):
"""Remove a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext):
remove_safe(orig + ext)
def copy_plus(orig, new):
"""Copy a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext)
def symlink_plus(orig, new):
"""Create relative symlinks and handle associated biological index files.
"""
orig = os.path.abspath(orig)
if not os.path.exists(orig):
raise RuntimeError("File not found: %s" % orig)
for ext in ["", ".idx", ".gbi", ".tbi", ".bai", ".fai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
with chdir(os.path.dirname(new)):
remove_safe(new + ext)
# Work around symlink issues on some filesystems. Randomly
# fail to symlink.
try:
os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext))
except OSError:
if not os.path.exists(new + ext) or not os.path.lexists(new + ext):
remove_safe(new + ext)
shutil.copyfile(orig + ext, new + ext)
orig_noext = splitext_plus(orig)[0]
new_noext = splitext_plus(new)[0]
for sub_ext in [".bai", ".dict"]:
if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext):
with chdir(os.path.dirname(new_noext)):
os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
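
# Illustrative sketch (not part of the original module; paths are hypothetical):
# link a BAM into a work directory; the .bai/.dict companions sitting next to the
# original file are linked along with it.
#
#   symlink_plus("/data/sample1.bam", "work/align/sample1.bam")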
def open_gzipsafe(f, is_gz=False):
if f.endswith(".gz") or is_gz:
if six.PY3:
return gzip.open(f, "rt", encoding="utf-8", errors="ignore")
else:
return gzip.open(f)
else:
if six.PY3:
return open(f, encoding="utf-8", errors="ignore")
else:
return open(f)
def is_empty_gzipsafe(f):
h = open_gzipsafe(f)
is_empty = len(h.read(1)) > 0
h.close()
return is_empty
def append_stem(to_transform, word):
"""
renames a filename or list of filenames with 'word' appended to the stem
of each one:
example: append_stem("/path/to/test.sam", "_filtered") ->
"/path/to/test_filtered.sam"
"""
if is_sequence(to_transform):
return [append_stem(f, word) for f in to_transform]
elif is_string(to_transform):
(base, ext) = splitext_plus(to_transform)
return "".join([base, word, ext])
else:
raise ValueError("append_stem takes a single filename as a string or "
"a list of filenames to transform.")
def replace_suffix(to_transform, suffix):
"""
replaces the suffix on a filename or list of filenames
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
"""
if is_sequence(to_transform):
transformed = []
for f in to_transform:
(base, _) = os.path.splitext(f)
transformed.append(base + suffix)
return transformed
elif is_string(to_transform):
(base, _) = os.path.splitext(to_transform)
return base + suffix
else:
raise ValueError("replace_suffix takes a single filename as a string or "
"a list of filenames to transform.")
# ## Functional programming
def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk
def robust_partition_all(n, iterable):
"""
replaces partition_all with a more robust version.
Workaround for a segfault in pybedtools when using a BedTool as an iterator:
https://github.com/daler/pybedtools/issues/88 for the discussion
"""
it = iter(iterable)
while True:
x = []
for _ in range(n):
try:
x.append(next(it))
            except StopIteration:
                yield x
                # Stopping the generator here is required: omitting it results in a
                # segfault with pybedtools. Use a plain `return` rather than raising
                # StopIteration, which PEP 479 turns into a RuntimeError inside
                # generators on Python 3.7+.
                return
yield x
def partition(pred, iterable, tolist=False):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
ifalse = six.moves.filterfalse(pred, t1)
itrue = six.moves.filter(pred, t2)
if tolist:
return list(ifalse), list(itrue)
else:
return ifalse, itrue
# ## Dealing with configuration files
def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.safe_load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.items():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.items():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
def safe_to_float(x):
"""Convert to float, handling None and non-float inputs.
Useful for cleaning complicated output from variant callers.
"""
if x is None:
return None
else:
try:
return float(x)
except ValueError:
return None
def get_in(d, t, default=None):
"""
look up if you can get a tuple of values from a nested dictionary,
each item in the tuple a deeper layer
example: get_in({1: {2: 3}}, (1, 2)) -> 3
example: get_in({1: {2: 3}}, (2, 3)) -> {}
"""
return tz.get_in(t, d, default)
def flatten(l):
"""
flatten an irregular list of lists
example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6]
lifted from: http://stackoverflow.com/questions/2158395/
"""
for el in l:
if isinstance(el, (list, tuple)):
for sub in flatten(el):
yield sub
else:
yield el
def is_sequence(arg):
"""
check if 'arg' is a sequence
example: arg([]) -> True
example: arg("lol") -> False
"""
return (not is_string(arg) and
(hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__")))
def is_pair(arg):
"""
check if 'arg' is a two-item sequence
"""
return is_sequence(arg) and len(arg) == 2
def is_string(arg):
return isinstance(arg, six.string_types)
def locate(pattern, root=os.curdir):
'''Locate all files matching supplied filename pattern in and below
supplied root directory.'''
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
def itersubclasses(cls, _seen=None):
"""
snagged from: http://code.activestate.com/recipes/576949/
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def replace_directory(out_files, dest_dir):
"""
change the output directory to dest_dir
can take a string (single file) or a list of files
"""
if is_sequence(out_files):
filenames = map(os.path.basename, out_files)
return [os.path.join(dest_dir, x) for x in filenames]
elif is_string(out_files):
return os.path.join(dest_dir, os.path.basename(out_files))
else:
raise ValueError("in_files must either be a sequence of filenames "
"or a string")
def which(program, env=None):
""" returns the path to an executable or None if it can't be found"""
if env is None:
env = os.environ.copy()
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in env["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
for path in get_all_conda_bins():
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
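
# Illustrative sketch (not part of the original module; the program name is hypothetical):
# resolve an executable from PATH, falling back to any conda environment bin directories.
#
#   samtools = which("samtools")
#   if samtools is None:
#       raise RuntimeError("samtools not found on PATH or in conda envs")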
def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept
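
# Illustrative sketch (not part of the original module; the file name is hypothetical):
# keep a uniform random sample of 1000 lines from a large file without reading it
# fully into memory.
#
#   with open_gzipsafe("reads.fq.gz") as handle:
#       subset = reservoir_sample(handle, 1000, item_parser=str.strip)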
def compose(f, g):
return lambda x: f(g(x))
def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d
def Rscript_cmd():
"""Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version.
"""
rscript = which(os.path.join(get_bcbio_bin(), "Rscript"))
if rscript:
return rscript
else:
return which("Rscript")
def R_sitelib():
"""Retrieve the R site-library installed with the bcbio installer.
"""
return os.path.join(os.path.dirname(get_bcbio_bin()), "lib", "R", "library")
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} --no-environ -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError as e:
return None
for line in output.decode().split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None
def R_package_resource(package, resource):
"""
return a path to an R package resource, if it is available
"""
package_path = R_package_path(package)
if not package_path:
return None
package_resource = os.path.join(package_path, resource)
if not file_exists(package_resource):
return None
else:
return package_resource
def get_java_binpath(cmd=None):
"""Retrieve path for java to use, handling custom BCBIO_JAVA_HOME
Defaults to the dirname of cmd, or local anaconda directory
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
cmd = test_cmd
if not cmd:
cmd = Rscript_cmd()
return os.path.dirname(cmd)
def clear_java_home():
"""Clear JAVA_HOME environment or reset to BCBIO_JAVA_HOME.
Avoids accidental java injection but respects custom BCBIO_JAVA_HOME
command.
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
return "export JAVA_HOME=%s" % os.environ["BCBIO_JAVA_HOME"]
return "unset JAVA_HOME"
def get_java_clprep(cmd=None):
"""Correctly prep command line for java commands, setting PATH and unsetting JAVA_HOME.
"""
return "%s && export PATH=%s:\"$PATH\"" % (clear_java_home(), get_java_binpath(cmd))
def get_R_exports():
return "unset R_HOME && unset R_LIBS && export PATH=%s:\"$PATH\"" % (os.path.dirname(Rscript_cmd()))
def perl_cmd():
"""Retrieve path to locally installed conda Perl or first in PATH.
"""
perl = which(os.path.join(get_bcbio_bin(), "perl"))
if perl:
return perl
else:
return which("perl")
def get_perl_exports(tmpdir=None):
"""Environmental exports to use conda installed perl.
"""
perl_path = os.path.dirname(perl_cmd())
out = "unset PERL5LIB && export PATH=%s:\"$PATH\"" % (perl_path)
if tmpdir:
out += " && export TMPDIR=%s" % (tmpdir)
return out
def get_bcbio_env():
env = os.environ.copy()
env["PATH"] = append_path(get_bcbio_bin(), env['PATH'])
return env
def append_path(bin, path, at_start=True):
if at_start:
tmpl = "{bin}:{path}"
else:
tmpl = "{path}:{bin}"
return tmpl.format(bin=bin, path=path)
def get_bcbio_bin():
return os.path.dirname(os.path.realpath(sys.executable))
def get_all_conda_bins():
"""Retrieve all possible conda bin directories, including environments.
"""
bcbio_bin = get_bcbio_bin()
conda_dir = os.path.dirname(bcbio_bin)
if os.path.join("anaconda", "envs") in conda_dir:
conda_dir = os.path.join(conda_dir[:conda_dir.rfind(os.path.join("anaconda", "envs"))], "anaconda")
return [bcbio_bin] + list(glob.glob(os.path.join(conda_dir, "envs", "*", "bin")))
def get_program_python(cmd):
"""Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments.
"""
full_cmd = os.path.realpath(which(cmd))
cmd_python = os.path.join(os.path.dirname(full_cmd), "python")
env_python = None
if "envs" in cmd_python:
parts = cmd_python.split(os.sep)
env_python = os.path.join(os.sep.join(parts[:parts.index("envs") + 2]), "bin", "python")
if os.path.exists(cmd_python):
return cmd_python
elif env_python and os.path.exists(env_python):
return env_python
else:
return os.path.realpath(sys.executable)
def local_path_export(at_start=True, env_cmd=None):
"""Retrieve paths to local install, also including environment paths if env_cmd included.
"""
paths = [get_bcbio_bin()]
if env_cmd:
env_path = os.path.dirname(get_program_python(env_cmd))
if env_path not in paths:
paths.insert(0, env_path)
if at_start:
return "export PATH=%s:\"$PATH\" && " % (":".join(paths))
else:
return "export PATH=\"$PATH\":%s && " % (":".join(paths))
def locale_export():
"""Exports for dealing with Click-based programs and ASCII/Unicode errors.
RuntimeError: Click will abort further execution because Python 3 was
configured to use ASCII as encoding for the environment.
Consult https://click.palletsprojects.com/en/7.x/python3/ for mitigation steps.
Looks up available locales on the system to find an appropriate one to pick,
defaulting to C.UTF-8 which is globally available on newer systems.
"""
locale_to_use = "C.UTF-8"
try:
locales = subprocess.check_output(["locale", "-a"]).decode(errors="ignore").split("\n")
except subprocess.CalledProcessError:
locales = []
for locale in locales:
if locale.lower().endswith(("utf-8", "utf8")):
locale_to_use = locale
break
return "export LC_ALL=%s && export LANG=%s && " % (locale_to_use, locale_to_use)
def java_freetype_fix():
"""Provide workaround for issues FreeType library symbols.
libfontconfig.so.1: undefined symbol: FT_Done_MM_Var
Cheap workaround with LD_PRELOAD, I don't know a better one.
"""
return "export LD_PRELOAD=%s/lib/libfreetype.so && " % os.path.dirname(get_bcbio_bin())
def is_gzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".gz", "gzip"]
def is_bzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".bz2", "bzip2"]
def open_possible_gzip(fname, flag="r"):
if is_gzipped(fname):
if "b" not in flag:
flag += "b"
return gzip.open(fname, flag)
else:
return open(fname, flag)
def filter_missing(xs):
"""
remove items from a list if they evaluate to False
"""
return filter(lambda x: x, xs)
def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df
def max_command_length():
"""
get the maximum length of the command line, in bytes, defaulting
to a conservative number if not set
http://www.in-ulm.de/~mascheck/various/argmax/
"""
DEFAULT_MAX_LENGTH = 150000 # lowest seen so far is 200k
try:
arg_max = os.sysconf('SC_ARG_MAX')
env_lines = len(os.environ) * 4
env_chars = sum([len(x) + len(y) for x, y in os.environ.items()])
arg_length = arg_max - env_lines - 2048
except ValueError:
arg_length = DEFAULT_MAX_LENGTH
return arg_length if arg_length > 0 else DEFAULT_MAX_LENGTH
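
# Illustrative sketch (not part of the original module; the batch size is a hypothetical
# heuristic): batch inputs when the constructed command would exceed the OS limit.
#
#   if sum(len(f) + 1 for f in filenames) > max_command_length():
#       batches = list(partition_all(100, filenames))  # run the command once per batch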
def get_abspath(path, pardir=None):
if pardir is None:
pardir = os.getcwd()
path = os.path.expandvars(path)
return os.path.normpath(os.path.join(pardir, path))
def sort_filenames(filenames):
"""
sort a list of files by filename only, ignoring the directory names
"""
basenames = [os.path.basename(x) for x in filenames]
indexes = [i[0] for i in sorted(enumerate(basenames), key=lambda x:x[1])]
return [filenames[x] for x in indexes]
# LazyImport from NIPY
# https://github.com/nipy/nitime/blob/master/nitime/lazyimports.py
class LazyImport(types.ModuleType):
"""
This class takes the module name as a parameter, and acts as a proxy for
that module, importing it only when the module is used, but effectively
acting as the module in every other way (including inside IPython with
respect to introspection and tab completion) with the *exception* of
reload()- reloading a :class:`LazyImport` raises an :class:`ImportError`.
>>> mlab = LazyImport('matplotlib.mlab')
No import happens on the above line, until we do something like call an
``mlab`` method or try to do tab completion or introspection on ``mlab``
in IPython.
>>> mlab
<module 'matplotlib.mlab' will be lazily loaded>
Now the :class:`LazyImport` will do an actual import, and call the dist
function of the imported module.
>>> mlab.dist(1969,2011)
42.0
"""
def __getattribute__(self, x):
# This method will be called only once, since we'll change
# self.__class__ to LoadedLazyImport, and __getattribute__ will point
# to module.__getattribute__
name = object.__getattribute__(self, '__name__')
__import__(name)
# if name above is 'package.foo.bar', package is returned, the docs
# recommend that in order to get back the full thing, that we import
# and then lookup the full name is sys.modules, see:
# http://docs.python.org/library/functions.html#__import__
module = sys.modules[name]
# Now that we've done the import, cutout the middleman and make self
# act as the imported module
class LoadedLazyImport(types.ModuleType):
__getattribute__ = module.__getattribute__
__repr__ = module.__repr__
object.__setattr__(self, '__class__', LoadedLazyImport)
# The next line will make "reload(l)" a silent no-op
# sys.modules[name] = self
return module.__getattribute__(x)
def __repr__(self):
return "<module '%s' will be lazily loaded>" %\
object.__getattribute__(self,'__name__')
def walk_json(d, func):
""" Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
"""
if isinstance(d, Mapping):
return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
elif isinstance(d, list):
return [walk_json(v, func) for v in d]
else:
return func(d)
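
# Illustrative sketch (not part of the original module): doubling every leaf value.
#
#   walk_json({"a": [1, 2], "b": 3}, lambda x: x * 2)
#   # -> OrderedDict([('a', [2, 4]), ('b', 6)])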
| [] | [] | ["BCBIO_JAVA_HOME"] | [] | ["BCBIO_JAVA_HOME"] | python | 1 | 0 | |
contrib/registry/kubernetes/registry.go | // The package registry simply implements the Kubernetes-based Registry
package kuberegistry
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/go-kratos/kratos/v2/registry"
jsoniter "github.com/json-iterator/go"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
listerv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
)
// Defines the key name of specific fields
// Kratos needs to cooperate with the following fields to run properly on Kubernetes:
// kratos-service-id: define the ID of the service
// kratos-service-app: define the name of the service
// kratos-service-version: define the version of the service
// kratos-service-metadata: define the metadata of the service
// kratos-service-protocols: define the protocols of the service
//
// Example Deployment:
//
// apiVersion: apps/v1
// kind: Deployment
// metadata:
// name: nginx
// labels:
// app: nginx
// spec:
// replicas: 5
// selector:
// matchLabels:
// app: nginx
// template:
// metadata:
// labels:
// app: nginx
// kratos-service-id: "56991810-c77f-4a95-8190-393efa9c1a61"
// kratos-service-app: "nginx"
// kratos-service-version: "v3.5.0"
// annotations:
// kratos-service-protocols: |
// {"80": "http"}
// kratos-service-metadata: |
// {"region": "sh", "zone": "sh001", "cluster": "pd"}
// spec:
// containers:
// - name: nginx
// image: nginx:1.7.9
// ports:
// - containerPort: 80
//
const (
// LabelsKeyServiceID is used to define the ID of the service
LabelsKeyServiceID = "kratos-service-id"
// LabelsKeyServiceName is used to define the name of the service
LabelsKeyServiceName = "kratos-service-app"
// LabelsKeyServiceVersion is used to define the version of the service
LabelsKeyServiceVersion = "kratos-service-version"
// AnnotationsKeyMetadata is used to define the metadata of the service
AnnotationsKeyMetadata = "kratos-service-metadata"
// AnnotationsKeyProtocolMap is used to define the protocols of the service
// Through the value of this field, Kratos can obtain the application layer protocol corresponding to the port
// Example value: {"80": "http", "8081": "grpc"}
AnnotationsKeyProtocolMap = "kratos-service-protocols"
)
// The registry simply implements service discovery based on Kubernetes
// It has not been verified in the production environment and is currently for reference only
type Registry struct {
clientSet *kubernetes.Clientset
informerFactory informers.SharedInformerFactory
podInformer cache.SharedIndexInformer
podLister listerv1.PodLister
stopCh chan struct{}
}
// NewRegistry is used to initialize the Registry
func NewRegistry(clientSet *kubernetes.Clientset) *Registry {
informerFactory := informers.NewSharedInformerFactory(clientSet, time.Minute*10)
podInformer := informerFactory.Core().V1().Pods().Informer()
podLister := informerFactory.Core().V1().Pods().Lister()
return &Registry{
clientSet: clientSet,
informerFactory: informerFactory,
podInformer: podInformer,
podLister: podLister,
stopCh: make(chan struct{}),
}
}
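
// Illustrative wiring sketch (not part of the original file): building the registry from
// an in-cluster client-go config; error handling is omitted for brevity.
//
//	cfg, _ := rest.InClusterConfig() // k8s.io/client-go/rest
//	clientSet, _ := kubernetes.NewForConfig(cfg)
//	r := NewRegistry(clientSet)
//	r.Start()
//	defer r.Close()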
// Register is used to register services
// Note that on Kubernetes, it can only be used to update the id/name/version/metadata/protocols of the current service,
// but it cannot be used to update the node itself.
func (s *Registry) Register(ctx context.Context, service *registry.ServiceInstance) error {
// GetMetadata
metadataVal, err := marshal(service.Metadata)
if err != nil {
return err
}
// Generate ProtocolMap
protocolMap, err := getProtocolMapByEndpoints(service.Endpoints)
if err != nil {
return err
}
protocolMapVal, err := marshal(protocolMap)
if err != nil {
return err
}
patchBytes, err := jsoniter.Marshal(map[string]interface{}{
"metadata": metav1.ObjectMeta{
Labels: map[string]string{
LabelsKeyServiceID: service.ID,
LabelsKeyServiceName: service.Name,
LabelsKeyServiceVersion: service.Version,
},
Annotations: map[string]string{
AnnotationsKeyMetadata: metadataVal,
AnnotationsKeyProtocolMap: protocolMapVal,
},
},
})
if err != nil {
return err
}
if _, err = s.clientSet.
CoreV1().
Pods(GetNamespace()).
Patch(ctx, GetPodName(), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
return err
}
return nil
}
// Deregister the registration.
func (s *Registry) Deregister(ctx context.Context, service *registry.ServiceInstance) error {
return s.Register(ctx, ®istry.ServiceInstance{
Metadata: map[string]string{},
})
}
// GetService returns the service instances in memory according to the service name.
func (s *Registry) GetService(ctx context.Context, name string) ([]*registry.ServiceInstance, error) {
pods, err := s.podLister.List(labels.SelectorFromSet(map[string]string{
LabelsKeyServiceName: name,
}))
if err != nil {
return nil, err
}
ret := make([]*registry.ServiceInstance, 0, len(pods))
for _, pod := range pods {
if pod.Status.Phase != corev1.PodRunning {
continue
}
instance, err := getServiceInstanceFromPod(pod)
if err != nil {
return nil, err
}
ret = append(ret, instance)
}
return ret, nil
}
func (s *Registry) sendLatestInstances(ctx context.Context, name string, announcement chan []*registry.ServiceInstance) {
instances, err := s.GetService(ctx, name)
if err != nil {
panic(err)
}
announcement <- instances
}
// Watch creates a watcher according to the service name.
func (s *Registry) Watch(ctx context.Context, name string) (registry.Watcher, error) {
stopCh := make(chan struct{}, 1)
announcement := make(chan []*registry.ServiceInstance, 1)
s.podInformer.AddEventHandler(cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
select {
case <-stopCh:
return false
case <-s.stopCh:
return false
default:
pod := obj.(*corev1.Pod)
val := pod.GetLabels()[LabelsKeyServiceName]
return val == name
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
s.sendLatestInstances(ctx, name, announcement)
},
UpdateFunc: func(oldObj, newObj interface{}) {
s.sendLatestInstances(ctx, name, announcement)
},
DeleteFunc: func(obj interface{}) {
s.sendLatestInstances(ctx, name, announcement)
},
},
})
return NewIterator(announcement, stopCh), nil
}
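
// Illustrative sketch (not part of the original file; the service name is hypothetical):
// consume the watcher until it is stopped.
//
//	w, _ := r.Watch(ctx, "helloworld")
//	for {
//		instances, err := w.Next()
//		if err != nil {
//			break // e.g. ErrIteratorClosed after Stop()
//		}
//		_ = instances // react to the updated instance list
//	}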
// Start is used to start the Registry
// It is non-blocking
func (s *Registry) Start() {
s.informerFactory.Start(s.stopCh)
if !cache.WaitForCacheSync(s.stopCh, s.podInformer.HasSynced) {
return
}
}
// Close is used to close the Registry
// After closing, any callbacks generated by Watch will not be executed
func (s *Registry) Close() {
select {
case <-s.stopCh:
default:
close(s.stopCh)
}
}
// //////////// K8S Runtime ////////////
// ServiceAccountNamespacePath defines the location of the namespace file
const ServiceAccountNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
var currentNamespace = LoadNamespace()
// LoadNamespace is used to get the current namespace from the file
func LoadNamespace() string {
data, err := os.ReadFile(ServiceAccountNamespacePath)
if err != nil {
return ""
}
return string(data)
}
// GetNamespace is used to get the namespace of the Pod where the current container is located
func GetNamespace() string {
return currentNamespace
}
// GetPodName is used to get the name of the Pod where the current container is located
func GetPodName() string {
return os.Getenv("HOSTNAME")
}
// //////////// ProtocolMap ////////////
type protocolMap map[string]string
func (m protocolMap) GetProtocol(port int32) string {
return m[strconv.Itoa(int(port))]
}
// //////////// Iterator ////////////
// Iterator performs the conversion from channel to iterator
// It reads the latest changes from the `chan []*registry.ServiceInstance`
// And the outside can sense the closure of Iterator through stopCh
type Iterator struct {
ch chan []*registry.ServiceInstance
stopCh chan struct{}
}
// NewIterator is used to initialize Iterator
func NewIterator(channel chan []*registry.ServiceInstance, stopCh chan struct{}) *Iterator {
return &Iterator{
ch: channel,
stopCh: stopCh,
}
}
// Next will block until ServiceInstance changes
func (iter *Iterator) Next() ([]*registry.ServiceInstance, error) {
select {
case instances := <-iter.ch:
return instances, nil
case <-iter.stopCh:
return nil, ErrIteratorClosed
}
}
// Stop is used to close the iterator
func (iter *Iterator) Stop() error {
select {
case <-iter.stopCh:
default:
close(iter.stopCh)
}
return nil
}
// //////////// Helper Func ////////////
func marshal(in interface{}) (string, error) {
return jsoniter.MarshalToString(in)
}
func unmarshal(data string, in interface{}) error {
return jsoniter.UnmarshalFromString(data, in)
}
func isEmptyObjectString(s string) bool {
switch s {
case "", "{}", "null", "nil", "[]":
return true
}
return false
}
func getProtocolMapByEndpoints(endpoints []string) (protocolMap, error) {
ret := protocolMap{}
for _, endpoint := range endpoints {
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
ret[u.Port()] = u.Scheme
}
return ret, nil
}
func getProtocolMapFromPod(pod *corev1.Pod) (protocolMap, error) {
protoMap := protocolMap{}
if s := pod.Annotations[AnnotationsKeyProtocolMap]; !isEmptyObjectString(s) {
err := unmarshal(s, &protoMap)
if err != nil {
return nil, &ErrorHandleResource{Namespace: pod.Namespace, Name: pod.Name, Reason: err}
}
}
return protoMap, nil
}
func getMetadataFromPod(pod *corev1.Pod) (map[string]string, error) {
metadata := map[string]string{}
if s := pod.Annotations[AnnotationsKeyMetadata]; !isEmptyObjectString(s) {
err := unmarshal(s, &metadata)
if err != nil {
return nil, &ErrorHandleResource{Namespace: pod.Namespace, Name: pod.Name, Reason: err}
}
}
return metadata, nil
}
func getServiceInstanceFromPod(pod *corev1.Pod) (*registry.ServiceInstance, error) {
podIP := pod.Status.PodIP
podLabels := pod.GetLabels()
// Get Metadata
metadata, err := getMetadataFromPod(pod)
if err != nil {
return nil, err
}
// Get Protocols Definition
protocolMap, err := getProtocolMapFromPod(pod)
if err != nil {
return nil, err
}
// Get Endpoints
var endpoints []string
for _, container := range pod.Spec.Containers {
for _, cp := range container.Ports {
port := cp.ContainerPort
protocol := protocolMap.GetProtocol(port)
if protocol == "" {
if cp.Name != "" {
protocol = strings.Split(cp.Name, "-")[0]
} else {
protocol = string(cp.Protocol)
}
}
addr := fmt.Sprintf("%s://%s:%d", protocol, podIP, port)
endpoints = append(endpoints, addr)
}
}
return ®istry.ServiceInstance{
ID: podLabels[LabelsKeyServiceID],
Name: podLabels[LabelsKeyServiceName],
Version: podLabels[LabelsKeyServiceVersion],
Metadata: metadata,
Endpoints: endpoints,
}, nil
}
// //////////// Error Definition ////////////
// ErrIteratorClosed is the error returned when the iterator has been closed
var ErrIteratorClosed = errors.New("iterator closed")
// ErrorHandleResource is the error returned when a K8S resource cannot be handled normally
type ErrorHandleResource struct {
Namespace string
Name string
Reason error
}
// Error implements the error interface
func (err *ErrorHandleResource) Error() string {
return fmt.Sprintf("failed to handle resource(namespace=%s, name=%s): %s",
err.Namespace, err.Name, err.Reason)
}
| ["\"HOSTNAME\""] | [] | ["HOSTNAME"] | [] | ["HOSTNAME"] | go | 1 | 0 | |
google/cloud/talent_v4beta1/services/profile_service/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.talent_v4beta1.services.profile_service import pagers
from google.cloud.talent_v4beta1.types import common
from google.cloud.talent_v4beta1.types import histogram
from google.cloud.talent_v4beta1.types import profile
from google.cloud.talent_v4beta1.types import profile as gct_profile
from google.cloud.talent_v4beta1.types import profile_service
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from .transports.base import ProfileServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ProfileServiceGrpcTransport
from .transports.grpc_asyncio import ProfileServiceGrpcAsyncIOTransport
class ProfileServiceClientMeta(type):
"""Metaclass for the ProfileService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ProfileServiceTransport]]
_transport_registry["grpc"] = ProfileServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ProfileServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[ProfileServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ProfileServiceClient(metaclass=ProfileServiceClientMeta):
"""A service that handles profile management, including profile
CRUD, enumeration and search.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "jobs.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ProfileServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ProfileServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ProfileServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ProfileServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def profile_path(project: str, tenant: str, profile: str,) -> str:
"""Returns a fully-qualified profile string."""
return "projects/{project}/tenants/{tenant}/profiles/{profile}".format(
project=project, tenant=tenant, profile=profile,
)
@staticmethod
def parse_profile_path(path: str) -> Dict[str, str]:
"""Parses a profile path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/profiles/(?P<profile>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def tenant_path(project: str, tenant: str,) -> str:
"""Returns a fully-qualified tenant string."""
return "projects/{project}/tenants/{tenant}".format(
project=project, tenant=tenant,
)
@staticmethod
def parse_tenant_path(path: str) -> Dict[str, str]:
"""Parses a tenant path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ProfileServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the profile service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ProfileServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ProfileServiceTransport):
# transport is a ProfileServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def list_profiles(
self,
request: Union[profile_service.ListProfilesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListProfilesPager:
r"""Lists profiles by filter. The order is unspecified.
Args:
request (Union[google.cloud.talent_v4beta1.types.ListProfilesRequest, dict]):
The request object. List profiles request.
parent (str):
Required. The resource name of the tenant under which
the profile is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.services.profile_service.pagers.ListProfilesPager:
The List profiles response object.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.ListProfilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.ListProfilesRequest):
request = profile_service.ListProfilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_profiles]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListProfilesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
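
    # Illustrative sketch (not part of the generated client; the project and tenant IDs
    # are hypothetical): iterate over every profile of a tenant via the returned pager.
    #
    #   client = ProfileServiceClient()
    #   for profile in client.list_profiles(parent="projects/my-project/tenants/my-tenant"):
    #       print(profile.name)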
def create_profile(
self,
request: Union[profile_service.CreateProfileRequest, dict] = None,
*,
parent: str = None,
profile: gct_profile.Profile = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_profile.Profile:
r"""Creates and returns a new profile.
Args:
request (Union[google.cloud.talent_v4beta1.types.CreateProfileRequest, dict]):
The request object. Create profile request.
parent (str):
Required. The name of the tenant this profile belongs
to.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
profile (google.cloud.talent_v4beta1.types.Profile):
Required. The profile to be created.
This corresponds to the ``profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.Profile:
A resource that represents the
profile for a job candidate (also
referred to as a "single-source
profile").
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.CreateProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.CreateProfileRequest):
request = profile_service.CreateProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if profile is not None:
request.profile = profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_profile(
self,
request: Union[profile_service.GetProfileRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> profile.Profile:
r"""Gets the specified profile.
Args:
request (Union[google.cloud.talent_v4beta1.types.GetProfileRequest, dict]):
The request object. Get profile request.
name (str):
Required. Resource name of the profile to get.
The format is
"projects/{project_id}/tenants/{tenant_id}/profiles/{profile_id}".
For example, "projects/foo/tenants/bar/profiles/baz".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.Profile:
A resource that represents the
profile for a job candidate (also
referred to as a "single-source
profile").
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.GetProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.GetProfileRequest):
request = profile_service.GetProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_profile(
self,
request: Union[profile_service.UpdateProfileRequest, dict] = None,
*,
profile: gct_profile.Profile = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_profile.Profile:
r"""Updates the specified profile and returns the updated
result.
Args:
request (Union[google.cloud.talent_v4beta1.types.UpdateProfileRequest, dict]):
The request object. Update profile request
profile (google.cloud.talent_v4beta1.types.Profile):
Required. Profile to be updated.
This corresponds to the ``profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.Profile:
A resource that represents the
profile for a job candidate (also
referred to as a "single-source
profile").
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.UpdateProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.UpdateProfileRequest):
request = profile_service.UpdateProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if profile is not None:
request.profile = profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("profile.name", request.profile.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_profile(
self,
request: Union[profile_service.DeleteProfileRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified profile.
Prerequisite: The profile has no associated applications
or assignments.
Args:
request (Union[google.cloud.talent_v4beta1.types.DeleteProfileRequest, dict]):
The request object. Delete profile request.
name (str):
Required. Resource name of the profile to be deleted.
The format is
"projects/{project_id}/tenants/{tenant_id}/profiles/{profile_id}".
For example, "projects/foo/tenants/bar/profiles/baz".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.DeleteProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.DeleteProfileRequest):
request = profile_service.DeleteProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def search_profiles(
self,
request: Union[profile_service.SearchProfilesRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchProfilesPager:
r"""Searches for profiles within a tenant.
For example, search by raw queries "software engineer in
Mountain View" or search by structured filters (location filter,
education filter, etc.).
See
[SearchProfilesRequest][google.cloud.talent.v4beta1.SearchProfilesRequest]
for more information.
Args:
request (Union[google.cloud.talent_v4beta1.types.SearchProfilesRequest, dict]):
The request object. The request body of the
`SearchProfiles` call.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.services.profile_service.pagers.SearchProfilesPager:
Response of SearchProfiles method.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.SearchProfilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.SearchProfilesRequest):
request = profile_service.SearchProfilesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.search_profiles]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchProfilesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ProfileServiceClient",)
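# --- Illustrative usage sketch (not part of the generated client code above) ---
# A minimal, hedged example of how this client is typically driven; the
# project/tenant path is made up, and credentials are assumed to be resolved by
# the library's default auth flow.
#
#     from google.cloud import talent_v4beta1
#
#     client = talent_v4beta1.ProfileServiceClient()
#     parent = "projects/my-project/tenants/my-tenant"  # hypothetical resource name
#     for profile in client.list_profiles(parent=parent):
#         print(profile.name)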
| []
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 | |
upup/pkg/fi/cloudup/apply_cluster.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudup
import (
"bytes"
"context"
"fmt"
"net/url"
"os"
"path"
"strings"
"github.com/blang/semver/v4"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/acls"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/model/alimodel"
"k8s.io/kops/pkg/model/awsmodel"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/components/etcdmanager"
"k8s.io/kops/pkg/model/components/kubeapiserver"
"k8s.io/kops/pkg/model/domodel"
"k8s.io/kops/pkg/model/gcemodel"
"k8s.io/kops/pkg/model/openstackmodel"
"k8s.io/kops/pkg/model/spotinstmodel"
"k8s.io/kops/pkg/resources/digitalocean"
"k8s.io/kops/pkg/templates"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/cloudformation"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/hashing"
"k8s.io/kops/util/pkg/vfs"
)
const (
starline = "*********************************************************************************"
)
var (
// AlphaAllowDO is a feature flag that gates DigitalOcean support while it is alpha
AlphaAllowDO = featureflag.New("AlphaAllowDO", featureflag.Bool(false))
// AlphaAllowGCE is a feature flag that gates GCE support while it is alpha
AlphaAllowGCE = featureflag.New("AlphaAllowGCE", featureflag.Bool(false))
// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
// OldestSupportedKubernetesVersion is the oldest kubernetes version that is supported in Kops
OldestSupportedKubernetesVersion = "1.11.0"
// OldestRecommendedKubernetesVersion is the oldest kubernetes version that is not deprecated in Kops
OldestRecommendedKubernetesVersion = "1.13.0"
)
type ApplyClusterCmd struct {
Cluster *kops.Cluster
InstanceGroups []*kops.InstanceGroup
// NodeUpSource is the location from which we download nodeup
NodeUpSource map[architectures.Architecture]string
// NodeUpHash is the SHA hash of the nodeup binary, keyed by architecture
NodeUpHash map[architectures.Architecture]string
// TargetName specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
TargetName string
// Target is the fi.Target we will operate against
Target fi.Target
// OutDir is a local directory in which we place output, can cache files etc
OutDir string
// Assets is a list of sources for files (primarily when not using everything containerized)
// Formats:
// raw url: http://... or https://...
// url with hash: <hex>@http://... or <hex>@https://...
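// For example (illustrative values only):
//  https://example.com/bin/linux/amd64/kubelet
//  0123456789abcdef@https://example.com/bin/linux/amd64/kubelet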
Assets map[architectures.Architecture][]*MirroredAsset
Clientset simple.Clientset
// DryRun is true if this is only a dry run
DryRun bool
// AllowKopsDowngrade permits applying with a kops version older than what was last used to apply to the cluster.
AllowKopsDowngrade bool
// RunTasksOptions defines parameters for task execution, e.g. retry interval
RunTasksOptions *fi.RunTasksOptions
// The channel we are using
channel *kops.Channel
// Phase can be set to a Phase to run the specific subset of tasks, if we don't want to run everything
Phase Phase
// LifecycleOverrides is passed in to override the lifecycle for one of more tasks.
// The key value is the task name such as InternetGateway and the value is the fi.Lifecycle
// that is re-mapped.
LifecycleOverrides map[string]fi.Lifecycle
// TaskMap is the map of tasks that we built (output)
TaskMap map[string]fi.Task
}
func (c *ApplyClusterCmd) Run(ctx context.Context) error {
if c.InstanceGroups == nil {
list, err := c.Clientset.InstanceGroupsFor(c.Cluster).List(ctx, metav1.ListOptions{})
if err != nil {
return err
}
var instanceGroups []*kops.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
c.InstanceGroups = instanceGroups
}
for _, ig := range c.InstanceGroups {
// Try to guess the path for additional third party volume plugins in Flatcar
image := strings.ToLower(ig.Spec.Image)
if strings.Contains(image, "flatcar") {
if c.Cluster.Spec.Kubelet == nil {
c.Cluster.Spec.Kubelet = &kops.KubeletConfigSpec{}
}
if c.Cluster.Spec.Kubelet.VolumePluginDirectory == "" {
c.Cluster.Spec.Kubelet.VolumePluginDirectory = "/var/lib/kubelet/volumeplugins/"
}
}
}
modelStore, err := findModelStore()
if err != nil {
return err
}
channel, err := ChannelForCluster(c.Cluster)
if err != nil {
klog.Warningf("%v", err)
}
c.channel = channel
stageAssetsLifecycle := fi.LifecycleSync
securityLifecycle := fi.LifecycleSync
networkLifecycle := fi.LifecycleSync
clusterLifecycle := fi.LifecycleSync
switch c.Phase {
case Phase(""):
// Everything ... the default
// until we implement finding assets we need to Ignore them
stageAssetsLifecycle = fi.LifecycleIgnore
case PhaseStageAssets:
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseNetwork:
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseSecurity:
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
clusterLifecycle = fi.LifecycleIgnore
case PhaseCluster:
if c.TargetName == TargetDryRun {
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleExistsAndWarnIfChanges
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
} else {
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndValidates
securityLifecycle = fi.LifecycleExistsAndValidates
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}
// This is kind of a hack. Need to move phases out of fi. If we use Phase here we introduce a circular
// go dependency.
phase := string(c.Phase)
assetBuilder := assets.NewAssetBuilder(c.Cluster, phase)
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}
err = c.validateKopsVersion()
if err != nil {
return err
}
err = c.validateKubernetesVersion()
if err != nil {
return err
}
cluster := c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
if !c.AllowKopsDowngrade {
kopsVersionUpdatedBytes, err := configBase.Join(registry.PathKopsVersionUpdated).ReadFile()
if err == nil {
kopsVersionUpdated := strings.TrimSpace(string(kopsVersionUpdatedBytes))
version, err := semver.Parse(kopsVersionUpdated)
if err != nil {
return fmt.Errorf("error parsing last kops version updated: %v", err)
}
if version.GT(semver.MustParse(kopsbase.Version)) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("The cluster was last updated by kops version %s\n", kopsVersionUpdated)
fmt.Printf("To permit updating by the older version %s, run with the --allow-kops-downgrade flag\n", kopsbase.Version)
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
return fmt.Errorf("kops version older than last used to update the cluster")
}
} else if err != os.ErrNotExist {
return fmt.Errorf("error reading last kops version used to update: %v", err)
}
}
cloud, err := BuildCloud(cluster)
if err != nil {
return err
}
err = validation.DeepValidate(c.Cluster, c.InstanceGroups, true, cloud)
if err != nil {
return err
}
if cluster.Spec.KubernetesVersion == "" {
return fmt.Errorf("KubernetesVersion not set")
}
if cluster.Spec.DNSZone == "" && !dns.IsGossipHostname(cluster.ObjectMeta.Name) {
return fmt.Errorf("DNSZone not set")
}
l := &Loader{}
l.Init()
l.Cluster = c.Cluster
keyStore, err := c.Clientset.KeyStore(cluster)
if err != nil {
return err
}
sshCredentialStore, err := c.Clientset.SSHCredentialStore(cluster)
if err != nil {
return err
}
secretStore, err := c.Clientset.SecretStore(cluster)
if err != nil {
return err
}
// Normalize k8s version
versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
versionWithoutV = strings.TrimPrefix(versionWithoutV, "v")
if cluster.Spec.KubernetesVersion != versionWithoutV {
klog.Warningf("Normalizing kubernetes version: %q -> %q", cluster.Spec.KubernetesVersion, versionWithoutV)
cluster.Spec.KubernetesVersion = versionWithoutV
}
// check if we should recommend turning off anonymousAuth
{
// we do a check here because modifying the kubelet object messes with the output
warn := false
if cluster.Spec.Kubelet == nil {
warn = true
} else if cluster.Spec.Kubelet.AnonymousAuth == nil {
warn = true
}
if warn {
fmt.Println("")
fmt.Printf("%s\n", starline)
fmt.Println("")
fmt.Println("Kubelet anonymousAuth is currently turned on. This allows RBAC escalation and remote code execution possibilities.")
fmt.Println("It is highly recommended you turn it off by setting 'spec.kubelet.anonymousAuth' to 'false' via 'kops edit cluster'")
fmt.Println("")
fmt.Println("See https://kops.sigs.k8s.io/security/#kubelet-api")
fmt.Println("")
fmt.Printf("%s\n", starline)
fmt.Println("")
}
}
if err := c.addFileAssets(assetBuilder); err != nil {
return err
}
// Only setup transfer of kops assets if using a FileRepository
if c.Cluster.Spec.Assets != nil && c.Cluster.Spec.Assets.FileRepository != nil {
if err := SetKopsAssetsLocations(assetBuilder); err != nil {
return err
}
}
checkExisting := true
region := ""
project := ""
var sshPublicKeys [][]byte
{
keys, err := sshCredentialStore.FindSSHPublicKeys(fi.SecretNameSSHPrimary)
if err != nil {
return fmt.Errorf("error retrieving SSH public key %q: %v", fi.SecretNameSSHPrimary, err)
}
for _, k := range keys {
sshPublicKeys = append(sshPublicKeys, []byte(k.Spec.PublicKey))
}
}
modelContext := &model.KopsModelContext{
Cluster: cluster,
InstanceGroups: c.InstanceGroups,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
{
gceCloud := cloud.(gce.GCECloud)
region = gceCloud.Region()
project = gceCloud.Project()
if !AlphaAllowGCE.Enabled() {
return fmt.Errorf("GCE support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowGCE")
}
modelContext.SSHPublicKeys = sshPublicKeys
}
case kops.CloudProviderDO:
{
if !AlphaAllowDO.Enabled() {
return fmt.Errorf("DigitalOcean support is currently (very) alpha and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowDO to enable it")
}
if len(sshPublicKeys) == 0 && (c.Cluster.Spec.SSHKeyName == nil || *c.Cluster.Spec.SSHKeyName == "") {
return fmt.Errorf("SSH public key must be specified when running with DigitalOcean (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
}
case kops.CloudProviderAWS:
{
awsCloud := cloud.(awsup.AWSCloud)
region = awsCloud.Region()
if len(sshPublicKeys) == 0 && c.Cluster.Spec.SSHKeyName == nil {
return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) > 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with AWS; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderALI:
{
if !AlphaAllowALI.Enabled() {
return fmt.Errorf("aliyun support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowALI")
}
aliCloud := cloud.(aliup.ALICloud)
region = aliCloud.Region()
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with ALICloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with ALICloud; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderOpenstack:
{
osCloud := cloud.(openstack.OpenstackCloud)
region = osCloud.Region()
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with Openstack (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with Openstack; please delete a key using `kops delete secret`")
}
}
default:
return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
}
modelContext.Region = region
if dns.IsGossipHostname(cluster.ObjectMeta.Name) {
klog.Infof("Gossip DNS: skipping DNS validation")
} else {
err = validateDNS(cluster, cloud)
if err != nil {
return err
}
}
clusterTags, err := buildCloudupTags(cluster)
if err != nil {
return err
}
tf := &TemplateFunctions{
KopsModelContext: *modelContext,
tags: clusterTags,
}
{
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
}
l.Builders = append(l.Builders,
&BootstrapChannelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
assetBuilder: assetBuilder,
templates: templates,
},
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&kubeapiserver.KubeApiserverBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.BastionModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.DNSModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&model.ExternalAccessModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.FirewallModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.SSHKeyModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
l.Builders = append(l.Builders,
&model.NetworkModelBuilder{KopsModelContext: modelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&model.IAMModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&domodel.APILoadBalancerModelBuilder{DOModelContext: doModelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
storageACLLifecycle := securityLifecycle
if storageACLLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageACLLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageACLLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
// &openstackmodel.APILBModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &clusterLifecycle},
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
)
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
}
l.TemplateFunctions["CA"] = func() fi.CAStore {
return keyStore
}
l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
return secretStore
}
configBuilder, err := c.newNodeUpConfigBuilder(assetBuilder)
if err != nil {
return err
}
bootstrapScriptBuilder := &model.BootstrapScriptBuilder{
NodeUpConfigBuilder: configBuilder,
NodeUpSource: c.NodeUpSource,
NodeUpSourceHash: c.NodeUpHash,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
{
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
awsModelBuilder := &awsmodel.AutoscalingGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
}
if featureflag.Spotinst.Enabled() {
l.Builders = append(l.Builders, &spotinstmodel.InstanceGroupModelBuilder{
KopsModelContext: modelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
})
if featureflag.SpotinstHybrid.Enabled() {
l.Builders = append(l.Builders, awsModelBuilder)
}
} else {
l.Builders = append(l.Builders, awsModelBuilder)
}
}
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &domodel.DropletBuilder{
DOModelContext: doModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
case kops.CloudProviderGCE:
{
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &gcemodel.AutoscalingGroupModelBuilder{
GCEModelContext: gceModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderALI:
{
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &alimodel.ScalingGroupModelBuilder{
ALIModelContext: aliModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &openstackmodel.ServerGroupModelBuilder{
OpenstackModelContext: openstackModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
l.TemplateFunctions["Masters"] = tf.MasterInstanceGroups
err = tf.AddTo(l.TemplateFunctions, secretStore)
if err != nil {
return err
}
taskMap, err := l.BuildTasks(modelStore, assetBuilder, &stageAssetsLifecycle, c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error building tasks: %v", err)
}
c.TaskMap = taskMap
var target fi.Target
dryRun := false
shouldPrecreateDNS := true
switch c.TargetName {
case TargetDirect:
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
target = gce.NewGCEAPITarget(cloud.(gce.GCECloud))
case kops.CloudProviderAWS:
target = awsup.NewAWSAPITarget(cloud.(awsup.AWSCloud))
case kops.CloudProviderDO:
target = do.NewDOAPITarget(cloud.(*digitalocean.Cloud))
case kops.CloudProviderOpenstack:
target = openstack.NewOpenstackAPITarget(cloud.(openstack.OpenstackCloud))
case kops.CloudProviderALI:
target = aliup.NewALIAPITarget(cloud.(aliup.ALICloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", cluster.Spec.CloudProvider)
}
case TargetTerraform:
checkExisting = false
outDir := c.OutDir
tfVersion := terraform.Version011
if featureflag.Terraform012.Enabled() {
tfVersion = terraform.Version012
}
tf := terraform.NewTerraformTarget(cloud, region, project, outDir, tfVersion, cluster.Spec.Target)
// We include a few "util" variables in the TF output
if err := tf.AddOutputVariable("region", terraform.LiteralFromStringValue(region)); err != nil {
return err
}
if project != "" {
if err := tf.AddOutputVariable("project", terraform.LiteralFromStringValue(project)); err != nil {
return err
}
}
if err := tf.AddOutputVariable("cluster_name", terraform.LiteralFromStringValue(cluster.ObjectMeta.Name)); err != nil {
return err
}
target = tf
// Can cause conflicts with terraform management
shouldPrecreateDNS = false
case TargetCloudformation:
checkExisting = false
outDir := c.OutDir
target = cloudformation.NewCloudformationTarget(cloud, region, project, outDir)
// Can cause conflicts with cloudformation management
shouldPrecreateDNS = false
case TargetDryRun:
target = fi.NewDryRunTarget(assetBuilder, os.Stdout)
dryRun = true
// Avoid making changes on a dry-run
shouldPrecreateDNS = false
default:
return fmt.Errorf("unsupported target type %q", c.TargetName)
}
c.Target = target
if !dryRun {
acl, err := acls.GetACL(configBase, cluster)
if err != nil {
return err
}
err = configBase.Join(registry.PathKopsVersionUpdated).WriteFile(bytes.NewReader([]byte(kopsbase.Version)), acl)
if err != nil {
return fmt.Errorf("error writing kops version: %v", err)
}
err = registry.WriteConfigDeprecated(cluster, configBase.Join(registry.PathClusterCompleted), c.Cluster)
if err != nil {
return fmt.Errorf("error writing completed cluster spec: %v", err)
}
vfsMirror := vfsclientset.NewInstanceGroupMirror(cluster, configBase)
for _, g := range c.InstanceGroups {
// TODO: We need to update the mirror (below), but do we need to update the primary?
_, err := c.Clientset.InstanceGroupsFor(c.Cluster).Update(ctx, g, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("error writing InstanceGroup %q to registry: %v", g.ObjectMeta.Name, err)
}
// TODO: Don't write if vfsMirror == c.ClientSet
if err := vfsMirror.WriteMirror(g); err != nil {
return fmt.Errorf("error writing instance group spec to mirror: %v", err)
}
}
}
context, err := fi.NewContext(target, cluster, cloud, keyStore, secretStore, configBase, checkExisting, taskMap)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
var options fi.RunTasksOptions
if c.RunTasksOptions != nil {
options = *c.RunTasksOptions
} else {
options.InitDefaults()
}
err = context.RunTasks(options)
if err != nil {
return fmt.Errorf("error running tasks: %v", err)
}
if dns.IsGossipHostname(cluster.Name) {
shouldPrecreateDNS = false
}
if shouldPrecreateDNS {
if err := precreateDNS(ctx, cluster, cloud); err != nil {
klog.Warningf("unable to pre-create DNS records - cluster startup may be slower: %v", err)
}
}
err = target.Finish(taskMap) // This will finish the apply and print the changes
if err != nil {
return fmt.Errorf("error closing target: %v", err)
}
return nil
}
// upgradeSpecs ensures that fields are fully populated / defaulted
func (c *ApplyClusterCmd) upgradeSpecs(assetBuilder *assets.AssetBuilder) error {
fullCluster, err := PopulateClusterSpec(c.Clientset, c.Cluster, assetBuilder)
if err != nil {
return err
}
c.Cluster = fullCluster
for i, g := range c.InstanceGroups {
fullGroup, err := PopulateInstanceGroupSpec(fullCluster, g, c.channel)
if err != nil {
return err
}
c.InstanceGroups[i] = fullGroup
}
return nil
}
// validateKopsVersion ensures that kops meet the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKopsVersion() error {
kopsVersion, err := semver.ParseTolerant(kopsbase.Version)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsbase.Version)
// Not a hard-error
return nil
}
versionInfo := kops.FindKopsVersionSpec(c.channel.Spec.KopsVersions, kopsVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kops version %q in channel", kopsVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kops version %q in channel", kopsVersion)
}
required, err := versionInfo.IsUpgradeRequired(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kops version %q in channel", kopsVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("A new kops version is available: %s", recommended)
fmt.Printf("\n")
fmt.Printf("Upgrading is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("a new kops version is available: %s\n", recommended)
}
fmt.Println("")
fmt.Printf("This version of kops (%s) is no longer supported; upgrading is required\n", kopsbase.Version)
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Println("")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
return nil
}
// validateKubernetesVersion ensures that kubernetes meet the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKubernetesVersion() error {
parsed, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
klog.Warningf("unable to parse kubernetes version %q", c.Cluster.Spec.KubernetesVersion)
// Not a hard-error
return nil
}
kopsVersion, err := semver.Parse(kopsbase.KOPS_RELEASE_VERSION)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsVersion)
} else {
tooNewVersion := kopsVersion
tooNewVersion.Minor++
tooNewVersion.Pre = nil
tooNewVersion.Build = nil
if util.IsKubernetesGTE(tooNewVersion.String(), *parsed) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is not yet supported; upgrading kops is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_TOO_NEW_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if os.Getenv("KOPS_RUN_TOO_NEW_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
}
if !util.IsKubernetesGTE(OldestSupportedKubernetesVersion, *parsed) {
fmt.Printf("This version of Kubernetes is no longer supported; upgrading Kubernetes is required\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", OldestRecommendedKubernetesVersion))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
return fmt.Errorf("kubernetes upgrade is required")
}
if !util.IsKubernetesGTE(OldestRecommendedKubernetesVersion, *parsed) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("Kops support for this Kubernetes version is deprecated and will be removed in a future release.\n")
fmt.Printf("\n")
fmt.Printf("Upgrading Kubernetes is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", OldestRecommendedKubernetesVersion))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
// TODO: make util.ParseKubernetesVersion not return a pointer
kubernetesVersion := *parsed
versionInfo := kops.FindKubernetesVersionSpec(c.channel.Spec.KubernetesVersions, kubernetesVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kubernetes version %q in channel", kubernetesVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kubernetes version %q in channel", kubernetesVersion)
}
required, err := versionInfo.IsUpgradeRequired(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kubernetes version %q in channel", kubernetesVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
fmt.Printf("Upgrading is recommended (try kops upgrade cluster)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is no longer supported; upgrading is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kubernetes upgrade is required")
}
}
return nil
}
// addFileAssets adds the file assets within the assetBuilder
func (c *ApplyClusterCmd) addFileAssets(assetBuilder *assets.AssetBuilder) error {
var baseURL string
var err error
if components.IsBaseURL(c.Cluster.Spec.KubernetesVersion) {
baseURL = c.Cluster.Spec.KubernetesVersion
} else {
baseURL = "https://storage.googleapis.com/kubernetes-release/release/v" + c.Cluster.Spec.KubernetesVersion
}
c.Assets = make(map[architectures.Architecture][]*MirroredAsset)
c.NodeUpSource = make(map[architectures.Architecture]string)
c.NodeUpHash = make(map[architectures.Architecture]string)
for _, arch := range architectures.GetSupported() {
c.Assets[arch] = []*MirroredAsset{}
c.NodeUpSource[arch] = ""
c.NodeUpHash[arch] = ""
k8sAssetsNames := []string{
fmt.Sprintf("/bin/linux/%s/kubelet", arch),
fmt.Sprintf("/bin/linux/%s/kubectl", arch),
}
if needsMounterAsset(c.Cluster, c.InstanceGroups) {
k8sAssetsNames = append(k8sAssetsNames, fmt.Sprintf("/bin/linux/%s/mounter", arch))
}
for _, an := range k8sAssetsNames {
k, err := url.Parse(baseURL)
if err != nil {
return err
}
k.Path = path.Join(k.Path, an)
u, hash, err := assetBuilder.RemapFileAndSHA(k)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], BuildMirroredAsset(u, hash))
}
cniAsset, cniAssetHash, err := findCNIAssets(c.Cluster, assetBuilder, arch)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], BuildMirroredAsset(cniAsset, cniAssetHash))
if c.Cluster.Spec.Networking.LyftVPC != nil {
var hash *hashing.Hash
urlString := os.Getenv("LYFT_VPC_DOWNLOAD_URL")
if urlString == "" {
switch arch {
case architectures.ArchitectureAmd64:
urlString = "https://github.com/lyft/cni-ipvlan-vpc-k8s/releases/download/v0.6.0/cni-ipvlan-vpc-k8s-amd64-v0.6.0.tar.gz"
hash, err = hashing.FromString("871757d381035f64020a523e7a3e139b6177b98eb7a61b547813ff25957fc566")
case architectures.ArchitectureArm64:
urlString = "https://github.com/lyft/cni-ipvlan-vpc-k8s/releases/download/v0.6.0/cni-ipvlan-vpc-k8s-arm64-v0.6.0.tar.gz"
hash, err = hashing.FromString("3aadcb32ffda53990153790203eb72898e55a985207aa5b4451357f9862286f0")
default:
return fmt.Errorf("unknown arch for lyft asset %s", arch)
}
if err != nil {
// Should be impossible
return fmt.Errorf("invalid hard-coded hash for lyft url")
}
} else {
klog.Warningf("Using url from LYFT_VPC_DOWNLOAD_URL env var: %q", urlString)
hashString := os.Getenv("LYFT_VPC_DOWNLOAD_HASH")
hash, err = hashing.FromString(hashString)
if err != nil {
return fmt.Errorf("invalid hash supplied for lyft: %q", hashString)
}
}
u, err := url.Parse(urlString)
if err != nil {
return fmt.Errorf("unable to parse lyft-vpc URL %q", urlString)
}
c.Assets[arch] = append(c.Assets[arch], BuildMirroredAsset(u, hash))
}
asset, err := NodeUpAsset(assetBuilder, arch)
if err != nil {
return err
}
c.NodeUpSource[arch] = strings.Join(asset.Locations, ",")
c.NodeUpHash[arch] = asset.Hash.Hex()
}
// Explicitly add the protokube image,
// otherwise when the Target is DryRun this asset is not added
// Is there a better way to call this?
_, _, err = ProtokubeImageSource(assetBuilder)
if err != nil {
return err
}
return nil
}
// buildPermalink returns a link to our "permalink docs", to further explain an error message
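// For example (illustrative): buildPermalink("upgrade_k8s", "1.13.0") returns
// "https://github.com/kubernetes/kops/blob/master/permalinks/upgrade_k8s.md#1.13.0".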
func buildPermalink(key, anchor string) string {
url := "https://github.com/kubernetes/kops/blob/master/permalinks/" + key + ".md"
if anchor != "" {
url += "#" + anchor
}
return url
}
func ChannelForCluster(c *kops.Cluster) (*kops.Channel, error) {
channelLocation := c.Spec.Channel
if channelLocation == "" {
channelLocation = kops.DefaultChannel
}
return kops.LoadChannel(channelLocation)
}
// needsMounterAsset checks if we need the mounter program
// This is only needed currently on ContainerOS i.e. GCE, but we don't have a nice way to detect it yet
func needsMounterAsset(c *kops.Cluster, instanceGroups []*kops.InstanceGroup) bool {
// TODO: Do real detection of ContainerOS (but this has to work with image names, and maybe even forked images)
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderGCE:
return true
default:
return false
}
}
type nodeUpConfigBuilder struct {
*ApplyClusterCmd
assetBuilder *assets.AssetBuilder
channels []string
configBase vfs.Path
cluster *kops.Cluster
etcdManifests map[kops.InstanceGroupRole][]string
images map[kops.InstanceGroupRole]map[architectures.Architecture][]*nodeup.Image
protokubeImage map[kops.InstanceGroupRole]*nodeup.Image
}
func (c *ApplyClusterCmd) newNodeUpConfigBuilder(assetBuilder *assets.AssetBuilder) (model.NodeUpConfigBuilder, error) {
cluster := c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return nil, fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
channels := []string{
configBase.Join("addons", "bootstrap-channel.yaml").Path(),
}
for i := range cluster.Spec.Addons {
channels = append(channels, cluster.Spec.Addons[i].Manifest)
}
useGossip := dns.IsGossipHostname(cluster.Spec.MasterInternalName)
etcdManifests := map[kops.InstanceGroupRole][]string{}
images := map[kops.InstanceGroupRole]map[architectures.Architecture][]*nodeup.Image{}
protokubeImage := map[kops.InstanceGroupRole]*nodeup.Image{}
for _, role := range kops.AllInstanceGroupRoles {
isMaster := role == kops.InstanceGroupRoleMaster
images[role] = make(map[architectures.Architecture][]*nodeup.Image)
if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
// When using a custom version, we want to preload the images over http
components := []string{"kube-proxy"}
if isMaster {
components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
}
for _, arch := range architectures.GetSupported() {
for _, component := range components {
baseURL, err := url.Parse(cluster.Spec.KubernetesVersion)
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/bin/linux", string(arch), component+".tar")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Sources: []string{u.String()},
Hash: hash.Hex(),
}
images[role][arch] = append(images[role][arch], image)
}
}
}
// `docker load` our images when using a KOPS_BASE_URL, so we
// don't need to push/pull from a registry
if os.Getenv("KOPS_BASE_URL") != "" && isMaster {
for _, arch := range architectures.GetSupported() {
// TODO: Build multi-arch Kops images
if arch != architectures.ArchitectureAmd64 {
continue
}
for _, name := range []string{"kops-controller", "dns-controller", "kube-apiserver-healthcheck"} {
baseURL, err := url.Parse(os.Getenv("KOPS_BASE_URL"))
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/images/"+name+".tar.gz")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Sources: []string{u.String()},
Hash: hash.Hex(),
}
images[role][arch] = append(images[role][arch], image)
}
}
}
if isMaster || useGossip {
u, hash, err := ProtokubeImageSource(assetBuilder)
if err != nil {
return nil, err
}
asset := BuildMirroredAsset(u, hash)
protokubeImage[role] = &nodeup.Image{
Name: kopsbase.DefaultProtokubeImageName(),
Sources: asset.Locations,
Hash: asset.Hash.Hex(),
}
}
if role == kops.InstanceGroupRoleMaster {
for _, etcdCluster := range cluster.Spec.EtcdClusters {
if etcdCluster.Provider == kops.EtcdProviderTypeManager {
p := configBase.Join("manifests/etcd/" + etcdCluster.Name + ".yaml").Path()
etcdManifests[role] = append(etcdManifests[role], p)
}
}
}
}
configBuilder := nodeUpConfigBuilder{
ApplyClusterCmd: c,
assetBuilder: assetBuilder,
channels: channels,
configBase: configBase,
cluster: cluster,
etcdManifests: etcdManifests,
images: images,
protokubeImage: protokubeImage,
}
return &configBuilder, nil
}
// BuildConfig returns the NodeUp config for the given instance group
func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, apiserverAdditionalIPs []string) (*nodeup.Config, error) {
cluster := n.cluster
if ig == nil {
return nil, fmt.Errorf("instanceGroup cannot be nil")
}
role := ig.Spec.Role
if role == "" {
return nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
}
config := nodeup.NewConfig(cluster, ig)
config.Assets = make(map[architectures.Architecture][]string)
for _, arch := range architectures.GetSupported() {
config.Assets[arch] = []string{}
for _, a := range n.Assets[arch] {
config.Assets[arch] = append(config.Assets[arch], a.CompactString())
}
}
config.ClusterName = cluster.ObjectMeta.Name
config.ConfigBase = fi.String(n.configBase.Path())
config.InstanceGroupName = ig.ObjectMeta.Name
if role == kops.InstanceGroupRoleMaster {
config.ApiserverAdditionalIPs = apiserverAdditionalIPs
}
for _, manifest := range n.assetBuilder.StaticManifests {
match := false
for _, r := range manifest.Roles {
if r == role {
match = true
}
}
if !match {
continue
}
config.StaticManifests = append(config.StaticManifests, &nodeup.StaticManifest{
Key: manifest.Key,
Path: manifest.Path,
})
}
config.Images = n.images[role]
config.Channels = n.channels
config.EtcdManifests = n.etcdManifests[role]
config.ProtokubeImage = n.protokubeImage[role]
return config, nil
}
| [
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"KOPS_RUN_TOO_NEW_VERSION\"",
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"LYFT_VPC_DOWNLOAD_URL\"",
"\"LYFT_VPC_DOWNLOAD_HASH\"",
"\"KOPS_BASE_URL\"",
"\"KOPS_BASE_URL\""
]
| []
| [
"KOPS_RUN_TOO_NEW_VERSION",
"LYFT_VPC_DOWNLOAD_HASH",
"KOPS_BASE_URL",
"KOPS_RUN_OBSOLETE_VERSION",
"LYFT_VPC_DOWNLOAD_URL"
]
| [] | ["KOPS_RUN_TOO_NEW_VERSION", "LYFT_VPC_DOWNLOAD_HASH", "KOPS_BASE_URL", "KOPS_RUN_OBSOLETE_VERSION", "LYFT_VPC_DOWNLOAD_URL"] | go | 5 | 0 | |
fabfile.py | #!/usr/bin/env python
import os
from fabric.api import * # noqa
from fabric.colors import green, yellow
from fabric.contrib.console import confirm
from prettyprint import pp
import re
VERSION_PATTERN = r'^v\d+(\.\d+)+?$'
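# Matches version tags such as "v1.2" or "v1.2.3": a leading "v" followed by at
# least two dot-separated integers (a bare "v1" does not match).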
env.releases_directory = "release"
env.root_dir = os.path.abspath(os.path.dirname(__file__))
env.release = "HEAD"
proxy = os.environ.get('http_proxy', None)
env.http_proxy = env.http_proxy_port = None
if proxy is not None:
env.http_proxy, env.http_proxy_port = proxy.rsplit(":", 1)  # split on the last colon only
def latest_git_tag():
description = local('git describe master', capture=True).rstrip('\n')
if '-' in description:
latest_tag = description[:description.find('-')]
else:
latest_tag = description
if not re.match(VERSION_PATTERN, latest_tag):
latest_tag = None
return latest_tag
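# Illustrative examples of the mapping above (`git describe` output -> return value):
#   "v1.14.2-3-gabc1234" -> "v1.14.2"
#   "v1.14.2"            -> "v1.14.2"
#   "release-candidate"  -> None (prefix "release" does not match VERSION_PATTERN)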
def compare_versions(x, y):
"""
Expects 2 strings in the format of 'X.Y.Z' where X, Y and Z are
integers. It will compare the items which will organize things
properly by their major, minor and bugfix version.
::
>>> my_list = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1']
>>> sorted(my_list, cmp=compare_versions)
['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2']
"""
def version_to_tuple(version):
# Trim off the leading v
version_list = version[1:].split('.', 2)
if len(version_list) <= 3:
[version_list.append(0) for _ in range(3 - len(version_list))]
try:
return tuple((int(version) for version in version_list))
except ValueError: # not an integer, so it goes to the bottom
return (0, 0, 0)
x_major, x_minor, x_bugfix = version_to_tuple(x)
y_major, y_minor, y_bugfix = version_to_tuple(y)
return (cmp(x_major, y_major) or cmp(x_minor, y_minor)
or cmp(x_bugfix, y_bugfix))
def make_tag():
if confirm(yellow("Tag this release?"), default=True):
print(green("The last 5 tags were: "))
tags = local('git tag | tail -n 20', capture=True)
pp(sorted(tags.split('\n'), compare_versions, reverse=True))
prompt("New release tag in the format vX.Y[.Z]?", 'tag',
validate=VERSION_PATTERN)
local('git tag -as %(tag)s' % env)
local('git push origin', capture=True)
local('git push --tags origin', capture=True)
local('git fetch --tags origin', capture=True)
@task
def release():
make_tag()
@task
def sequence_diagrams():
with lcd("docs/sequences"):
local("make")
def release_descriptor(path):
with lcd(path):
return local('git describe HEAD', capture=True).rstrip("\n")
| []
| []
| [
"http_proxy"
]
| [] | ["http_proxy"] | python | 1 | 0 | |
pkg/providers/cyberark_conjur.go | package providers
import (
"fmt"
"os"
"github.com/cyberark/conjur-api-go/conjurapi"
"github.com/cyberark/conjur-api-go/conjurapi/authn"
"github.com/spectralops/teller/pkg/core"
)
type ResourceFilter struct {
Kind string
Search string
Limit int
Offset int
}
type ConjurClient interface {
AddSecret(variableID string, secretValue string) error
RetrieveSecret(variableID string) ([]byte, error)
}
type CyberArkConjur struct {
client ConjurClient
}
func NewConjurClient() (core.Provider, error) {
config, err := conjurapi.LoadConfig()
if err != nil {
return nil, err
}
conjur, err := conjurapi.NewClientFromKey(config,
authn.LoginPair{
Login: os.Getenv("CONJUR_AUTHN_LOGIN"),
APIKey: os.Getenv("CONJUR_AUTHN_API_KEY"),
},
)
if err != nil {
return nil, err
}
return &CyberArkConjur{client: conjur}, nil
}
func (c *CyberArkConjur) Name() string {
return "cyberark_conjur"
}
func (c *CyberArkConjur) Put(p core.KeyPath, val string) error {
err := c.putSecret(p, val)
return err
}
func (c *CyberArkConjur) PutMapping(p core.KeyPath, m map[string]string) error {
return fmt.Errorf("provider %q does not implement put mapping yet", c.Name())
}
func (c *CyberArkConjur) GetMapping(p core.KeyPath) ([]core.EnvEntry, error) {
return nil, fmt.Errorf("provider %q does not implement get mapping yet", c.Name())
}
func (c *CyberArkConjur) Get(p core.KeyPath) (*core.EnvEntry, error) {
secret, err := c.getSecret(p)
if err != nil {
return nil, err
}
if secret == nil {
ent := p.Missing()
return &ent, nil
}
ent := p.Found(string(secret))
return &ent, nil
}
func (c *CyberArkConjur) Delete(kp core.KeyPath) error {
return fmt.Errorf("%s does not implement delete yet", c.Name())
}
func (c *CyberArkConjur) DeleteMapping(kp core.KeyPath) error {
return fmt.Errorf("%s does not implement delete yet", c.Name())
}
func (c *CyberArkConjur) getSecret(kp core.KeyPath) ([]byte, error) {
return c.client.RetrieveSecret(kp.Path)
}
func (c *CyberArkConjur) putSecret(kp core.KeyPath, val string) error {
return c.client.AddSecret(kp.Path, val)
}
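// Illustrative usage sketch (assumptions: a Conjur endpoint resolvable by
// conjurapi.LoadConfig, plus the CONJUR_AUTHN_LOGIN and CONJUR_AUTHN_API_KEY
// environment variables; the secret path below is hypothetical):
//
//	provider, err := NewConjurClient()
//	if err != nil {
//		// handle configuration/authentication errors
//	}
//	entry, err := provider.Get(core.KeyPath{Path: "prod/db/password"})
//	_ = entry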
| [
"\"CONJUR_AUTHN_LOGIN\"",
"\"CONJUR_AUTHN_API_KEY\""
]
| []
| [
"CONJUR_AUTHN_API_KEY",
"CONJUR_AUTHN_LOGIN"
]
| [] | ["CONJUR_AUTHN_API_KEY", "CONJUR_AUTHN_LOGIN"] | go | 2 | 0 | |
doc.go | // Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package sessions provides cookie and filesystem sessions and
infrastructure for custom session backends.
The key features are:
* Simple API: use it as an easy way to set signed (and optionally
encrypted) cookies.
* Built-in backends to store sessions in cookies or the filesystem.
* Flash messages: session values that last until read.
* Convenient way to switch session persistency (aka "remember me") and set
other attributes.
* Mechanism to rotate authentication and encryption keys.
* Multiple sessions per request, even using different backends.
* Interfaces and infrastructure for custom session backends: sessions from
different stores can be retrieved and batch-saved using a common API.
Let's start with an example that shows the sessions API in a nutshell:
import (
"net/http"
"github.com/gorilla/sessions"
)
// Note: Don't store your key in your source code. Pass it via an
// environmental variable, or flag (or both), and don't accidentally commit it
// alongside your code. Ensure your key is sufficiently random - i.e. use Go's
// crypto/rand or securecookie.GenerateRandomKey(32) and persist the result.
// Ensure SESSION_KEY exists in the environment, or sessions will fail.
var store = sessions.NewCookieStore([]byte(os.Getenv("SESSION_KEY")))
func MyHandler(w http.ResponseWriter, r *http.Request) {
// Get a session. Get() always returns a session, even if empty.
session, err := store.Get(r, "session-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Set some session values.
session.Values["foo"] = "bar"
session.Values[42] = 43
// Save it before we write to the response/return from the handler.
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
First we initialize a session store calling NewCookieStore() and passing a
secret key used to authenticate the session. Inside the handler, we call
store.Get() to retrieve an existing session or a new one. Then we set some
session values in session.Values, which is a map[interface{}]interface{}.
And finally we call session.Save() to save the session in the response.
Note that in production code, we should check for errors when calling
session.Save(r, w), and either display an error message or otherwise handle it.
Save must be called before writing to the response, otherwise the session
cookie will not be sent to the client.
That's all you need to know for the basic usage. Let's take a look at other
options, starting with flash messages.
Flash messages are session values that last until read. The term appeared with
Ruby On Rails a few years back. When we request a flash message, it is removed
from the session. To add a flash, call session.AddFlash(), and to get all
flashes, call session.Flashes(). Here is an example:
func MyHandler(w http.ResponseWriter, r *http.Request) {
// Get a session.
session, err := store.Get(r, "session-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Get the previous flashes, if any.
if flashes := session.Flashes(); len(flashes) > 0 {
// Use the flash values.
} else {
// Set a new flash.
session.AddFlash("Hello, flash messages world!")
}
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
Flash messages are useful to set information to be read after a redirection,
like after form submissions.
There may also be cases where you want to store a complex datatype within a
session, such as a struct. Sessions are serialised using the encoding/gob package,
so it is easy to register new datatypes for storage in sessions:
import(
"encoding/gob"
"github.com/gorilla/sessions"
)
type Person struct {
FirstName string
LastName string
Email string
Age int
}
type M map[string]interface{}
func init() {
gob.Register(&Person{})
gob.Register(&M{})
}
As it's not possible to pass a raw type as a parameter to a function, gob.Register()
relies on us passing it a value of the desired type. In the example above we've passed
it a pointer to a struct and a pointer to a custom type representing a
map[string]interface. (We could have passed non-pointer values if we wished.) This will
then allow us to serialise/deserialise values of those types to and from our sessions.
Note that because session values are stored in a map[interface{}]interface{}, there's
a need to type-assert data when retrieving it. We'll use the Person struct we registered above:
func MyHandler(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "session-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Retrieve our struct and type-assert it
val := session.Values["person"]
person, ok := val.(*Person)
if !ok {
// Handle the case that it's not an expected type
}
// Now we can use our person object
}
By default, session cookies last for a month. This is probably too long for
some cases, but it is easy to change this and other attributes during
runtime. Sessions can be configured individually or the store can be
configured and then all sessions saved using it will use that configuration.
We access session.Options or store.Options to set a new configuration. The
fields are basically a subset of http.Cookie fields. Let's change the
maximum age of a session to one week:
session.Options = &sessions.Options{
Path: "/",
MaxAge: 86400 * 7,
HttpOnly: true,
}
Sometimes we may want to change authentication and/or encryption keys without
breaking existing sessions. The CookieStore supports key rotation, and to use
it you just need to set multiple authentication and encryption keys, in pairs,
to be tested in order:
var store = sessions.NewCookieStore(
[]byte("new-authentication-key"),
[]byte("new-encryption-key"),
[]byte("old-authentication-key"),
[]byte("old-encryption-key"),
)
New sessions will be saved using the first pair. Old sessions can still be
read because the first pair will fail, and the second will be tested. This
makes it easy to "rotate" secret keys and still be able to validate existing
sessions. Note: for all pairs the encryption key is optional; set it to nil
or omit it and encryption won't be used.
Multiple sessions can be used in the same request, even with different
session backends. When this happens, calling Save() on each session
individually would be cumbersome, so we have a way to save all sessions
at once: it's sessions.Save(). Here's an example:
var store = sessions.NewCookieStore([]byte("something-very-secret"))
func MyHandler(w http.ResponseWriter, r *http.Request) {
// Get a session and set a value.
session1, _ := store.Get(r, "session-one")
session1.Values["foo"] = "bar"
// Get another session and set another value.
session2, _ := store.Get(r, "session-two")
session2.Values[42] = 43
// Save all sessions.
err := sessions.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
This is possible because when we call Get() from a session store, it adds the
session to a common registry. Save() uses it to save all registered sessions.
*/
package bzhysessions
| [
"\"SESSION_KEY\""
]
| []
| [
"SESSION_KEY"
]
| [] | ["SESSION_KEY"] | go | 1 | 0 | |
sending_scheduler/celery.py | from __future__ import absolute_import, unicode_literals
from celery import Celery
from django.conf import settings
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sending_scheduler.settings')
app = Celery('sending_scheduler')
app.config_from_object(settings, namespace='CELERY')
# # Load task modules from all registered Django apps.
app.autodiscover_tasks()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
util/os.go | // Copyright (c) 2016 ECS Team, Inc. - All Rights Reserved
// https://github.com/ECSTeam/cloudfoundry-top-plugin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"os"
"strings"
)
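// IsCygwin reports whether we appear to be running inside a Cygwin-style shell on
// Windows, using the presence of the SHELL environment variable as the hint.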
func IsCygwin() bool {
if IsMSWindows() {
shell := os.Getenv("SHELL")
if len(shell) > 0 {
return true
}
}
return false
}
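// IsMSWindows reports whether the OS environment variable indicates a Windows host.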
func IsMSWindows() bool {
if strings.Contains(strings.ToLower(os.Getenv("OS")), "windows") {
return true
}
return false
}
| [
"\"SHELL\"",
"\"OS\""
]
| []
| [
"SHELL",
"OS"
]
| [] | ["SHELL", "OS"] | go | 2 | 0 | |
tools/make-fuchsia-vol/make-fuchsia-vol.go | // Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// make-fuchsia-vol is a temporarily useful script that provisions Fuchsia
// volumes based on paths provided.
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"go.fuchsia.dev/fuchsia/garnet/go/src/thinfs/block/file"
"go.fuchsia.dev/fuchsia/garnet/go/src/thinfs/fs"
"go.fuchsia.dev/fuchsia/garnet/go/src/thinfs/fs/msdosfs"
"go.fuchsia.dev/fuchsia/garnet/go/src/thinfs/gpt"
"go.fuchsia.dev/fuchsia/garnet/go/src/thinfs/mbr"
)
var (
verbose = flag.Bool("verbose", false, "enable verbose logging")
fuchsiaBuildDir = flag.String("fuchsia-build-dir", os.Getenv("FUCHSIA_BUILD_DIR"), "fuchsia build dir")
bootloader = flag.String("bootloader", "", "path to bootx64.efi")
zbi = flag.String("zbi", "", "path to zbi (default: zircon-a from image manifests)")
cmdline = flag.String("cmdline", "", "path to command line file (if exists)")
zedboot = flag.String("zedboot", "", "path to zedboot.zbi (default: zircon-r from image manifests)")
ramdiskOnly = flag.Bool("ramdisk-only", false, "ramdisk-only mode - only write an ESP partition")
blob = flag.String("blob", "", "path to blob partition image (not used with ramdisk)")
data = flag.String("data", "", "path to data partition image (not used with ramdisk)")
abr = flag.Bool("abr", true, "add Zircon-{A,B,R} partitions")
zirconA = flag.String("zirconA", "", "path to partition image for Zircon-A (default: from -zbi)")
zirconB = flag.String("zirconB", "", "path to partition image for Zircon-B (default: from -zbi)")
zirconR = flag.String("zirconR", "", "path to partition image for Zircon-R (default: zircon-r from image manifests)")
abrSize = flag.Int64("abr-size", 64*1024*1024, "Kernel partition size for A/B/R")
blockSize = flag.Int64("block-size", 0, "the block size of the target disk (0 means detect)")
physicalBlockSize = flag.Int64("physical-block-size", 0, "the physical block size of the target disk (0 means detect)")
optimalTransferSize = flag.Int64("optimal-transfer-size", 0, "the optimal transfer size of the target disk (0 means unknown/unused)")
efiSize = flag.Int64("efi-size", 63*1024*1024, "efi partition size in bytes")
fvmSize = flag.Int64("fvm-size", 0, "fvm partition size in bytes (0 means `fill`)")
resize = flag.Int64("resize", 0, "create or resize the image to this size in bytes")
)
// imageManifest is basename of the image manifest file.
const imageManifest = "images.json"
type imageManifestEntry struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
// imagePaths contains the default image paths that are produced by a build manifest, populated by tryLoadManifests.
var imagePaths = map[string]string{}
// getImage fetches an image by name or exits fatally
func getImage(name string) string {
if path, ok := imagePaths[name]; ok {
return path
}
log.Fatalf("Missing image path: %q cannot continue", name)
return ""
}
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s disk-path\n", filepath.Base(os.Args[0]))
flag.PrintDefaults()
}
}
func tryLoadManifests() {
if *fuchsiaBuildDir == "" {
return
}
f, err := os.Open(filepath.Join(*fuchsiaBuildDir, imageManifest))
if err != nil {
log.Printf("warning: failed to load %s: %v", imageManifest, err)
return
}
defer f.Close()
var entries []imageManifestEntry
if err := json.NewDecoder(f).Decode(&entries); err != nil {
log.Printf("warning: failed to load %s: %v", imageManifest, err)
return
}
for _, image := range entries {
imagePaths[image.Type+"_"+image.Name] = image.Path
}
}
func needFuchsiaBuildDir() {
if *fuchsiaBuildDir == "" {
log.Fatalf("either pass -fuchsia-build-dir or set $FUCHSIA_BUILD_DIR")
}
}
func main() {
flag.Parse()
tryLoadManifests()
if *bootloader == "" {
needFuchsiaBuildDir()
*bootloader = filepath.Join(*fuchsiaBuildDir, "efi_x64/bootx64.efi")
}
if _, err := os.Stat(*bootloader); err != nil {
log.Fatalf("cannot read %q: %s", *bootloader, err)
}
if *zbi == "" {
needFuchsiaBuildDir()
*zbi = filepath.Join(*fuchsiaBuildDir, getImage("zbi_zircon-a"))
}
if _, err := os.Stat(*zbi); err != nil {
log.Fatalf("cannot read %q: %s", *zbi, err)
}
if *zedboot == "" {
needFuchsiaBuildDir()
*zedboot = filepath.Join(*fuchsiaBuildDir, getImage("zbi_zircon-r"))
}
if _, err := os.Stat(*zedboot); err != nil {
log.Fatalf("cannot read %q: %s", *zedboot, err)
}
if *cmdline == "" {
needFuchsiaBuildDir()
p := filepath.Join(*fuchsiaBuildDir, "cmdline")
if _, err := os.Stat(p); err == nil {
*cmdline = p
}
} else {
if _, err := os.Stat(*cmdline); err != nil {
log.Fatal(err)
}
}
if *abr {
if *zirconA == "" {
needFuchsiaBuildDir()
*zirconA = *zbi
}
if *zirconB == "" {
*zirconB = *zbi
}
if *zirconR == "" {
*zirconR = filepath.Join(*fuchsiaBuildDir, getImage("zbi_zircon-r"))
}
}
if !*ramdiskOnly {
if *blob == "" {
needFuchsiaBuildDir()
*blob = filepath.Join(*fuchsiaBuildDir, getImage("blk_blob"))
}
if *data == "" {
needFuchsiaBuildDir()
*data = filepath.Join(*fuchsiaBuildDir, getImage("blk_data"))
}
if _, err := os.Stat(*blob); err != nil {
log.Fatalf("Blob image error: %s\nEither provide a blob image, or pass -ramdisk-only", err)
}
if _, err := os.Stat(*data); err != nil {
log.Fatalf("Data image error: %s\nEither provide a data image, or pass -ramdisk-only", err)
}
}
if len(flag.Args()) != 1 {
flag.Usage()
os.Exit(1)
}
disk, err := filepath.Abs(flag.Args()[0])
if err != nil {
log.Fatal(err)
}
if *resize != 0 {
s, err := os.Stat(disk)
if err == nil {
if s.Size() != *resize {
if *verbose {
log.Printf("Resizing %q from %d to %d", disk, s.Size(), *resize)
}
if err := os.Truncate(disk, *resize); err != nil {
log.Fatalf("failed to truncate %q to %d: %s", disk, *resize, err)
}
}
} else if os.IsNotExist(err) {
if *verbose {
log.Printf("Creating %q", disk)
}
f, err := os.Create(disk)
if err != nil {
log.Fatalf("failed to create %q: %s", disk, err)
}
if err := f.Truncate(*resize); err != nil {
log.Fatalf("failed to truncate %q to %d: %s", disk, *resize, err)
}
f.Close()
} else {
log.Fatal(err)
}
} else {
if _, err := os.Stat(disk); err != nil {
log.Fatalf("cannot read %q: %s\n", disk, err)
}
}
f, err := os.Open(disk)
if err != nil {
log.Fatal(err)
}
if *blockSize == 0 {
lbs, err := gpt.GetLogicalBlockSize(f)
if err != nil {
log.Printf("WARNING: could not detect logical block size: %s. Assuming %d\n", err, lbs)
}
*blockSize = int64(lbs)
}
if *physicalBlockSize == 0 {
pbs, err := gpt.GetPhysicalBlockSize(f)
if err != nil {
log.Printf("WARNING: could not detect physical block size: %s. Assuming %d\n", err, pbs)
}
*physicalBlockSize = int64(pbs)
}
if *physicalBlockSize < 4096 && runtime.GOOS == "darwin" {
// OSX is not reliably returning correct values for USB sticks, unclear why
*physicalBlockSize = 4096
}
var (
logical = uint64(*blockSize)
physical = uint64(*physicalBlockSize)
optimal = uint64(*optimalTransferSize)
diskSize uint64
)
// ignore the error here as it may be an image file...
diskSize, _ = gpt.GetDiskSize(f)
if diskSize == 0 {
s, err := os.Stat(disk)
if err != nil {
log.Fatalf("could not stat %q: %s\n", disk, err)
}
diskSize = uint64(s.Size())
}
if diskSize == 0 {
log.Fatalf("could not determine size of %q", disk)
}
// Note: this isn't entirely correct, as it doesn't take into account padding.
// Consider adding a real API for this in the GPT lib.
minGPTSize := int64((gpt.MinPartitionEntryArraySize + gpt.HeaderSize) * 2)
if uint64(*efiSize+minGPTSize) > diskSize {
log.Fatalf("%q is not large enough for the partition layout\n", disk)
}
if *verbose {
log.Printf("Disk: %s", disk)
log.Printf("Disk size: %d", diskSize)
log.Printf("Block Size: %d", logical)
log.Printf("Physical block size: %d", physical)
log.Printf("Optimal transfer size: %d", optimal)
}
g, err := gpt.ReadGPT(f, logical, diskSize)
if err != nil {
log.Fatal(err)
}
lbaSize := diskSize / logical
g.MBR = mbr.NewProtectiveMBR(lbaSize)
g.Primary.Partitions = []gpt.PartitionEntry{}
g.Update(logical, physical, optimal, diskSize) // for the firstusablelba
end := g.Primary.FirstUsableLBA
var efiStart uint64
efiStart, end = optimalBlockAlign(end, uint64(*efiSize), logical, physical, optimal)
// compute the size of the fat geometry that fits within the well-aligned GPT
// partition that was computed above.
*efiSize = fitFAT(int64((end-1)-efiStart) * int64(logical))
// efiEnd is the last sector of viable fat geometry, which may be different
// from end, which is the last sector of the gpt partition.
efiEnd := efiStart + (uint64(*efiSize) / logical) - 1
if *verbose {
log.Printf("EFI START: %d", efiStart)
log.Printf("EFI END: %d", efiEnd)
log.Printf("EFI LB SIZE: %d", efiEnd-efiStart+1)
}
g.Primary.Partitions = append(g.Primary.Partitions, gpt.PartitionEntry{
PartitionTypeGUID: gpt.GUIDEFI,
UniquePartitionGUID: gpt.NewRandomGUID(),
PartitionName: gpt.NewPartitionName("efi-system"),
StartingLBA: efiStart,
EndingLBA: end,
})
var startingCHS [3]byte
startingCHS[0] = byte(efiStart / (16 * 63))
startingCHS[1] = byte((efiStart / 63) % 16)
startingCHS[2] = byte((efiStart % 63) + 1)
var endingCHS [3]byte
endingCHS[0] = byte(efiEnd / (16 * 63))
endingCHS[1] = byte((efiEnd / 63) % 16)
endingCHS[2] = byte((efiEnd % 63) + 1)
// Install a "hybrid MBR" hack for the case of bios bootloaders that might
// need it (e.g. rpi's binary blob that's stuck in MBR land).
g.MBR.PartitionRecord[2] = mbr.PartitionRecord{
BootIndicator: 0x80,
StartingCHS: startingCHS,
EndingCHS: endingCHS,
OSType: mbr.FAT32,
StartingLBA: uint32(efiStart),
SizeInLBA: uint32(efiEnd),
}
var aStart, bStart, rStart uint64
if *abr {
aStart, end = optimalBlockAlign(end, uint64(*abrSize), logical, physical, optimal)
g.Primary.Partitions = append(g.Primary.Partitions, gpt.PartitionEntry{
PartitionTypeGUID: gpt.GUIDFuchsiaZirconA,
UniquePartitionGUID: gpt.NewRandomGUID(),
PartitionName: gpt.NewPartitionName("ZIRCON-A"),
StartingLBA: aStart,
EndingLBA: end,
})
bStart, end = optimalBlockAlign(end, uint64(*abrSize), logical, physical, optimal)
g.Primary.Partitions = append(g.Primary.Partitions, gpt.PartitionEntry{
PartitionTypeGUID: gpt.GUIDFuchsiaZirconB,
UniquePartitionGUID: gpt.NewRandomGUID(),
PartitionName: gpt.NewPartitionName("ZIRCON-B"),
StartingLBA: bStart,
EndingLBA: end,
})
rStart, end = optimalBlockAlign(end, uint64(*abrSize), logical, physical, optimal)
g.Primary.Partitions = append(g.Primary.Partitions, gpt.PartitionEntry{
PartitionTypeGUID: gpt.GUIDFuchsiaZirconR,
UniquePartitionGUID: gpt.NewRandomGUID(),
PartitionName: gpt.NewPartitionName("ZIRCON-R"),
StartingLBA: rStart,
EndingLBA: end,
})
}
var fvmStart uint64
fvmStart, end = optimalBlockAlign(end+1, uint64(*fvmSize), logical, physical, optimal)
if !*ramdiskOnly {
if *fvmSize == 0 {
end = g.Primary.LastUsableLBA
}
*fvmSize = int64((end - fvmStart) * logical)
g.Primary.Partitions = append(g.Primary.Partitions, gpt.PartitionEntry{
PartitionTypeGUID: gpt.GUIDFuchsiaFVM,
UniquePartitionGUID: gpt.NewRandomGUID(),
PartitionName: gpt.NewPartitionName("FVM"),
StartingLBA: fvmStart,
EndingLBA: end,
})
}
g.Update(logical, physical, optimal, diskSize)
if err := g.Validate(); err != nil {
log.Fatal(err)
}
if *verbose {
log.Printf("EFI size: %d", *efiSize)
if !*ramdiskOnly {
log.Printf("FVM size: %d", *fvmSize)
}
log.Printf("Writing GPT")
}
f, err = os.OpenFile(disk, os.O_RDWR, 0750)
if err != nil {
log.Fatal(err)
}
if _, err := g.WriteTo(f); err != nil {
log.Fatalf("error writing partition table: %s", err)
}
f.Sync()
aStart = aStart * logical
bStart = bStart * logical
rStart = rStart * logical
efiStart = efiStart * logical
fvmStart = fvmStart * logical
if *verbose {
log.Printf("Writing EFI partition and files")
}
cmd := exec.Command(fuchsiaTool("mkfs-msdosfs"),
"-@", strconv.FormatUint(efiStart, 10),
// XXX(raggi): mkfs-msdosfs offset gets subtracted by the tool for available
// size, so we have to add the offset back on to get the correct geometry.
"-S", strconv.FormatUint(uint64(*efiSize)+efiStart, 10),
"-F", "32",
"-L", "ESP",
"-O", "Fuchsia",
"-b", fmt.Sprintf("%d", logical),
disk,
)
if out, err := cmd.CombinedOutput(); err != nil {
log.Printf("mkfs-msdosfs failed:\n%s", out)
log.Fatal(err)
}
dev, err := file.NewRange(f, int64(logical), int64(efiStart), *efiSize)
if err != nil {
log.Fatal(err)
}
fatfs, err := msdosfs.New(disk, dev, fs.ReadWrite|fs.Force)
if err != nil {
log.Fatal(err)
}
root := fatfs.RootDirectory()
tf, err := ioutil.TempFile("", "gsetup-boot")
if err != nil {
log.Fatal(err)
}
tf.WriteString("efi\\boot\\bootx64.efi")
tf.Close()
defer os.Remove(tf.Name())
msCopyIn(root, tf.Name(), "EFI/Google/GSetup/Boot")
msCopyIn(root, *bootloader, "EFI/BOOT/bootx64.efi")
if !*abr {
msCopyIn(root, *zbi, "zircon.bin")
msCopyIn(root, *zedboot, "zedboot.bin")
}
if *cmdline != "" {
msCopyIn(root, *cmdline, "cmdline")
}
root.Sync()
if err := root.Close(); err != nil {
log.Fatal(err)
}
if err := fatfs.Close(); err != nil {
log.Fatal(err)
}
f.Sync()
if *abr {
if *verbose {
log.Print("Populating A/B/R partitions")
}
partitionCopy(f, int64(aStart), *abrSize, *zirconA)
partitionCopy(f, int64(bStart), *abrSize, *zirconB)
partitionCopy(f, int64(rStart), *abrSize, *zirconR)
}
f.Sync()
if !*ramdiskOnly {
if *verbose {
log.Print("Populating FVM in GPT image")
}
fvm(disk, int64(fvmStart), *fvmSize, "create", "--blob", *blob, "--data", *data)
}
// Keep the file open so that OSX doesn't try to remount the disk while tools are working on it.
if err := f.Close(); err != nil {
log.Fatal(err)
}
if *verbose {
log.Printf("Done")
}
}
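// partitionCopy copies the file at path into the open disk image f, starting at byte
// offset start and writing at most size bytes (a warning is logged if the input is larger).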
func partitionCopy(f *os.File, start, size int64, path string) {
input, err := os.Open(path)
if err != nil {
log.Fatalf("partition copy failed for input: %s: %s", path, err)
}
defer input.Close()
input_info, err := input.Stat()
if err != nil {
log.Fatalf("stat failed for input: %s: %s", path, err)
}
if input_info.Size() > size {
log.Printf("WARNING: %s is larger than the provided ABR size", path)
}
r := io.LimitReader(input, size)
if _, err := f.Seek(start, os.SEEK_SET); err != nil {
log.Fatalf("partition copy failed for input: %s: %s", path, err)
}
_, err = io.Copy(f, r)
if err != nil {
log.Fatalf("partition copy failed for input: %s: %s", path, err)
}
}
func fvm(disk string, offset, size int64, command string, args ...string) {
offs := strconv.FormatInt(offset, 10)
szs := strconv.FormatInt(size, 10)
argv := []string{disk, command, "--offset", offs, "--length", szs}
argv = append(argv, args...)
cmd := exec.Command(fuchsiaTool("fvm"), argv...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("fvm %s failed", argv)
}
}
// msCopyIn copies src from the host filesystem into dst under the given
// msdosfs root.
func msCopyIn(root fs.Directory, src, dst string) {
d := root
defer d.Sync()
dStack := []fs.Directory{}
defer func() {
for _, d := range dStack {
d.Sync()
d.Close()
}
}()
destdir := filepath.Dir(dst)
name := filepath.Base(dst)
for _, part := range strings.Split(destdir, "/") {
if part == "." {
continue
}
var err error
_, d, _, err = d.Open(part, fs.OpenFlagRead|fs.OpenFlagCreate|fs.OpenFlagDirectory)
if err != nil {
log.Fatalf("open/create %s: %#v %s", part, err, err)
}
d.Sync()
dStack = append(dStack, d)
}
to, _, _, err := d.Open(name, fs.OpenFlagWrite|fs.OpenFlagCreate|fs.OpenFlagFile)
if err != nil {
log.Fatalf("creating %s in msdosfs: %s", name, err)
}
defer to.Close()
from, err := os.Open(src)
if err != nil {
log.Fatal(err)
}
defer from.Close()
b := make([]byte, 4096)
for err == nil {
var n int
n, err = from.Read(b)
if n > 0 {
if _, err := to.Write(b[:n], 0, fs.WhenceFromCurrent); err != nil {
log.Fatalf("writing %s to msdosfs file: %s", name, err)
}
}
}
to.Sync()
if err != nil && err != io.EOF {
log.Fatal(err)
}
}
// optimalBlockAlign computes a start and end logical block address for a
// partition that starts at or after first (block address), of size byteSize,
// for a disk with logical, physical and optimal byte sizes. It returns the
// start and end block addresses.
func optimalBlockAlign(first, byteSize, logical, physical, optimal uint64) (start, end uint64) {
var alignTo = logical
if physical > alignTo {
alignTo = physical
}
if optimal > alignTo {
alignTo = optimal
}
lAlign := alignTo / logical
if d := first % lAlign; d != 0 {
start = first + lAlign - d
} else {
// first is already aligned; keep it rather than leaving start at zero
start = first
}
lSize := byteSize / logical
if byteSize%logical == 0 {
lSize++
}
end = start + lSize
return
}
func fuchsiaTool(name string) string {
var tool string
tool, _ = exec.LookPath(name)
if tool == "" {
needFuchsiaBuildDir()
tool, _ = exec.LookPath(filepath.Join(*fuchsiaBuildDir, "host_x64", name))
}
if tool == "" {
log.Fatalf("Could not find %q, you might need to build fuchsia", name)
}
return tool
}
// sectors per track is 63, and a sector is 512, so we must round to the nearest
// 32256.
const sizeAlignment = 32256
// N.B. fitFAT shrinks, not grows, as it intends to identify the nearest
// FAT-compatible geometry that fits inside of "total".
func fitFAT(total int64) int64 {
if d := total % sizeAlignment; d != 0 {
total = total - d
}
return total
}
| [
"\"FUCHSIA_BUILD_DIR\""
]
| []
| [
"FUCHSIA_BUILD_DIR"
]
| [] | ["FUCHSIA_BUILD_DIR"] | go | 1 | 0 | |
test/python/WMComponent_t/AnalyticsDataCollector_t/Plugins_t/Tier0Plugin_t.py | """
_Tier0Plugin_t_
Test the Tier-0 plugin for the AnalyticsDataCollector
Created on Nov 7, 2012
@author: dballest
"""
import os
import unittest
from WMComponent.AnalyticsDataCollector.Plugins.Tier0Plugin import Tier0Plugin
from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.WMSpec.StdSpecs.PromptReco import PromptRecoWorkloadFactory
from WMCore.WMSpec.WMWorkload import newWorkload
from WMCore.WorkQueue.WMBSHelper import WMBSHelper
from WMQuality.TestInitCouchApp import TestInitCouchApp as TestInit
class Tier0PluginTest(unittest.TestCase):
def setUp(self):
"""
_setUp_
Setup the test environment
"""
self.testInit = TestInit(__file__)
self.testInit.setDatabaseConnection()
self.testInit.setSchema(["WMCore.WMBS"])
self.requestCouchDB = 'wmstats_plugin_t'
self.testInit.setupCouch(self.requestCouchDB, 'T0Request')
self.testDir = self.testInit.generateWorkDir()
reqDBURL = "%s/%s" % (os.environ['COUCHURL'], self.requestCouchDB)
self.requestDBWriter = RequestDBWriter(reqDBURL, couchapp="T0Request")
self.requestDBWriter._setNoStale()
self.stateMap = {}
self.orderedStates = []
self.plugin = None
return
def tearDown(self):
"""
_tearDown_
Clear databases and delete files
"""
self.testInit.tearDownCouch()
self.testInit.clearDatabase()
self.testInit.delWorkDir()
return
def setupRepackWorkflow(self):
"""
_setupRepackWorkflow_
Populate WMBS with a repack-like workflow,
every subscription must be unfinished at first
"""
workflowName = 'Repack_Run481516_StreamZ'
mergeTasks = ['RepackMergewrite_QuadElectron_RAW', 'RepackMergewrite_TriPhoton_RAW',
'RepackMergewrite_SingleNeutrino_RAW']
self.stateMap = {'Merge': [],
'Processing Done': []}
self.orderedStates = ['Merge', 'Processing Done']
# Populate WMStats
self.requestDBWriter.insertGenericRequest({'RequestName': workflowName})
self.requestDBWriter.updateRequestStatus(workflowName, 'Closed')
# Create a wmspec in disk
workload = newWorkload(workflowName)
repackTask = workload.newTask('Repack')
for task in mergeTasks:
repackTask.addTask(task)
repackTask.addTask('RepackCleanupUnmergedwrite_QuadElectron_RAW')
specPath = os.path.join(self.testDir, 'Repack.pkl')
workload.save(specPath)
# Populate WMBS
topFileset = Fileset(name='TestStreamerFileset')
topFileset.create()
options = {'spec': specPath, 'owner': 'ItsAMeMario',
'name': workflowName, 'wfType': 'tier0'}
topLevelWorkflow = Workflow(task='/%s/Repack' % workflowName,
**options)
topLevelWorkflow.create()
topLevelSub = Subscription(topFileset, topLevelWorkflow)
topLevelSub.create()
self.stateMap['Merge'].append(topFileset)
for task in mergeTasks:
mergeWorkflow = Workflow(task='/%s/Repack/%s' % (workflowName, task), **options)
mergeWorkflow.create()
unmergedFileset = Fileset(name='TestUnmergedFileset%s' % task)
unmergedFileset.create()
mergeSub = Subscription(unmergedFileset, mergeWorkflow)
mergeSub.create()
self.stateMap['Processing Done'].append(unmergedFileset)
cleanupWorkflow = Workflow(task='/Repack_Run481516_StreamZ/Repack/RepackCleanupUnmergedwrite_QuadElectron_RAW',
**options)
cleanupWorkflow.create()
unmergedFileset = Fileset(name='TestUnmergedFilesetToCleanup')
unmergedFileset.create()
cleanupSub = Subscription(unmergedFileset, cleanupWorkflow)
cleanupSub.create()
return
def setupExpressWorkflow(self):
"""
_setupExpressWorkflow_
Populate WMBS with a express-like workflow,
every subscription must be unfinished at first
"""
workflowName = 'Express_Run481516_StreamZFast'
secondLevelTasks = ['ExpressMergewrite_StreamZFast_DQM', 'ExpressMergewrite_ExpressPhysics_FEVT',
'ExpressAlcaSkimwrite_StreamZFast_ALCARECO', 'ExpressCleanupUnmergedwrite_StreamZFast_DQM',
'ExpressCleanupUnmergedwrite_ExpressPhysics_FEVT',
'ExpressCleanupUnmergedwrite_StreamZFast_ALCARECO']
alcaHarvestTask = 'ExpressAlcaSkimwrite_StreamZFast_ALCARECOAlcaHarvestALCARECOStreamPromptCalibProd'
dqmHarvestTask = 'ExpressMergewrite_StreamZFast_DQMEndOfRunDQMHarvestMerged'
self.stateMap = {'Merge': [],
'Harvesting': [],
'Processing Done': []}
self.orderedStates = ['Merge', 'Harvesting', 'Processing Done']
# Populate WMStats
self.requestDBWriter.insertGenericRequest({'RequestName': workflowName})
self.requestDBWriter.updateRequestStatus(workflowName, 'Closed')
# Create a wmspec in disk
workload = newWorkload(workflowName)
expressTask = workload.newTask('Express')
for task in secondLevelTasks:
secondLevelTask = expressTask.addTask(task)
if task == 'ExpressAlcaSkimwrite_StreamZFast_ALCARECO':
secondLevelTask.addTask(alcaHarvestTask)
elif task == 'ExpressMergewrite_StreamZFast_DQM':
secondLevelTask.addTask(dqmHarvestTask)
specPath = os.path.join(self.testDir, 'Express.pkl')
workload.save(specPath)
# Populate WMBS
sharedFileset = Fileset(name='TestFileset')
sharedFileset.create()
sharedFileset.markOpen(False)
options = {'spec': specPath, 'owner': 'ItsAMeMario',
'name': workflowName, 'wfType': 'tier0'}
topLevelWorkflow = Workflow(task='/%s/Express' % workflowName,
**options)
topLevelWorkflow.create()
topLevelSub = Subscription(sharedFileset, topLevelWorkflow)
topLevelSub.create()
self.stateMap['Merge'].append(topLevelSub)
for task in [x for x in secondLevelTasks if not x.count('CleanupUnmerged')]:
secondLevelWorkflow = Workflow(task='/%s/Express/%s' % (workflowName, task), **options)
secondLevelWorkflow.create()
mergeSub = Subscription(sharedFileset, secondLevelWorkflow)
mergeSub.create()
self.stateMap['Harvesting'].append(mergeSub)
for (parent, child) in [('ExpressAlcaSkimwrite_StreamZFast_ALCARECO', alcaHarvestTask),
('ExpressMergewrite_StreamZFast_DQM', dqmHarvestTask)]:
harvestingWorkflow = Workflow(task='/%s/Express/%s/%s' % (workflowName, parent, child),
**options)
harvestingWorkflow.create()
harvestingSub = Subscription(sharedFileset, harvestingWorkflow)
harvestingSub.create()
self.stateMap['Processing Done'].append(harvestingSub)
return
def setupPromptRecoWorkflow(self):
"""
_setupPromptRecoWorkflow_
Populate WMBS with a real PromptReco workflow,
every subscription must be unfinished at first
"""
# Populate disk and WMBS
testArguments = PromptRecoWorkloadFactory.getTestArguments()
workflowName = 'PromptReco_Run195360_Cosmics'
factory = PromptRecoWorkloadFactory()
testArguments["EnableHarvesting"] = True
testArguments["CouchURL"] = os.environ["COUCHURL"]
workload = factory.factoryWorkloadConstruction(workflowName, testArguments)
wmbsHelper = WMBSHelper(workload, 'Reco', 'SomeBlock', cachepath=self.testDir)
wmbsHelper.createTopLevelFileset()
wmbsHelper._createSubscriptionsInWMBS(wmbsHelper.topLevelTask, wmbsHelper.topLevelFileset)
self.stateMap = {'AlcaSkim': [],
'Merge': [],
'Harvesting': [],
'Processing Done': []}
self.orderedStates = ['AlcaSkim', 'Merge', 'Harvesting', 'Processing Done']
# Populate WMStats
self.requestDBWriter.insertGenericRequest({'RequestName': workflowName})
self.requestDBWriter.updateRequestStatus(workflowName, 'Closed')
topLevelTask = '/%s/Reco' % workflowName
alcaSkimTask = '%s/AlcaSkim' % topLevelTask
mergeTasks = ['%s/AlcaSkim/AlcaSkimMergeALCARECOStreamHcalCalHOCosmics',
'%s/AlcaSkim/AlcaSkimMergeALCARECOStreamTkAlCosmics0T',
'%s/AlcaSkim/AlcaSkimMergeALCARECOStreamMuAlGlobalCosmics',
'%s/RecoMergewrite_AOD',
'%s/RecoMergewrite_DQM',
'%s/RecoMergewrite_RECO']
harvestingTask = '%s/RecoMergewrite_DQM/RecoMergewrite_DQMEndOfRunDQMHarvestMerged' % topLevelTask
self.stateMap['AlcaSkim'].append(wmbsHelper.topLevelSubscription)
alcaSkimWorkflow = Workflow(name=workflowName, task=alcaSkimTask)
alcaSkimWorkflow.load()
alcarecoFileset = Fileset(name='/PromptReco_Run195360_Cosmics/Reco/unmerged-write_ALCARECOALCARECO')
alcarecoFileset.load()
alcaSkimSub = Subscription(alcarecoFileset, alcaSkimWorkflow)
alcaSkimSub.load()
self.stateMap['Merge'].append(alcaSkimSub)
for task in mergeTasks:
mergeTask = task % topLevelTask
mergeWorkflow = Workflow(name=workflowName, task=mergeTask)
mergeWorkflow.load()
if 'AlcaSkim' in mergeTask:
stream = mergeTask.split('/')[-1][13:]
unmergedFileset = Fileset(name='%s/unmerged-%sALCARECO' % (alcaSkimTask, stream))
unmergedFileset.load()
else:
dataTier = mergeTask.split('/')[-1].split('_')[-1]
unmergedFileset = Fileset(name='%s/unmerged-write_%s%s' % (topLevelTask, dataTier, dataTier))
unmergedFileset.load()
mergeSub = Subscription(unmergedFileset, mergeWorkflow)
mergeSub.load()
self.stateMap['Harvesting'].append(mergeSub)
harvestingWorkflow = Workflow(name=workflowName, task=harvestingTask)
harvestingWorkflow.load()
harvestingFileset = Fileset(name='/PromptReco_Run195360_Cosmics/Reco/RecoMergewrite_DQM/merged-MergedDQM')
harvestingFileset.load()
harvestingSub = Subscription(harvestingFileset, harvestingWorkflow)
harvestingSub.load()
self.stateMap['Processing Done'].append(harvestingSub)
return
def verifyStateTransitions(self, transitionMethod='markFinished', transitionTrigger=True):
"""
_verifyStateTransitions_
Utility method which goes through the list of states in self.orderedStates and
finishes the tasks that demand a state transition in each step. This according
to the defined transition method and trigger.
It verifies that the request document in WMStats is moving according to the transitions
"""
for idx in range(0, len(self.orderedStates) * 2):
nextState = self.orderedStates[idx // 2]
if (idx // 2) == 0:
currentState = 'Closed'
else:
currentState = self.orderedStates[idx // 2 - 1]
if idx % 2 == 0:
for transitionObject in self.stateMap[nextState][:-1]:
method = getattr(transitionObject, transitionMethod)
method(transitionTrigger)
self.plugin([], self.requestDBWriter, self.requestDBWriter)
currentStateWorkflows = self.requestDBWriter.getRequestByStatus([currentState])
nextStateWorkflows = self.requestDBWriter.getRequestByStatus([nextState])
self.assertEqual(len(currentStateWorkflows), 1, 'Workflow moved incorrectly from %s' % currentState)
self.assertEqual(len(nextStateWorkflows), 0, 'Workflow moved incorrectly to %s' % nextState)
else:
transitionObject = self.stateMap[nextState][-1]
method = getattr(transitionObject, transitionMethod)
method(transitionTrigger)
self.plugin([], self.requestDBWriter, self.requestDBWriter)
currentStateWorkflows = self.requestDBWriter.getRequestByStatus([currentState])
nextStateWorkflows = self.requestDBWriter.getRequestByStatus([nextState])
self.assertEqual(len(currentStateWorkflows), 0,
'Workflow did not move correctly from %s' % currentState)
self.assertEqual(len(nextStateWorkflows), 1, 'Workflow did not move correctly to %s' % nextState)
return
def testA_RepackStates(self):
"""
_testA_RepackStates_
Setup an environment with a Repack workflow
and traverse through the different states.
Check that the transitions are sane.
"""
# Set the environment
self.setupRepackWorkflow()
self.plugin = Tier0Plugin()
# Verify the transitions
self.verifyStateTransitions('markOpen', False)
return
def testB_ExpressStates(self):
"""
_testB_ExpressStates_
Setup an environment with a Express workflow
and traverse through the different states.
Check that the transitions are sane.
"""
# Set the environment
self.setupExpressWorkflow()
self.plugin = Tier0Plugin()
# Verify the transitions
self.verifyStateTransitions()
return
def testC_PromptRecoStates(self):
"""
_testC_PromptRecoStates_
Setup an environment with a PromptReco workflow
and traverse through the different states.
Check that the transitions are sane.
"""
# Set the environment
self.setupPromptRecoWorkflow()
self.plugin = Tier0Plugin()
# Verify the transitions
self.verifyStateTransitions()
return
if __name__ == "__main__":
unittest.main()
| []
| []
| [
"COUCHURL"
]
| [] | ["COUCHURL"] | python | 1 | 0 | |
provider/postgis/postgis_internal_test.go | package postgis
import (
"os"
"reflect"
"strconv"
"strings"
"testing"
"github.com/go-spatial/geom"
"github.com/go-spatial/tegola-postgis/dict"
"github.com/go-spatial/tegola-postgis/internal/ttools"
)
// TESTENV is the environment variable that must be set to "yes" to run postgis tests.
const TESTENV = "RUN_POSTGIS_TESTS"
func GetTestPort(t *testing.T) int {
ttools.ShouldSkip(t, TESTENV)
port, err := strconv.ParseInt(os.Getenv("PGPORT"), 10, 32)
if err != nil {
t.Skipf("err parsing PGPORT: %v", err)
}
return int(port)
}
func TestLayerGeomType(t *testing.T) {
port := GetTestPort(t)
type tcase struct {
config map[string]interface{}
configOverride map[string]string
layerConfig map[string]interface{}
layerName string
geom geom.Geometry
err string
}
defaultConfig := map[string]interface{}{
ConfigKeyHost: os.Getenv("PGHOST"),
ConfigKeyPort: port,
ConfigKeyDB: os.Getenv("PGDATABASE"),
ConfigKeyUser: os.Getenv("PGUSER"),
ConfigKeyPassword: os.Getenv("PGPASSWORD"),
ConfigKeySSLMode: os.Getenv("PGSSLMODE"),
ConfigKeySSLKey: os.Getenv("PGSSLKEY"),
ConfigKeySSLCert: os.Getenv("PGSSLCERT"),
ConfigKeySSLRootCert: os.Getenv("PGSSLROOTCERT"),
}
fn := func(tc tcase) func(t *testing.T) {
return func(t *testing.T) {
// check if we have env vars to override
if len(tc.configOverride) > 0 {
conf := map[string]interface{}{}
// copy the original config
for k, v := range tc.config {
conf[k] = v
}
// set the config overrides
for k, v := range tc.configOverride {
conf[k] = v
}
// override the test's config with our new one
tc.config = conf
}
tc.config[ConfigKeyLayers] = []map[string]interface{}{tc.layerConfig}
provider, err := NewTileProvider(dict.Dict(tc.config))
if tc.err != "" {
if err == nil || !strings.Contains(err.Error(), tc.err) {
t.Errorf("expected error with %q in NewProvider, got: %v", tc.err, err)
}
return
}
if err != nil {
t.Errorf("NewProvider unexpected error: %v", err)
return
}
p := provider.(Provider)
layer := p.layers[tc.layerName]
if !reflect.DeepEqual(tc.geom, layer.geomType) {
t.Errorf("geom type, expected %v got %v", tc.geom, layer.geomType)
return
}
}
}
tests := map[string]tcase{
"1": {
config: defaultConfig,
layerConfig: map[string]interface{}{
ConfigKeyLayerName: "land",
ConfigKeySQL: "SELECT gid, ST_AsBinary(geom) FROM ne_10m_land_scale_rank WHERE geom && !BBOX!",
},
layerName: "land",
geom: geom.MultiPolygon{},
},
"zoom token replacement": {
config: defaultConfig,
layerConfig: map[string]interface{}{
ConfigKeyLayerName: "land",
ConfigKeySQL: "SELECT gid, ST_AsBinary(geom) FROM ne_10m_land_scale_rank WHERE gid = !ZOOM! AND geom && !BBOX!",
},
layerName: "land",
geom: geom.MultiPolygon{},
},
"configured geometry_type": {
config: defaultConfig,
layerConfig: map[string]interface{}{
ConfigKeyLayerName: "land",
ConfigKeyGeomType: "multipolygon",
ConfigKeySQL: "SELECT gid, ST_AsBinary(geom) FROM invalid_table_to_check_query_table_was_not_inspected WHERE geom && !BBOX!",
},
layerName: "land",
geom: geom.MultiPolygon{},
},
"configured geometry_type (case insensitive)": {
config: defaultConfig,
layerConfig: map[string]interface{}{
ConfigKeyLayerName: "land",
ConfigKeyGeomType: "MultiPolyGOn",
ConfigKeySQL: "SELECT gid, ST_AsBinary(geom) FROM invalid_table_to_check_query_table_was_not_inspected WHERE geom && !BBOX!",
},
layerName: "land",
geom: geom.MultiPolygon{},
},
"invalid configured geometry_type": {
config: defaultConfig,
layerConfig: map[string]interface{}{
ConfigKeyLayerName: "land",
ConfigKeyGeomType: "invalid",
ConfigKeySQL: "SELECT gid, ST_AsBinary(geom) FROM invalid_table_to_check_query_table_was_not_inspected WHERE geom && !BBOX!",
},
layerName: "land",
err: "unsupported geometry_type",
geom: geom.MultiPolygon{},
},
"role no access to table": {
config: defaultConfig,
configOverride: map[string]string{
ConfigKeyUser: os.Getenv("PGUSER_NO_ACCESS"),
},
err: "error fetching geometry type for layer (land): ERROR: permission denied for relation ne_10m_land_scale_rank (SQLSTATE 42501)",
layerConfig: map[string]interface{}{
ConfigKeyLayerName: "land",
ConfigKeySQL: "SELECT gid, ST_AsBinary(geom) FROM ne_10m_land_scale_rank WHERE geom && !BBOX!",
},
layerName: "land",
geom: geom.MultiPolygon{},
},
}
for name, tc := range tests {
t.Run(name, fn(tc))
}
}
| [
"\"PGPORT\"",
"\"PGHOST\"",
"\"PGDATABASE\"",
"\"PGUSER\"",
"\"PGPASSWORD\"",
"\"PGSSLMODE\"",
"\"PGSSLKEY\"",
"\"PGSSLCERT\"",
"\"PGSSLROOTCERT\"",
"\"PGUSER_NO_ACCESS\""
]
| []
| [
"PGPORT",
"PGSSLKEY",
"PGDATABASE",
"PGUSER",
"PGSSLMODE",
"PGHOST",
"PGPASSWORD",
"PGSSLCERT",
"PGUSER_NO_ACCESS",
"PGSSLROOTCERT"
]
| [] | ["PGPORT", "PGSSLKEY", "PGDATABASE", "PGUSER", "PGSSLMODE", "PGHOST", "PGPASSWORD", "PGSSLCERT", "PGUSER_NO_ACCESS", "PGSSLROOTCERT"] | go | 10 | 0 | |
windgate-project/cli/src/main/java/com/asakusafw/windgate/cli/CommandLineUtil.java | /**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.windgate.cli;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import com.asakusafw.runtime.core.context.RuntimeContext;
import com.asakusafw.windgate.core.ParameterList;
import com.asakusafw.windgate.core.WindGateLogger;
/**
* Utilities for command line interfaces.
* @since 0.2.2
* @version 0.4.0
*/
public final class CommandLineUtil {
static final WindGateLogger WGLOG = new WindGateCliLogger(CommandLineUtil.class);
static final Logger LOG = LoggerFactory.getLogger(CommandLineUtil.class);
/**
* Prepares runtime context.
*/
public static void prepareRuntimeContext() {
RuntimeContext.set(RuntimeContext.DEFAULT.apply(System.getenv()));
RuntimeContext.get().verifyApplication(WindGate.class.getClassLoader());
LOG.debug("Runtime context is prepared: {}", RuntimeContext.get());
}
/**
* Prefix of system properties used for log context.
*/
public static final String LOG_CONTEXT_PREFIX = "com.asakusafw.windgate.log.";
/**
* The scheme name of Java class path.
*/
public static final String SCHEME_CLASSPATH = "classpath";
/**
* Prepares log context.
* If current system properties contain keys with a special prefix
* (described in {@link #LOG_CONTEXT_PREFIX}),
* then this method will put each key-value pairs into log MDC.
*/
public static void prepareLogContext() {
Map<String, String> registered = new TreeMap<>();
Properties properties = System.getProperties();
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
if ((entry.getKey() instanceof String) == false || (entry.getValue() instanceof String) == false) {
continue;
}
String key = (String) entry.getKey();
if (key.startsWith(LOG_CONTEXT_PREFIX) == false) {
continue;
}
String value = (String) entry.getValue();
String name = key.substring(LOG_CONTEXT_PREFIX.length());
MDC.put(name, value);
registered.put(name, value);
}
LOG.debug("Log context is prepared: {}",
registered);
}
/**
* Returns the name of URI for hint.
* @param uri the target URI
* @return the name
* @throws IllegalArgumentException if some parameters were {@code null}
*/
public static String toName(URI uri) {
if (uri == null) {
throw new IllegalArgumentException("uri must not be null"); //$NON-NLS-1$
}
String path = uri.getSchemeSpecificPart();
if (path == null) {
return uri.toString();
}
String name = path.substring(path.lastIndexOf('/') + 1);
if (name.endsWith(".properties")) {
return name.substring(0, name.length() - ".properties".length());
} else {
return name;
}
}
/**
* Converts the path to the related URI.
* @param path target path
* @return the related URI
* @throws URISyntaxException if failed to convert the path
* @throws IllegalArgumentException if some parameters were {@code null}
*/
public static URI toUri(String path) throws URISyntaxException {
if (path == null) {
throw new IllegalArgumentException("path must not be null"); //$NON-NLS-1$
}
URI uri = new URI(path);
if (uri.getScheme() == null || uri.getScheme().length() != 1) {
return uri;
}
String os = System.getProperty("os.name", "UNKNOWN");
LOG.debug("Current OS name: {}",
os);
if (os.toLowerCase().startsWith("windows") == false) {
return uri;
}
File file = new File(path);
uri = file.toURI();
LOG.debug("Path \"{}\" may be an absolute path on Windows, converted into URI: {}",
path,
uri);
return uri;
}
/**
* Loads properties from the specified URI.
* URI can have following forms.
* <ul>
* <li> no scheme - relative path from the current working directory (local file system) </li>
* <li> "classpath" scheme - absolute path on {@code loader}'s class path </li>
* <li> other scheme - as a URL </li>
* </ul>
* @param path path to the target properties
* @param loader the class loader for the scheme "classpath",
* or {@code null} to use the system class loader
* @return the loaded properties
* @throws IOException if failed to load the properties
* @throws IllegalArgumentException if any parameter is {@code null}
*/
public static Properties loadProperties(URI path, ClassLoader loader) throws IOException {
if (path == null) {
throw new IllegalArgumentException("path must not be null"); //$NON-NLS-1$
}
LOG.debug("Loading properties: {}", path);
String scheme = path.getScheme();
if (scheme == null) {
File file = new File(path.getPath());
LOG.debug("Loading properties from local path: {}", file);
FileInputStream in = new FileInputStream(file);
return loadProperties(path, in);
} else if (scheme.equals(SCHEME_CLASSPATH)) {
ClassLoader cl = loader == null ? ClassLoader.getSystemClassLoader() : loader;
String rest = path.getSchemeSpecificPart();
LOG.debug("Loading properties from class path: {}", rest);
InputStream in = cl.getResourceAsStream(rest);
if (in == null) {
throw new FileNotFoundException(MessageFormat.format(
"Failed to load properties \"{0}\"",
path.toString()));
}
return loadProperties(path, in);
} else {
URL url = path.toURL();
LOG.debug("Loading properties from URL: {}", url);
InputStream in = url.openStream();
return loadProperties(path, in);
}
}
private static Properties loadProperties(URI uri, InputStream in) throws IOException {
assert uri != null;
assert in != null;
try {
Properties properties = new Properties();
properties.load(in);
return properties;
} finally {
in.close();
}
}
/**
* Parses a string of file list separated by the platform dependent path separator.
* @param fileListOrNull target string, or {@code null}
* @return the represented file list, or an empty list if not specified
*/
public static List<File> parseFileList(String fileListOrNull) {
if (fileListOrNull == null || fileListOrNull.isEmpty()) {
return Collections.emptyList();
}
List<File> results = new ArrayList<>();
int start = 0;
while (true) {
int index = fileListOrNull.indexOf(File.pathSeparatorChar, start);
if (index < 0) {
break;
}
if (start != index) {
results.add(new File(fileListOrNull.substring(start, index).trim()));
}
start = index + 1;
}
results.add(new File(fileListOrNull.substring(start).trim()));
return results;
}
/**
* Creates a class loader for loading plug-ins.
* @param parent parent class loader, or {@code null} to use the system class loader
* @param files plug-in class paths (*.jar file or class path directory)
* @return the created class loader
* @throws IllegalArgumentException if some parameters were {@code null}
*/
public static ClassLoader buildPluginLoader(ClassLoader parent, List<File> files) {
if (files == null) {
throw new IllegalArgumentException("files must not be null"); //$NON-NLS-1$
}
List<URL> pluginLocations = new ArrayList<>();
for (File file : files) {
try {
if (file.exists() == false) {
throw new FileNotFoundException(MessageFormat.format(
"Failed to load plugin \"{0}\"",
file.getAbsolutePath()));
}
URL url = file.toURI().toURL();
pluginLocations.add(url);
} catch (IOException e) {
WGLOG.warn(e, "W99001",
file.getAbsolutePath());
}
}
ClassLoader serviceLoader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) () -> {
URLClassLoader loader = new URLClassLoader(
pluginLocations.toArray(new URL[pluginLocations.size()]),
parent);
return loader;
});
return serviceLoader;
}
private static final Pattern PAIRS = Pattern.compile("(?<!\\\\),");
private static final Pattern KEY_VALUE = Pattern.compile("(?<!\\\\)=");
/**
* Parses the specified arguments string and returns key-value pairs.
* The arguments string is represented as following syntax with
* {@code ArgumentList} as the goal symbol.
* The each result pair will have {@code Value_key} as its key,
* and {@code Value_value} as value.
<pre><code>
ArgumentList:
ArgumentList "," Argument
Argument
","
(Empty)
Argument:
Value_key "=" Value_value
Value:
Character*
Character:
any character except ",", "=", "\\"
"\" any character
</code></pre>
* @param arguments the arguments represented in a string, or {@code null} as empty arguments
* @return the parsed key-value pairs
*/
public static ParameterList parseArguments(String arguments) {
if (arguments == null || arguments.isEmpty()) {
return new ParameterList();
}
Map<String, String> results = new LinkedHashMap<>();
String[] pairs = PAIRS.split(arguments);
for (String pair : pairs) {
if (pair.isEmpty()) {
continue;
}
String[] kv = KEY_VALUE.split(pair);
if (kv.length == 0) {
// in the case of "=", the regex engine returns an empty array
addArgument(results, "", "");
} else if (kv.length == 1 && kv[0].equals(pair) == false) {
// in the case of "key=", the regex engine return returns only a key
addArgument(results, unescape(kv[0]), "");
} else if (kv.length == 2) {
addArgument(results, unescape(kv[0]), unescape(kv[1]));
} else {
WGLOG.warn("W99002",
pair);
}
}
return new ParameterList(results);
}
private static void addArgument(Map<String, String> results, String key, String value) {
assert results != null;
assert key != null;
assert value != null;
if (results.containsKey(key)) {
WGLOG.warn("W99003",
key,
value);
} else {
results.put(key, value);
}
}
private static String unescape(String string) {
assert string != null;
StringBuilder buf = new StringBuilder();
int start = 0;
while (true) {
int index = string.indexOf('\\', start);
if (index < 0) {
break;
}
buf.append(string.substring(start, index));
if (index != string.length() - 1) {
buf.append(string.charAt(index + 1));
start = index + 2;
} else {
buf.append(string.charAt(index));
start = index + 1;
}
}
if (start < string.length()) {
buf.append(string.substring(start));
}
return buf.toString();
}
private CommandLineUtil() {
return;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
js.go | package retracer
import (
"bytes"
"io"
"log"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"strings"
"sync/atomic"
"syscall"
"time"
"h12.io/errors"
"h12.io/mitm"
"h12.io/uuid"
)
type JSTracer struct {
Timeout time.Duration
Certs *mitm.CertPool
}
func (t *JSTracer) Trace(uri string, header http.Header, body []byte) (string, error) {
body = neutralizeIFrame(body)
if t.Timeout == 0 {
t.Timeout = 10 * time.Second
}
proxy := newProxy(uri, header, body, t.Timeout, t.Certs)
defer proxy.Close()
browser, err := startBrowser(uri, proxy.URL())
if err != nil {
return "", err
}
defer browser.Close()
select {
case <-time.After(t.Timeout):
return "", ErrJSRedirectionTimeout
case redirectURL := <-proxy.RedirectURLChan():
return redirectURL, nil
case err := <-proxy.ErrChan():
return "", err
case err := <-errChan(browser.Wait):
return "", err
}
}
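// neutralizeIFrame rewrites <iframe> tags into <div>s so that framed documents are
// not loaded during tracing.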
func neutralizeIFrame(body []byte) []byte {
body = bytes.Replace(body, []byte("<iframe"), []byte("<div"), -1)
body = bytes.Replace(body, []byte("/iframe>"), []byte("/div>"), -1)
return body
}
type fakeProxy struct {
uri string
header http.Header
body []byte
certs *mitm.CertPool
timeout time.Duration
proxy *httptest.Server
redirectChan chan string
errChan chan error
respondCount int32
}
func newProxy(uri string, header http.Header, body []byte, timeout time.Duration, certs *mitm.CertPool) *fakeProxy {
fp := &fakeProxy{
uri: uri,
header: header,
body: body,
certs: certs,
timeout: timeout,
redirectChan: make(chan string),
errChan: make(chan error),
}
fp.proxy = httptest.NewServer(http.HandlerFunc(fp.serve))
return fp
}
func (p *fakeProxy) URL() string {
return p.proxy.URL
}
func (p *fakeProxy) RedirectURLChan() <-chan string {
return p.redirectChan
}
func (p *fakeProxy) ErrChan() <-chan error {
return p.errChan
}
func (p *fakeProxy) setError(err error) {
select {
case p.errChan <- err:
default:
}
}
func (p *fakeProxy) setRedirectURL(uri string) {
select {
case p.redirectChan <- uri:
default:
}
}
func (p *fakeProxy) Close() error {
p.proxy.Close() // make sure all serve goroutines have exited
return nil
}
func (p *fakeProxy) serve(w http.ResponseWriter, req *http.Request) {
if req.Method == "GET" {
p.serveHTTP(w, req)
} else if req.Method == "CONNECT" {
err := p.certs.ServeHTTPS(w, req, p.serveHTTP)
if err != nil {
p.setError(errors.Wrap(err))
}
}
}
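// serveHTTP replays the captured response on the first request; for later requests it
// fetches .js files directly, ignores other static resources, and reports any remaining
// navigation as the JavaScript redirect target.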
func (p *fakeProxy) serveHTTP(w http.ResponseWriter, req *http.Request) {
if atomic.AddInt32(&p.respondCount, 1) == 1 {
for k, v := range p.header {
w.Header()[k] = v
}
w.Write(p.body)
} else {
if isJS(req.RequestURI) {
jsResp, err := http.Get(req.RequestURI)
if err != nil {
p.setError(errors.Wrap(err))
return
}
for k, v := range jsResp.Header {
w.Header()[k] = v
}
w.WriteHeader(jsResp.StatusCode)
if _, err := io.Copy(w, jsResp.Body); err != nil {
p.setError(errors.Wrap(err))
}
jsResp.Body.Close()
return
}
if isResource(req.RequestURI, req.Header) {
return
}
p.setRedirectURL(req.RequestURI)
}
}
func isJS(uri string) bool {
return strings.ToLower(path.Ext(uri)) == ".js"
}
func isResource(uri string, header http.Header) bool {
switch strings.ToLower(path.Ext(uri)) {
case ".css", ".png", ".gif", ".jpg", ".jpeg":
return true
}
accept := header.Get("Accept")
if !strings.Contains(accept, "text/html") {
return true
}
return false
}
type browser struct {
id string
cmd *exec.Cmd
}
func startBrowser(uri, proxy string) (*browser, error) {
// id is for debugging only
id, _ := uuid.NewTime(time.Now())
cmd := exec.Command(
"surf",
"-bdfgikmnp",
"-t", os.DevNull,
uri,
id.String(),
)
// set pgid so all child processes can be killed together
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
cmd.Env = []string{
"DISPLAY=" + os.Getenv("DISPLAY"),
"http_proxy=" + proxy,
}
return &browser{id: id.String(), cmd: cmd}, cmd.Start()
}
func (b *browser) pid() int {
if b.cmd.Process != nil {
return b.cmd.Process.Pid
}
return 0
}
func (b *browser) Wait() error {
err := b.cmd.Wait()
if _, ok := err.(*exec.ExitError); !ok {
return errors.Wrap(err)
}
return nil
}
func (b *browser) Close() error {
if b.cmd.Process == nil {
log.Printf("cannot kill surf %s because it is not started", b.id)
return nil
}
// kill -pgid (-pid)
// https://medium.com/@felixge/killing-a-child-process-and-all-of-its-children-in-go-54079af94773#.g2krdc3ir
if err := syscall.Kill(-b.cmd.Process.Pid, syscall.SIGKILL); err != nil {
log.Printf("fail to kill surf %s (%d)", b.id, b.pid())
return err
}
return nil
}
func forceKill(p *os.Process) error {
if err := p.Kill(); err != nil {
return err
}
for i := 0; processExists(p.Pid); i++ {
if err := p.Kill(); err != nil {
return err
}
time.Sleep(time.Second)
if i > 10 {
log.Printf("try to kill surf %d for the %d times", p.Pid, i)
}
}
return nil
}
func processExists(pid int) bool {
process, err := os.FindProcess(pid)
if err != nil {
// non-unix system
return false
}
return nil == process.Signal(syscall.Signal(0))
}
func strChan(f func() string) chan string {
ch := make(chan string)
go func() {
ch <- f()
}()
return ch
}
func errChan(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
| [
"\"DISPLAY\""
]
| []
| [
"DISPLAY"
]
| [] | ["DISPLAY"] | go | 1 | 0 | |
controllers/webhook_cabundler.go | package controllers
import (
"context"
"errors"
"io/ioutil"
"os"
"path/filepath"
"strings"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/ForgeRock/secret-agent/api/v1alpha1"
"github.com/ForgeRock/secret-agent/pkg/generator"
)
// InitWebhookCertificates creates and injects the certificates required by the k8s webhooks
func InitWebhookCertificates(certDir string) error {
secretName := os.Getenv("WEBHOOK_SECRET_NAME")
namespace := os.Getenv("SERVICE_NAMESPACE")
validatingWebhookConfigurationName := os.Getenv("VALIDATING_WEBHOOK_CONFIGURATION")
mutatingWebhookConfigurationName := os.Getenv("MUTATING_WEBHOOK_CONFIGURATION")
val := os.Getenv("CERTIFICATE_SANS")
sans := strings.Split(val, ",")
if len(secretName) == 0 || len(namespace) == 0 || len(validatingWebhookConfigurationName) == 0 ||
len(mutatingWebhookConfigurationName) == 0 || len(sans) == 0 {
return errors.New("Need ENVS: WEBHOOK_SECRET_NAME, SERVICE_NAMESPACE, " +
"VALIDATING_WEBHOOK_CONFIGURATION, MUTATING_WEBHOOK_CONFIGURATION, CERTIFICATE_SANS")
}
rootCA, leafCert, err := generateCertificates(sans)
if err != nil {
		// Unable to generate certificates
return err
}
k8sClient, err := getClient()
if err != nil {
return err
}
// Patching webhook secret
if err := patchWebhookSecret(k8sClient, rootCA.CertPEM, leafCert.CertPEM, leafCert.PrivateKeyPEM, secretName, namespace); err != nil {
return err
}
// Patching validating webhook
if err := patchValidatingWebhookConfiguration(k8sClient, rootCA.CertPEM, validatingWebhookConfigurationName); err != nil {
return err
}
// Patching mutating webhook
if err := patchMutatingWebhookConfiguration(k8sClient, rootCA.CertPEM, mutatingWebhookConfigurationName); err != nil {
return err
}
// Unable to create certDir
if err := os.MkdirAll(certDir, 0755); err != nil {
return err
}
// Unable to create ca.crt
if err := ioutil.WriteFile(filepath.Join(certDir, "ca.crt"), rootCA.CertPEM, 0400); err != nil {
return err
}
// Unable to create tls.crt
if err := ioutil.WriteFile(filepath.Join(certDir, "tls.crt"), leafCert.CertPEM, 0400); err != nil {
return err
}
// Unable to create tls.key
if err := ioutil.WriteFile(filepath.Join(certDir, "tls.key"), leafCert.PrivateKeyPEM, 0400); err != nil {
return err
}
return nil
}
func getClient() (client.Client, error) {
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
kubeconfig, err := ctrl.GetConfig()
if err != nil {
return nil, err
}
kubeclient, err := client.New(kubeconfig, client.Options{
Scheme: scheme,
})
if err != nil {
return nil, err
}
return kubeclient, nil
}
// generateCertificates generates the root CA and leaf certificate to be used by the webhook
func generateCertificates(sans []string) (rootCA, leafCert *generator.Certificate, err error) {
rootCA, err = generator.GenerateRootCA("secret-agent")
if err != nil {
return
}
leafCert, err = generator.GenerateSignedCert(rootCA, v1alpha1.ECDSAWithSHA256, "", sans)
if err != nil {
return
}
return
}
// patchWebhookSecret patches the named TLS secret with the TLS information
func patchWebhookSecret(k client.Client, rootCAPem, certPEM, keyPEM []byte, name, namespace string) (err error) {
k8sSecret := &corev1.Secret{}
if err = k.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, k8sSecret); err != nil {
return
}
// secret found, we need to update
k8sSecret.Data["ca.crt"] = rootCAPem
k8sSecret.Data["tls.crt"] = certPEM
k8sSecret.Data["tls.key"] = keyPEM
err = k.Update(context.TODO(), k8sSecret)
return
}
// patchValidatingWebhookConfiguration patches the given ValidatingWebhookConfiguration with the caBundle
func patchValidatingWebhookConfiguration(k client.Client, rootCAPem []byte, name string) (err error) {
webhookConfiguration := &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}
if err = k.Get(context.TODO(), types.NamespacedName{Name: name}, webhookConfiguration); err != nil {
return
}
for i := range webhookConfiguration.Webhooks {
webhookConfiguration.Webhooks[i].ClientConfig.CABundle = rootCAPem
}
err = k.Update(context.TODO(), webhookConfiguration)
return
}
// patchMutatingWebhookConfiguration patches the given MutatingWebhookConfiguration with the caBundle
func patchMutatingWebhookConfiguration(k client.Client, rootCAPem []byte, name string) (err error) {
webhookConfiguration := &admissionregistrationv1beta1.MutatingWebhookConfiguration{}
if err = k.Get(context.TODO(), types.NamespacedName{Name: name}, webhookConfiguration); err != nil {
return
}
for i := range webhookConfiguration.Webhooks {
webhookConfiguration.Webhooks[i].ClientConfig.CABundle = rootCAPem
}
err = k.Update(context.TODO(), webhookConfiguration)
return
}
| [
"\"WEBHOOK_SECRET_NAME\"",
"\"SERVICE_NAMESPACE\"",
"\"VALIDATING_WEBHOOK_CONFIGURATION\"",
"\"MUTATING_WEBHOOK_CONFIGURATION\"",
"\"CERTIFICATE_SANS\""
]
| []
| [
"CERTIFICATE_SANS",
"WEBHOOK_SECRET_NAME",
"SERVICE_NAMESPACE",
"MUTATING_WEBHOOK_CONFIGURATION",
"VALIDATING_WEBHOOK_CONFIGURATION"
]
| [] | ["CERTIFICATE_SANS", "WEBHOOK_SECRET_NAME", "SERVICE_NAMESPACE", "MUTATING_WEBHOOK_CONFIGURATION", "VALIDATING_WEBHOOK_CONFIGURATION"] | go | 5 | 0 | |
pkg/global/global.go | // Copyright 2020 spaGO Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package global
import (
"fmt"
"os"
"strconv"
)
// This is used to regulate the use of approximate math functions over the default implementations.
var mathOptimizationLevel = 0
const (
// Min optimization level
minMathOptimizationLevel = 0
// Max optimization level
maxMathOptimizationLevel = 2
// Default optimization level (no optimization)
defaultMathOptimizationLevel = 0
)
var ballast []byte
func init() {
// Create a virtual heap allocation of 2 GiB to reduce GC activity.
// https://blog.twitch.tv/go-memory-ballast-how-i-learnt-to-stop-worrying-and-love-the-heap-26c2462549a2
ballast = make([]byte, 2<<30)
_ = ballast // it is consciously not used
strOptLevel := os.Getenv("OPTIMIZATION_LEVEL")
if strOptLevel == "" {
SetMathOptimizationLevel(defaultMathOptimizationLevel)
} else {
if i, err := strconv.Atoi(strOptLevel); err == nil {
SetMathOptimizationLevel(i)
} else {
panic(fmt.Sprintf("global: optimization level must be a number in the range [%d-%d]",
minMathOptimizationLevel, maxMathOptimizationLevel))
}
}
}
// SetMathOptimizationLevel the global optimization level to i.
// It returns the previous level.
func SetMathOptimizationLevel(i int) int {
if !(i >= minMathOptimizationLevel && i <= maxMathOptimizationLevel) {
panic(fmt.Sprintf("global: optimization level must be in the range [%d-%d], found %d",
minMathOptimizationLevel, maxMathOptimizationLevel, i))
}
prev := mathOptimizationLevel
mathOptimizationLevel = i
return prev
}
// MathOptimizationLevel returns the global optimization level
func MathOptimizationLevel() int {
return mathOptimizationLevel
}
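
// Illustrative usage (not part of the original package; the "global" import path is assumed):
// the level can be set before process start via the environment,
//   OPTIMIZATION_LEVEL=2 ./your-binary
// or programmatically at runtime:
//   prev := global.SetMathOptimizationLevel(2)
//   defer global.SetMathOptimizationLevel(prev)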
| [
"\"OPTIMIZATION_LEVEL\""
]
| []
| [
"OPTIMIZATION_LEVEL"
]
| [] | ["OPTIMIZATION_LEVEL"] | go | 1 | 0 | |
internal/handler/context.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handler
import (
"errors"
"os"
"path/filepath"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// getContext reads the current kubeconfig context
func getContext() (*rest.Config, error) {
path, err := configPath()
if err != nil {
return nil, err
}
config, err := clientcmd.BuildConfigFromFlags("", path)
if err != nil {
return nil, err
}
return config, nil
}
func configPath() (string, error) {
if home := homeDir(); home != "" {
return filepath.Join(home, ".kube", "config"), nil
}
return "", errors.New("HOME OR USERPROFILE env variables are not set")
}
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
}
return os.Getenv("USERPROFILE")
}
| [
"\"HOME\"",
"\"USERPROFILE\""
]
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
step3ComprehensiveProject/django-vue/djangoCMS/djangoCMS/wsgi.py | """
WSGI config for djangoCMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoCMS.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | package main
import (
"encoding/hex"
"encoding/json"
"fmt"
"log"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/ameshkov/dnsstamps"
"github.com/miekg/dns"
)
// VersionString -- see the makefile
var VersionString = "undefined"
func main() {
machineReadable := os.Getenv("JSON") == "1"
insecureSkipVerify := os.Getenv("VERIFY") == "0"
timeoutStr := os.Getenv("TIMEOUT")
rrTypeStr := os.Getenv("RRTYPE")
rrType, ok := dns.StringToType[rrTypeStr]
if !ok {
if rrTypeStr != "" {
log.Printf("Invalid RRTYPE: %s", rrTypeStr)
usage()
os.Exit(1)
}
rrType = dns.TypeA
}
timeout := 10
if !machineReadable {
os.Stdout.WriteString(fmt.Sprintf("dnslookup %s\n", VersionString))
if len(os.Args) == 2 && (os.Args[1] == "-v" || os.Args[1] == "--version") {
os.Exit(0)
}
}
if insecureSkipVerify {
os.Stdout.WriteString("TLS verification has been disabled\n")
}
if len(os.Args) == 2 && (os.Args[1] == "-h" || os.Args[1] == "--help") {
usage()
os.Exit(0)
}
if len(os.Args) != 3 && len(os.Args) != 4 && len(os.Args) != 5 {
log.Printf("Wrong number of arguments")
usage()
os.Exit(1)
}
if timeoutStr != "" {
i, err := strconv.Atoi(timeoutStr)
if err != nil {
log.Printf("Wrong timeout value: %s", timeoutStr)
usage()
os.Exit(1)
}
timeout = i
}
domain := os.Args[1]
server := os.Args[2]
opts := &upstream.Options{
Timeout: time.Duration(timeout) * time.Second,
InsecureSkipVerify: insecureSkipVerify,
}
if len(os.Args) == 4 {
ip := net.ParseIP(os.Args[3])
if ip == nil {
log.Fatalf("invalid IP specified: %s", os.Args[3])
}
opts.ServerIPAddrs = []net.IP{ip}
}
if len(os.Args) == 5 {
// DNSCrypt parameters
providerName := os.Args[3]
serverPkStr := os.Args[4]
serverPk, err := hex.DecodeString(strings.Replace(serverPkStr, ":", "", -1))
if err != nil {
log.Fatalf("Invalid server PK %s: %s", serverPkStr, err)
}
var stamp dnsstamps.ServerStamp
stamp.Proto = dnsstamps.StampProtoTypeDNSCrypt
stamp.ServerAddrStr = server
stamp.ProviderName = providerName
stamp.ServerPk = serverPk
server = stamp.String()
}
u, err := upstream.AddressToUpstream(server, opts)
if err != nil {
log.Fatalf("Cannot create an upstream: %s", err)
}
req := dns.Msg{}
req.Id = dns.Id()
req.RecursionDesired = true
req.Question = []dns.Question{
{Name: domain + ".", Qtype: rrType, Qclass: dns.ClassINET},
}
reply, err := u.Exchange(&req)
if err != nil {
log.Fatalf("Cannot make the DNS request: %s", err)
}
if !machineReadable {
os.Stdout.WriteString("dnslookup result:\n")
os.Stdout.WriteString(reply.String() + "\n")
} else {
b, err := json.MarshalIndent(reply, "", " ")
if err != nil {
log.Fatalf("Cannot marshal json: %s", err)
}
os.Stdout.WriteString(string(b) + "\n")
}
}
func usage() {
os.Stdout.WriteString("Usage: dnslookup <domain> <server> [<providerName> <serverPk>]\n")
os.Stdout.WriteString("<domain>: mandatory, domain name to lookup\n")
os.Stdout.WriteString("<server>: mandatory, server address. Supported: plain, tls:// (DOT), https:// (DOH), sdns:// (DNSCrypt), quic:// (DOQ)\n")
os.Stdout.WriteString("<providerName>: optional, DNSCrypt provider name\n")
os.Stdout.WriteString("<serverPk>: optional, DNSCrypt server public key\n")
}
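
// Illustrative invocations (sketch only; the resolver addresses are examples — any reachable
// server in one of the supported schemes works, and output depends on that resolver):
//   dnslookup example.org 1.1.1.1
//   RRTYPE=AAAA JSON=1 dnslookup example.org tls://1.1.1.1
//   VERIFY=0 dnslookup example.org https://dns.adguard.com/dns-query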
| [
"\"JSON\"",
"\"VERIFY\"",
"\"TIMEOUT\"",
"\"RRTYPE\""
]
| []
| [
"JSON",
"VERIFY",
"TIMEOUT",
"RRTYPE"
]
| [] | ["JSON", "VERIFY", "TIMEOUT", "RRTYPE"] | go | 4 | 0 | |
cmd/s3db/main.go | package main
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/docopt/docopt-go"
"github.com/jrhy/s3db"
)
const version = "0.8"
var (
subcommandFuncs = map[string]func(*subcommandArgs) int{}
subcommandUsage = map[string]string{}
subcommandDesc = map[string]string{}
)
type subcommandArgs struct {
// inputs
Bucket string
Prefix string `docopt:"-p,--prefix"`
MasterKeyFile string `docopt:"-k,--master-key-file"`
Quiet bool `docopt:"-q,--quiet"`
Verbose bool `docopt:"-v,--verbose"`
Subcommand string `docopt:"<command>"`
Arg []string `docopt:"<arg>"`
Ctx context.Context
Stdout io.Writer
Stderr io.Writer
// derived
encryptor s3db.Encryptor
SubcommandOptions docopt.Opts
// outputs
db *s3db.DB
s3opts *s3db.OpenOptions
Result struct {
suppressCommit bool
}
}
func main() {
s := subcommandArgs{
Stdout: os.Stdout,
Stderr: os.Stderr,
Ctx: context.Background(),
}
os.Exit(int(s.run(os.Args[1:])))
}
func parseArgs(s *subcommandArgs, args []string) {
usage := `s3db v` + version + `
Usage:
s3db --bucket=<name> [--master-key-file=<path>] [--prefix=<s3-prefix>]
[-qv] <command> [<arg>...]
s3db -h
Options:
-b, --bucket=<name> S3 bucket to put the database in
-h, --help Print detailed help, including subcommands.
-k, --master-key-file=<path>
path to master key material bytes
-p, --prefix=<string> S3 object name prefix
-q, --quiet suppress warnings
-v, --verbose always say what happened
Environment:
S3_ENDPOINT=<url> override S3 endpoint, if not using AWS S3
(e.g. minio, Wasabi)
AWS SDK S3 client per
docs.aws.amazon.com/cli/latest/reference/configure
Commands:
`
cmds := []string{}
for cmd := range subcommandUsage {
cmds = append(cmds, cmd)
}
sort.Strings(cmds)
for _, cmd := range cmds {
usage += fmt.Sprintf(" %s\n", subcommandUsage[cmd])
usage += fmt.Sprintf(" %s\n", subcommandDesc[cmd])
}
p := docopt.Parser{
OptionsFirst: true,
}
opts, err := p.ParseArgs(usage, args, version)
if err != nil {
panic(err)
}
err = opts.Bind(s)
if err != nil {
panic(err)
}
}
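
// Illustrative invocation (sketch only; <command> stands for a subcommand registered
// elsewhere via subcommandFuncs, and the endpoint/bucket values are placeholders):
//   S3_ENDPOINT=http://localhost:9000 s3db --bucket=my-bucket --prefix=demo <command> [<arg>...]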
func (s *subcommandArgs) run(args []string) int {
parseArgs(s, args)
if s.MasterKeyFile != "" {
keyBytes, err := ioutil.ReadFile(s.MasterKeyFile)
if err != nil {
fmt.Fprintln(s.Stderr, err)
return 1
}
s.encryptor = s3db.V1NodeEncryptor(keyBytes)
}
if f, ok := subcommandFuncs[s.Subcommand]; ok {
su := subcommandUsage[s.Subcommand]
		r := parseSubcommandArgs(su, s)
if r != 0 {
return r
}
r = f(s)
if r != 0 {
return r
}
} else {
fmt.Fprintf(s.Stderr, "unknown command: %s", s.Subcommand)
fmt.Fprintf(s.Stderr, "arg: %v\n", s.Arg)
return 1
}
if s.db == nil ||
s.s3opts == nil ||
s.s3opts.ReadOnly ||
s.Result.suppressCommit {
return 0
}
if !s.db.IsDirty() {
if s.Verbose {
fmt.Fprintf(s.Stdout, "no change\n")
}
return 0
}
hash, err := s.db.Commit(s.Ctx)
if err != nil {
fmt.Fprintln(s.Stderr, err)
return 1
}
if s.Verbose {
if hash != nil {
fmt.Fprintf(s.Stdout, "committed %s\n", *hash)
} else {
fmt.Fprintf(s.Stdout, "committed empty tree\n")
}
}
return 0
}
func open(ctx context.Context, opts *s3db.OpenOptions, args *subcommandArgs) *s3db.DB {
if args.Bucket == "" {
fmt.Fprintf(args.Stderr, "--bucket not set\n")
os.Exit(1)
}
s := getS3()
if s.Endpoint == "" {
fmt.Fprintf(args.Stderr, "No S3 endpoint configured. Ensure AWS SDK is configured or set S3_ENDPOINT explicitly.\n")
os.Exit(1)
}
cfg := s3db.Config{
Storage: &s3db.S3BucketInfo{
EndpointURL: s.Endpoint,
BucketName: args.Bucket,
Prefix: args.Prefix,
},
KeysLike: "stringy",
ValuesLike: "stringy",
NodeEncryptor: args.encryptor,
}
var so s3db.OpenOptions
if opts != nil {
so = *opts
}
db, err := s3db.Open(ctx, s, cfg, so, time.Now())
if err != nil {
err = fmt.Errorf("open: %w", err)
fmt.Fprintln(args.Stderr, err)
os.Exit(1)
}
args.db = db
args.s3opts = &so
return db
}
func getS3() *s3.S3 {
config := aws.Config{}
endpoint := os.Getenv("S3_ENDPOINT")
if endpoint != "" {
config.Endpoint = &endpoint
config.S3ForcePathStyle = aws.Bool(true)
}
sess, err := session.NewSession(&config)
if err != nil {
err = fmt.Errorf("session: %w", err)
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
return s3.New(sess)
}
func parseSubcommandArgs(usage string, s *subcommandArgs) int {
p := docopt.Parser{
SkipHelpFlags: true,
}
opts, err := p.ParseArgs(
"Usage: "+strings.Split(usage, "\n")[0],
s.Arg, "")
if err != nil {
fmt.Fprintln(s.Stderr, err)
return 1
}
s.SubcommandOptions = opts
return 0
}
func parseDuration(o *docopt.Opts, name string, d *time.Duration) error {
durstr, err := o.String(name)
if err != nil {
return fmt.Errorf("option: %w", err)
}
if durstr == "" {
return errors.New("empty duration")
}
*d, err = time.ParseDuration(durstr)
if err != nil {
return fmt.Errorf("duration: %w", err)
}
return nil
}
| [
"\"S3_ENDPOINT\""
]
| []
| [
"S3_ENDPOINT"
]
| [] | ["S3_ENDPOINT"] | go | 1 | 0 | |
whatsnew/wsgi.py | """
WSGI config for whatsnew project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "whatsnew.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/loggers/test_tensorboard.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
from unittest import mock
import pytest
import torch
import yaml
from omegaconf import OmegaConf
from packaging.version import Version
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
@RunIf(min_torch="1.5.0")
def test_tensorboard_hparams_reload(tmpdir):
class CustomModel(BoringModel):
def __init__(self, b1=0.5, b2=0.999):
super().__init__()
self.save_hyperparameters()
trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
model = CustomModel()
assert trainer.log_dir == trainer.logger.log_dir
trainer.fit(model)
assert trainer.log_dir == trainer.logger.log_dir
folder_path = trainer.log_dir
# make sure yaml is there
with open(os.path.join(folder_path, "hparams.yaml")) as file:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
yaml_params = yaml.safe_load(file)
assert yaml_params["b1"] == 0.5
assert yaml_params["b2"] == 0.999
assert len(yaml_params.keys()) == 2
# verify artifacts
assert len(os.listdir(os.path.join(folder_path, "checkpoints"))) == 1
# verify tb logs
event_acc = EventAccumulator(folder_path)
event_acc.Reload()
data_pt_1_5 = b'\x12\x1b"\x04\n\x02b1"\x04\n\x02b2*\r\n\x0b\x12\thp_metric'
data_pt_1_6 = b'\x12\x1f"\x06\n\x02b1 \x03"\x06\n\x02b2 \x03*\r\n\x0b\x12\thp_metric'
hparams_data = data_pt_1_6 if Version(torch.__version__) >= Version("1.6.0") else data_pt_1_5
assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.plugin_name == 'hparams'
assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.content == hparams_data
def test_tensorboard_automatic_versioning(tmpdir):
"""Verify that automatic versioning works"""
root_dir = tmpdir / "tb_versioning"
root_dir.mkdir()
(root_dir / "version_0").mkdir()
(root_dir / "version_1").mkdir()
logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning")
assert logger.version == 2
def test_tensorboard_manual_versioning(tmpdir):
"""Verify that manual versioning works"""
root_dir = tmpdir / "tb_versioning"
root_dir.mkdir()
(root_dir / "version_0").mkdir()
(root_dir / "version_1").mkdir()
(root_dir / "version_2").mkdir()
logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)
assert logger.version == 1
def test_tensorboard_named_version(tmpdir):
"""Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
name = "tb_versioning"
(tmpdir / name).mkdir()
expected_version = "2020-02-05-162402"
logger = TensorBoardLogger(save_dir=tmpdir, name=name, version=expected_version)
logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5}) # Force data to be written
assert logger.version == expected_version
assert os.listdir(tmpdir / name) == [expected_version]
assert os.listdir(tmpdir / name / expected_version)
@pytest.mark.parametrize("name", ["", None])
def test_tensorboard_no_name(tmpdir, name):
"""Verify that None or empty name works"""
logger = TensorBoardLogger(save_dir=tmpdir, name=name)
logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5}) # Force data to be written
assert logger.root_dir == tmpdir
assert os.listdir(tmpdir / "version_0")
def test_tensorboard_log_sub_dir(tmpdir):
class TestLogger(TensorBoardLogger):
# for reproducibility
@property
def version(self):
return "version"
@property
def name(self):
return "name"
trainer_args = dict(
default_root_dir=tmpdir,
max_steps=1,
)
# no sub_dir specified
save_dir = tmpdir / "logs"
logger = TestLogger(save_dir)
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version")
# sub_dir specified
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version", "sub_dir")
# test home dir (`~`) handling
save_dir = "~/tmp"
explicit_save_dir = os.path.expanduser(save_dir)
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
# test env var (`$`) handling
test_env_dir = "some_directory"
os.environ["test_env_dir"] = test_env_dir
save_dir = "$test_env_dir/tmp"
explicit_save_dir = f"{test_env_dir}/tmp"
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
logger = TensorBoardLogger(tmpdir)
metrics = {
"float": 0.3,
"int": 1,
"FloatTensor": torch.tensor(0.1),
"IntTensor": torch.tensor(1),
}
logger.log_metrics(metrics, step_idx)
def test_tensorboard_log_hyperparams(tmpdir):
logger = TensorBoardLogger(tmpdir)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
"a": {
"b": "c"
}
},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar="buzz")),
"layer": torch.nn.BatchNorm1d,
}
logger.log_hyperparams(hparams)
def test_tensorboard_log_hparams_and_metrics(tmpdir):
logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
"a": {
"b": "c"
}
},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar="buzz")),
"layer": torch.nn.BatchNorm1d,
}
metrics = {"abc": torch.tensor([0.54])}
logger.log_hyperparams(hparams, metrics)
def test_tensorboard_log_omegaconf_hparams_and_metrics(tmpdir):
logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
"a": {
"b": "c"
}
},
"list": [1, 2, 3],
# "namespace": Namespace(foo=Namespace(bar="buzz")),
# "layer": torch.nn.BatchNorm1d,
}
hparams = OmegaConf.create(hparams)
metrics = {"abc": torch.tensor([0.54])}
logger.log_hyperparams(hparams, metrics)
@pytest.mark.parametrize("example_input_array", [None, torch.rand(2, 32)])
def test_tensorboard_log_graph(tmpdir, example_input_array):
""" test that log graph works with both model.example_input_array and
if array is passed externaly
"""
model = BoringModel()
if example_input_array is not None:
model.example_input_array = None
logger = TensorBoardLogger(tmpdir, log_graph=True)
logger.log_graph(model, example_input_array)
def test_tensorboard_log_graph_warning_no_example_input_array(tmpdir):
""" test that log graph throws warning if model.example_input_array is None """
model = BoringModel()
model.example_input_array = None
logger = TensorBoardLogger(tmpdir, log_graph=True)
with pytest.warns(
UserWarning,
match='Could not log computational graph since the `model.example_input_array`'
' attribute is not set or `input_array` was not given'
):
logger.log_graph(model)
@mock.patch('pytorch_lightning.loggers.TensorBoardLogger.log_metrics')
def test_tensorboard_with_accummulated_gradients(mock_log_metrics, tmpdir):
"""Tests to ensure that tensorboard log properly when accumulated_gradients > 1"""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.indexes = []
def training_step(self, *args):
self.log('foo', 1, on_step=True, on_epoch=True)
if not self.trainer.train_loop.should_accumulate():
if self.trainer.logger_connector.should_update_logs:
self.indexes.append(self.trainer.global_step)
return super().training_step(*args)
model = TestModel()
model.training_epoch_end = None
logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=12,
limit_val_batches=0,
max_epochs=3,
accumulate_grad_batches=2,
logger=[logger_0],
log_every_n_steps=3,
)
trainer.fit(model)
calls = [m[2] for m in mock_log_metrics.mock_calls]
count_epochs = [c["step"] for c in calls if "foo_epoch" in c["metrics"]]
assert count_epochs == [5, 11, 17]
count_steps = [c["step"] for c in calls if "foo_step" in c["metrics"]]
assert count_steps == model.indexes
@mock.patch('pytorch_lightning.loggers.tensorboard.SummaryWriter')
def test_tensorboard_finalize(summary_writer, tmpdir):
""" Test that the SummaryWriter closes in finalize. """
logger = TensorBoardLogger(save_dir=tmpdir)
logger.finalize("any")
summary_writer().flush.assert_called()
summary_writer().close.assert_called()
def test_tensorboard_save_hparams_to_yaml_once(tmpdir):
model = BoringModel()
logger = TensorBoardLogger(save_dir=tmpdir, default_hp_metric=False)
trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=logger)
assert trainer.log_dir == trainer.logger.log_dir
trainer.fit(model)
hparams_file = "hparams.yaml"
assert os.path.isfile(os.path.join(trainer.log_dir, hparams_file))
assert not os.path.isfile(os.path.join(tmpdir, hparams_file))
@mock.patch('pytorch_lightning.loggers.tensorboard.log')
def test_tensorboard_with_symlink(log, tmpdir):
"""
Tests a specific failure case when tensorboard logger is used with empty name, symbolic link ``save_dir``, and
relative paths.
"""
os.chdir(tmpdir) # need to use relative paths
source = os.path.join('.', 'lightning_logs')
dest = os.path.join('.', 'sym_lightning_logs')
os.makedirs(source, exist_ok=True)
os.symlink(source, dest)
logger = TensorBoardLogger(save_dir=dest, name='')
_ = logger.version
log.warning.assert_not_called()
| []
| []
| [
"test_env_dir"
]
| [] | ["test_env_dir"] | python | 1 | 0 | |
face.keypoints.py |
from __future__ import division
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
import numpy as np
import time
import os
import cv2
import kmodel
from utils import transparentOverlay
os.environ['KERAS_BACKEND'] = 'tensorflow'
print(tf.__version__)
config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7))
# allow_growth=True per_process_gpu_memory_fraction = 0.3
#per_process_gpu_memory_fraction = 0.3
sess = tf.Session(config=config)
set_session(sess)
# os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Load the pre-trained model
#my_model = kmodel.load_trained_model('yuan_model_mac')
# Load your own trained model (uncomment the line below when testing)
my_model = kmodel.load_trained_model('face_keypoints_detection_cnn_model')
# Create the face detector
face_cascade = cv2.CascadeClassifier(
'cascades/haarcascade_frontalface_default.xml')
#smileCascade = cv2.CascadeClassifier('cascades/haarcascade_smile.xml')
# Open the camera
camera = cv2.VideoCapture(0)
# Load a sunglasses image
sunglasses = cv2.imread('sunglass.png', cv2.IMREAD_UNCHANGED)
# Infinite loop
while True:
# time.sleep(0.01)
    # Grab one frame from the camera
(_, frame) = camera.read()
frame = cv2.flip(frame, 1)
frame2 = np.copy(frame)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect all faces
faces = face_cascade.detectMultiScale(gray, 1.25, 6)
    # For each detected face
for (x, y, w, h) in faces:
        # Image region containing only the face
gray_face = gray[y:y+h, x:x+w]
color_face = frame[y:y+h, x:x+w]
        # Normalize the face image values to the range [0, 1]
gray_normalized = gray_face / 255
        # Resize the grayscale face to 96x96 to match the network input
original_shape = gray_face.shape # A Copy for future reference
face_resized = cv2.resize(
gray_normalized, (96, 96), interpolation=cv2.INTER_AREA)
face_resized = face_resized.reshape(1, 96, 96, 1)
        # Predict the keypoint coordinates
keypoints = my_model.predict(face_resized)
        # Convert the keypoint coordinates from [-1, 1] to [0, 96]
keypoints = keypoints * 48 + 48
        # Resize the color face to 96x96 to match the keypoints
face_resized_color = cv2.resize(
color_face, (96, 96), interpolation=cv2.INTER_AREA)
face_resized_color2 = np.copy(face_resized_color)
        # Pair the 30 network outputs into 15 (x, y) tuples
points = []
for i, co in enumerate(keypoints[0][0::2]):
points.append((co, keypoints[0][1::2][i]))
        # Determine the glasses width from keypoints left_eyebrow_outer_end_x[7] and right_eyebrow_outer_end_x[9]
sunglass_width = int((points[7][0]-points[9][0])*1.1)
        # Determine the glasses height from keypoints nose_tip_y[10] and right_eyebrow_inner_end_y[8]
sunglass_height = int((points[10][1]-points[8][1])/1.1)
sunglass_resized = cv2.resize(
sunglasses, (sunglass_width, sunglass_height), interpolation=cv2.INTER_CUBIC)
face_resized_color = transparentOverlay(face_resized_color, sunglass_resized, pos=(
int(points[9][0]), int(points[9][1])), scale=1)
        # Resize face_resized_color (with the glasses overlay) back to the original face size in the captured frame
frame[y:y+h, x:x+w] = cv2.resize(face_resized_color,
original_shape, interpolation=cv2.INTER_CUBIC)
        # Draw the keypoint coordinates on the face image
for keypoint in points:
cv2.circle(face_resized_color2, keypoint, 1, (0, 255, 0), 1)
frame2[y:y+h, x:x+w] = cv2.resize(face_resized_color2,
original_shape, interpolation=cv2.INTER_CUBIC)
    # Show the image with the glasses
cv2.imshow("With Glass", frame)
    # Show the image with the keypoints
cv2.imshow("With Keypoints", frame2)
    # Exit the loop when the 'q' key is pressed
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# Release the camera and close all windows
camera.release()
cv2.destroyAllWindows()
| []
| []
| [
"KERAS_BACKEND",
"KMP_DUPLICATE_LIB_OK"
]
| [] | ["KERAS_BACKEND", "KMP_DUPLICATE_LIB_OK"] | python | 2 | 0 | |
src/test/java/org/zaproxy/zap/extension/hud/ui/browser/tutorial/TutorialStatics.java | /*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Copyright 2018 The ZAP Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.zap.extension.hud.ui.browser.tutorial;
import org.junit.jupiter.api.Tag;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
@Tag("tutorial")
public class TutorialStatics {
private static final String DEFAULT_TUTORIAL_HOST_PORT = "localhost:9998";
private static String tutorialHostPort = null;
public static String NEXT_BUTTON_PREFIX = "Next:";
public static String PREVIOUS_BUTTON_PREFIX = "Previous:";
public static By NEXT_BUTTON_BY_ID = By.id("next-button");
public static By PREVIOUS_BUTTON_BY_ID = By.id("previous-button");
public static int ALERT_LOOP_COUNT = 10;
private static String getTutorialHostPort() {
if (tutorialHostPort == null) {
tutorialHostPort = System.getenv().get("ZAP_HUD_TUTORIAL");
if (tutorialHostPort == null) {
tutorialHostPort = DEFAULT_TUTORIAL_HOST_PORT;
}
}
return tutorialHostPort;
}
public static String getTutorialUrl() {
return "http://" + getTutorialHostPort();
}
public static String getTutorialUrl(String page) {
return getTutorialUrl() + "/" + page;
}
public static String getTutorialHudUrl() {
return "https://" + getTutorialHostPort();
}
public static String getTutorialHudUrl(String page) {
return getTutorialHudUrl() + "/" + page;
}
public static WebElement getNextButton(WebDriver wd) {
return wd.findElement(NEXT_BUTTON_BY_ID);
}
public static WebElement getPreviousButton(WebDriver wd) {
return wd.findElement(PREVIOUS_BUTTON_BY_ID);
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
tests/test_bootstrap.py | """Test the bootstrapping."""
# pylint: disable=protected-access
import asyncio
import logging
import os
from unittest.mock import Mock, patch
from homeassistant import bootstrap
import homeassistant.config as config_util
import homeassistant.util.dt as dt_util
from tests.common import (
MockModule,
get_test_config_dir,
mock_coro,
mock_integration,
patch_yaml_files,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
# prevent .HA_VERSION file from being written
@patch("homeassistant.bootstrap.conf_util.process_ha_config_upgrade", Mock())
@patch(
"homeassistant.util.location.async_detect_location_info",
Mock(return_value=mock_coro(None)),
)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
@patch("homeassistant.bootstrap.async_enable_logging", Mock(return_value=True))
def test_from_config_file(hass):
"""Test with configuration file."""
components = set(["browser", "conversation", "script"])
files = {"config.yaml": "".join("{}:\n".format(comp) for comp in components)}
with patch_yaml_files(files, True):
yield from bootstrap.async_from_config_file("config.yaml", hass)
assert components == hass.config.components
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
@asyncio.coroutine
def test_home_assistant_core_config_validation(hass):
"""Test if we pass in wrong information for HA conf."""
    # Extensive HA conf validation testing is done elsewhere
result = yield from bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_from_config_file_not_mount_deps_folder(loop):
"""Test that we not mount the deps folder inside async_from_config_file."""
hass = Mock(async_add_executor_job=Mock(side_effect=lambda *args: mock_coro()))
with patch("homeassistant.bootstrap.is_virtual_env", return_value=False), patch(
"homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
), patch(
"homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
) as mock_mount, patch(
"homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
):
await bootstrap.async_from_config_file("mock-path", hass)
assert len(mock_mount.mock_calls) == 1
with patch("homeassistant.bootstrap.is_virtual_env", return_value=True), patch(
"homeassistant.bootstrap.async_enable_logging", return_value=mock_coro()
), patch(
"homeassistant.bootstrap.async_mount_local_lib_path", return_value=mock_coro()
) as mock_mount, patch(
"homeassistant.bootstrap.async_from_config_dict", return_value=mock_coro()
):
await bootstrap.async_from_config_file("mock-path", hass)
assert len(mock_mount.mock_calls) == 0
async def test_load_hassio(hass):
"""Test that we load Hass.io component."""
with patch.dict(os.environ, {}, clear=True):
assert bootstrap._get_domains(hass, {}) == set()
with patch.dict(os.environ, {"HASSIO": "1"}):
assert bootstrap._get_domains(hass, {}) == {"hassio"}
async def test_empty_setup(hass):
"""Test an empty set up loads the core."""
await bootstrap._async_set_up_integrations(hass, {})
for domain in bootstrap.CORE_INTEGRATIONS:
assert domain in hass.config.components, domain
async def test_core_failure_aborts(hass, caplog):
"""Test failing core setup aborts further setup."""
with patch(
"homeassistant.components.homeassistant.async_setup",
return_value=mock_coro(False),
):
await bootstrap._async_set_up_integrations(hass, {"group": {}})
assert "core failed to initialize" in caplog.text
# We aborted early, group not set up
assert "group" not in hass.config.components
async def test_setting_up_config(hass, caplog):
"""Test we set up domains in config."""
await bootstrap._async_set_up_integrations(
hass, {"group hello": {}, "homeassistant": {}}
)
assert "group" in hass.config.components
async def test_setup_after_deps_all_present(hass, caplog):
"""Test after_dependencies when all present."""
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "first_dep", "second_dep"]
async def test_setup_after_deps_not_trigger_load(hass, caplog):
"""Test after_dependencies does not trigger loading it."""
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
async def test_setup_after_deps_not_present(hass, caplog):
"""Test after_dependencies when referenced integration doesn't exist."""
caplog.set_level(logging.DEBUG)
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
| []
| []
| []
| [] | [] | python | 0 | 0 | |
aws_saml_login/__init__.py | from aws_saml_login.saml import authenticate, assume_role, write_aws_credentials, get_boto3_session # noqa
__version__ = '0.12'
| []
| []
| []
| [] | [] | python | null | null | null |
setup.py | import json, marshal, os, ntpath, shutil
from setuptools import setup, Extension
import distutils
import sys
from shutil import copyfile
import distutils.dir_util
from distutils.core import Command
###################################################################################################
#
# Setup for NREL-PySAM Package
#
###################################################################################################
latest_version = '2.0.2'
# determine if making PyPi or Conda distribution
distclass = distutils.core.Distribution
if sys.argv[1] == "bdist_conda":
import distutils.command.bdist_conda
distclass = distutils.command.bdist_conda.CondaDistribution
# defaults and include directories
defaults_dir = os.environ['SAMNTDIR']+"/api/api_autogen/library/defaults/"
includepath = os.environ['SAMNTDIR']+"/api/include"
srcpath = os.environ['SAMNTDIR']+"/api/src"
this_directory = os.path.abspath(os.path.dirname(__file__))
libpath = this_directory+"/files"
# prepare package description
with open(os.path.join(this_directory, 'RELEASE.md'), encoding='utf-8') as f:
long_description = f.read()
# prepare package
libs = []
libfiles = []
extra_compile_args = ["-Wno-implicit-function-declaration", "-Wno-unused-function", "-Wno-strict-prototypes"]
extra_link_args = []
defines = []
if sys.platform == 'darwin':
from distutils import sysconfig
vars = sysconfig.get_config_vars()
vars['LDSHARED'] = vars['LDSHARED'].replace('-bundle', '-dynamiclib')
libs = ['SAM_api', 'ssc']
libfiles = ['libSAM_api.so', 'libssc.so']
extra_link_args = ["-Wl,-rpath,@loader_path/"]
extra_compile_args.append("-Wno-ignored-attributes")
if sys.platform == 'linux':
libs = ['SAM_api', 'ssc']
libfiles = ['libSAM_api.so', 'libssc.so']
extra_link_args = ["-Wl,-rpath,$ORIGIN/"]
extra_compile_args.append('-Wno-attributes')
if sys.platform == 'win32':
libs = ['SAM_api', 'ssc']
libfiles = ['SAM_api.dll', 'ssc.dll', 'SAM_api.lib', 'ssc.lib']
defines = [('__WINDOWS__', '1')]
extra_compile_args = []
###################################################################################################
#
# Copy Required Source and Data Files
#
###################################################################################################
# dynamic library files should be exported to pysam/files by post-build step of each library but copy over headers
distutils.dir_util.copy_tree(
includepath,
this_directory+"/include",
update=1,
verbose=1,
)
for filename in os.listdir(srcpath):
name = os.path.splitext(filename)
if name[1] == ".h":
copyfile(os.path.join(srcpath, filename), os.path.join("src", filename))
# serialize all defaults into dict
def _decode(o):
if isinstance(o, str):
try:
return float(o)
except ValueError:
return o
elif isinstance(o, dict):
dic = {}
for k, v in o.items():
if k != "hybrid_dispatch_schedule" and k != "biopwr_plant_tou_grid":
dic[k] = _decode(v)
else:
dic[k] = v
return dic
elif isinstance(o, list):
return [_decode(v) for v in o]
else:
return o
shutil.rmtree('files/defaults')
os.mkdir('files/defaults')
# generate defaults and copy them into installation
for filename in os.listdir(defaults_dir):
with open(defaults_dir + '/' + filename) as f:
name = os.path.splitext(filename)
if name[1] != '.json':
continue
data = json.load(f)
dic = data[list(data.keys())[0]]
with open('files/defaults/' + name[0] + '.df', "wb") as out:
marshal.dump(dic, out)
for filename in os.listdir(defaults_dir):
libfiles.append('defaults/' + os.path.splitext(filename)[0] + '.df')
# make list of all extension modules
extension_modules = [Extension('PySAM.AdjustmentFactors',
['src/AdjustmentFactors.c'],
define_macros=defines,
include_dirs=["include", "src"],
library_dirs=[libpath],
libraries=libs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
)]
for filename in os.listdir(this_directory+"/modules"):
extension_modules.append(Extension('PySAM.' + os.path.splitext(filename)[0],
['modules/' + filename],
define_macros=defines,
include_dirs=["include", "src"],
library_dirs=[libpath],
libraries=libs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
))
# function to rename macosx distribution for Python 3.7 to be minimum version of 10.12 instead of 10.14
class PostProcess(Command):
description = "rename macosx distribution for Python 3.7 to be minimum version of 10.12 instead of 10.14"
user_options = []
def initialize_options(self):
self.cwd = None
def finalize_options(self):
self.cwd = os.getcwd()
def run(self):
assert os.getcwd() == self.cwd, 'Must be in package root: %s' % self.cwd
name = "NREL_PySAM-" + latest_version + "-" + "cp37-cp37m-macosx_10_14_x86_64.whl"
newname = "NREL_PySAM-" + latest_version + "-" + "cp37-cp37m-macosx_10_12_x86_64.whl"
os.system('mv ./dist/' + name + ' ./dist/' + newname)
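
# Illustrative invocation (assumed; the 'post' command is registered via cmdclass in setup() below):
#   python setup.py bdist_wheel post
# This builds the wheel and then renames the macOS 10.14 tag to 10.12 for Python 3.7.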
###################################################################################################
#
# setup script
#
###################################################################################################
setup(
name='NREL-PySAM',
version=latest_version,
distclass=distclass,
url='https://pysam-docs.readthedocs.io',
description="National Renewable Energy Laboratory's System Advisor Model Python Wrapper",
long_description=long_description,
long_description_content_type='text/markdown',
license='BSD 3-Clause',
author="dguittet",
author_email="[email protected]",
include_package_data=True,
packages=['PySAM'],
package_dir={'PySAM': 'files'},
package_data={
'': libfiles},
install_requires=['NREL-PySAM-stubs'],
setup_requires=["pytest-runner"],
tests_require=["pytest"],
cmdclass={
'post': PostProcess
},
ext_modules=extension_modules
)
| []
| []
| [
"SAMNTDIR"
]
| [] | ["SAMNTDIR"] | python | 1 | 0 | |
config/config.go | // Copyright 2014 The Cayley Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"encoding/json"
"flag"
"os"
"github.com/barakmich/glog"
)
type Config struct {
DatabaseType string `json:"database"`
DatabasePath string `json:"db_path"`
DatabaseOptions map[string]interface{} `json:"db_options"`
ListenHost string `json:"listen_host"`
ListenPort string `json:"listen_port"`
ReadOnly bool `json:"read_only"`
GremlinTimeout int `json:"gremlin_timeout"`
LoadSize int `json:"load_size"`
}
var databasePath = flag.String("dbpath", "/tmp/testdb", "Path to the database.")
var databaseBackend = flag.String("db", "mem", "Database Backend.")
var host = flag.String("host", "0.0.0.0", "Host to listen on (defaults to all).")
var loadSize = flag.Int("load_size", 10000, "Size of triplesets to load")
var port = flag.String("port", "64210", "Port to listen on.")
var readOnly = flag.Bool("read_only", false, "Disable writing via HTTP.")
var gremlinTimeout = flag.Int("gremlin_timeout", 30, "Number of seconds until an individual query times out.")
func ParseConfigFromFile(filename string) *Config {
config := &Config{}
if filename == "" {
return config
}
f, err := os.Open(filename)
if err != nil {
glog.Fatalln("Couldn't open config file", filename)
}
defer f.Close()
dec := json.NewDecoder(f)
err = dec.Decode(config)
if err != nil {
glog.Fatalln("Couldn't read config file:", err)
}
return config
}
func ParseConfigFromFlagsAndFile(fileFlag string) *Config {
// Find the file...
var trueFilename string
if fileFlag != "" {
if _, err := os.Stat(fileFlag); os.IsNotExist(err) {
glog.Fatalln("Cannot find specified configuration file", fileFlag, ", aborting.")
} else {
trueFilename = fileFlag
}
} else {
if _, err := os.Stat(os.Getenv("CAYLEY_CFG")); err == nil {
trueFilename = os.Getenv("CAYLEY_CFG")
} else {
if _, err := os.Stat("/etc/cayley.cfg"); err == nil {
trueFilename = "/etc/cayley.cfg"
}
}
}
if trueFilename == "" {
glog.Infoln("Couldn't find a config file in either $CAYLEY_CFG or /etc/cayley.cfg. Going by flag defaults only.")
}
config := ParseConfigFromFile(trueFilename)
if config.DatabasePath == "" {
config.DatabasePath = *databasePath
}
if config.DatabaseType == "" {
config.DatabaseType = *databaseBackend
}
if config.ListenHost == "" {
config.ListenHost = *host
}
if config.ListenPort == "" {
config.ListenPort = *port
}
if config.GremlinTimeout == 0 {
config.GremlinTimeout = *gremlinTimeout
}
if config.LoadSize == 0 {
config.LoadSize = *loadSize
}
config.ReadOnly = config.ReadOnly || *readOnly
return config
}
| [
"\"CAYLEY_CFG\"",
"\"CAYLEY_CFG\""
]
| []
| [
"CAYLEY_CFG"
]
| [] | ["CAYLEY_CFG"] | go | 1 | 0 | |
pkg/operator/ceph/cluster/cephstatus.go | /*
Copyright 2019 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cluster to manage Kubernetes storage.
package cluster
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/pkg/operator/ceph/config"
opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
"github.com/rook/rook/pkg/operator/ceph/reporting"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
// defaultStatusCheckInterval is the interval to check the status of the ceph cluster
defaultStatusCheckInterval = 60 * time.Second
)
// cephStatusChecker aggregates the mon/cluster info needed to check the health of the monitors
type cephStatusChecker struct {
context *clusterd.Context
clusterInfo *cephclient.ClusterInfo
interval *time.Duration
client client.Client
isExternal bool
}
// newCephStatusChecker creates a new HealthChecker object
func newCephStatusChecker(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec) *cephStatusChecker {
c := &cephStatusChecker{
context: context,
clusterInfo: clusterInfo,
interval: &defaultStatusCheckInterval,
client: context.Client,
isExternal: clusterSpec.External.Enable,
}
// allow overriding the check interval with an env var on the operator
// Keep the existing behavior
var checkInterval *time.Duration
checkIntervalCRSetting := clusterSpec.HealthCheck.DaemonHealth.Status.Interval
checkIntervalEnv := os.Getenv("ROOK_CEPH_STATUS_CHECK_INTERVAL")
if checkIntervalEnv != "" {
if duration, err := time.ParseDuration(checkIntervalEnv); err == nil {
checkInterval = &duration
}
} else if checkIntervalCRSetting != nil {
checkInterval = &checkIntervalCRSetting.Duration
}
if checkInterval != nil {
logger.Infof("ceph status check interval is %s", checkInterval.String())
c.interval = checkInterval
}
return c
}
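
// Illustrative override (assumption: the variable is set on the operator deployment,
// not in the CephCluster CR): ROOK_CEPH_STATUS_CHECK_INTERVAL=45s takes precedence
// over the CR's status check interval, per the logic above.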
// checkCephStatus periodically checks the health of the cluster
func (c *cephStatusChecker) checkCephStatus(stopCh chan struct{}) {
// check the status immediately before starting the loop
c.checkStatus()
for {
select {
case <-stopCh:
logger.Infof("stopping monitoring of ceph status")
return
case <-time.After(*c.interval):
c.checkStatus()
}
}
}
// checkStatus queries the status of ceph health then updates the CR status
func (c *cephStatusChecker) checkStatus() {
var status cephclient.CephStatus
var err error
logger.Debugf("checking health of cluster")
condition := cephv1.ConditionReady
reason := cephv1.ClusterCreatedReason
if c.isExternal {
condition = cephv1.ConditionConnected
reason = cephv1.ClusterConnectedReason
}
// Check ceph's status
status, err = cephclient.StatusWithUser(c.context, c.clusterInfo)
if err != nil {
if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) {
logger.Info("skipping ceph status since operator is still initializing")
return
}
logger.Errorf("failed to get ceph status. %v", err)
message := "Failed to configure ceph cluster"
if c.isExternal {
message = "Failed to configure external ceph cluster"
}
status := cephStatusOnError(err.Error())
c.updateCephStatus(status, condition, reason, message, v1.ConditionFalse)
return
}
logger.Debugf("cluster status: %+v", status)
message := "Cluster created successfully"
if c.isExternal {
message = "Cluster connected successfully"
}
c.updateCephStatus(&status, condition, reason, message, v1.ConditionTrue)
if status.Health.Status != "HEALTH_OK" {
logger.Debug("checking for stuck pods on not ready nodes")
if err := c.forceDeleteStuckRookPodsOnNotReadyNodes(); err != nil {
logger.Errorf("failed to delete pod on not ready nodes. %v", err)
}
}
c.configureHealthSettings(status)
}
func (c *cephStatusChecker) configureHealthSettings(status cephclient.CephStatus) {
// loop through the health codes and log what we find
for healthCode, check := range status.Health.Checks {
logger.Debugf("Health: %q, code: %q, message: %q", check.Severity, healthCode, check.Summary.Message)
}
// disable the insecure global id if there are no old clients
if _, ok := status.Health.Checks["AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED"]; ok {
if _, ok := status.Health.Checks["AUTH_INSECURE_GLOBAL_ID_RECLAIM"]; !ok {
logger.Info("Disabling the insecure global ID as no legacy clients are currently connected. If you still require the insecure connections, see the CVE to suppress the health warning and re-enable the insecure connections. https://docs.ceph.com/en/latest/security/CVE-2021-20288/")
monStore := config.GetMonStore(c.context, c.clusterInfo)
if err := monStore.Set("mon", "auth_allow_insecure_global_id_reclaim", "false"); err != nil {
logger.Warningf("failed to disable the insecure global ID. %v", err)
} else {
logger.Info("insecure global ID is now disabled")
}
} else {
logger.Warning("insecure clients are connected to the cluster, to resolve the AUTH_INSECURE_GLOBAL_ID_RECLAIM health warning please refer to the upgrade guide to ensure all Ceph daemons are updated.")
}
}
}
// updateCephStatus updates the CephCluster CR status with the given Ceph status and condition
func (c *cephStatusChecker) updateCephStatus(status *cephclient.CephStatus, condition cephv1.ConditionType, reason cephv1.ConditionReason, message string, conditionStatus v1.ConditionStatus) {
clusterName := c.clusterInfo.NamespacedName()
cephCluster, err := c.context.RookClientset.CephV1().CephClusters(clusterName.Namespace).Get(context.TODO(), clusterName.Name, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
logger.Debug("CephCluster resource not found. Ignoring since object must be deleted.")
return
}
logger.Errorf("failed to retrieve ceph cluster %q in namespace %q to update status to %+v", clusterName.Name, clusterName.Namespace, status)
return
}
// Update with Ceph Status
cephCluster.Status.CephStatus = toCustomResourceStatus(cephCluster.Status, status)
// versions store the ceph version of all the ceph daemons and overall cluster version
versions, err := cephclient.GetAllCephDaemonVersions(c.context, c.clusterInfo)
if err != nil {
logger.Errorf("failed to get ceph daemons versions. %v", err)
} else {
// Update status with Ceph versions
cephCluster.Status.CephStatus.Versions = versions
}
// Update condition
logger.Debugf("updating ceph cluster %q status and condition to %+v, %v, %s, %s", clusterName.Namespace, status, conditionStatus, reason, message)
opcontroller.UpdateClusterCondition(c.context, cephCluster, c.clusterInfo.NamespacedName(), condition, conditionStatus, reason, message, true)
}
// toCustomResourceStatus converts the ceph status to the struct expected for the CephCluster CR status
func toCustomResourceStatus(currentStatus cephv1.ClusterStatus, newStatus *cephclient.CephStatus) *cephv1.CephStatus {
s := &cephv1.CephStatus{
Health: newStatus.Health.Status,
LastChecked: formatTime(time.Now().UTC()),
Details: make(map[string]cephv1.CephHealthMessage),
}
for name, message := range newStatus.Health.Checks {
s.Details[name] = cephv1.CephHealthMessage{
Severity: message.Severity,
Message: message.Summary.Message,
}
}
if newStatus.PgMap.TotalBytes != 0 {
s.Capacity.TotalBytes = newStatus.PgMap.TotalBytes
s.Capacity.UsedBytes = newStatus.PgMap.UsedBytes
s.Capacity.AvailableBytes = newStatus.PgMap.AvailableBytes
s.Capacity.LastUpdated = formatTime(time.Now().UTC())
}
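// Preserve the previously reported health and the timestamp of its last
// change; LastChanged is only bumped when the health string differs from the
// value recorded on the existing CR status, and the old capacity figures are
// kept when the new PG map reports zero bytes.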
if currentStatus.CephStatus != nil {
s.PreviousHealth = currentStatus.CephStatus.PreviousHealth
s.LastChanged = currentStatus.CephStatus.LastChanged
if currentStatus.CephStatus.Health != s.Health {
s.PreviousHealth = currentStatus.CephStatus.Health
s.LastChanged = s.LastChecked
}
if newStatus.PgMap.TotalBytes == 0 {
s.Capacity = currentStatus.CephStatus.Capacity
}
}
return s
}
func formatTime(t time.Time) string {
return t.Format(time.RFC3339)
}
func (c *ClusterController) updateClusterCephVersion(image string, cephVersion cephver.CephVersion) {
ctx := context.TODO()
logger.Infof("cluster %q: version %q detected for image %q", c.namespacedName.Namespace, cephVersion.String(), image)
cephCluster, err := c.context.RookClientset.CephV1().CephClusters(c.namespacedName.Namespace).Get(ctx, c.namespacedName.Name, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
logger.Debug("CephCluster resource not found. Ignoring since object must be deleted.")
return
}
logger.Errorf("failed to retrieve ceph cluster %q to update ceph version to %+v. %v", c.namespacedName.Name, cephVersion, err)
return
}
cephClusterVersion := &cephv1.ClusterVersion{
Image: image,
Version: opcontroller.GetCephVersionLabel(cephVersion),
}
// update the Ceph version on the retrieved cluster object
// do not overwrite the ceph status that is updated in a separate goroutine
cephCluster.Status.CephVersion = cephClusterVersion
if err := reporting.UpdateStatus(c.client, cephCluster); err != nil {
logger.Errorf("failed to update cluster %q version. %v", c.namespacedName.Name, err)
return
}
}
func cephStatusOnError(errorMessage string) *cephclient.CephStatus {
details := make(map[string]cephclient.CheckMessage)
details["error"] = cephclient.CheckMessage{
Severity: "Urgent",
Summary: cephclient.Summary{
Message: errorMessage,
},
}
return &cephclient.CephStatus{
Health: cephclient.HealthStatus{
Status: "HEALTH_ERR",
Checks: details,
},
}
}
// forceDeleteStuckRookPodsOnNotReadyNodes lists all the nodes that are in NotReady state,
// gets all the Rook pods on each failed node, and force deletes the pods stuck in a terminating state.
func (c *cephStatusChecker) forceDeleteStuckRookPodsOnNotReadyNodes() error {
nodes, err := k8sutil.GetNotReadyKubernetesNodes(c.context.Clientset)
if err != nil {
return errors.Wrap(err, "failed to get NotReady nodes")
}
for _, node := range nodes {
pods, err := c.getRookPodsOnNode(node.Name)
if err != nil {
logger.Errorf("failed to get pods on NotReady node %q. %v", node.Name, err)
}
for _, pod := range pods {
if err := k8sutil.ForceDeletePodIfStuck(c.context, pod); err != nil {
logger.Warningf("skipping forced delete of stuck pod %q. %v", pod.Name, err)
}
}
}
return nil
}
func (c *cephStatusChecker) getRookPodsOnNode(node string) ([]v1.Pod, error) {
clusterName := c.clusterInfo.NamespacedName()
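// Only pods whose "app" label matches one of the Rook daemon or CSI
// component labels below are considered when force deleting stuck pods.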
appLabels := []string{
"csi-rbdplugin-provisioner",
"csi-rbdplugin",
"csi-cephfsplugin-provisioner",
"csi-cephfsplugin",
"rook-ceph-operator",
"rook-ceph-mon",
"rook-ceph-osd",
"rook-ceph-crashcollector",
"rook-ceph-mgr",
"rook-ceph-mds",
"rook-ceph-rgw",
}
podsOnNode := []v1.Pod{}
listOpts := metav1.ListOptions{
FieldSelector: fmt.Sprintf("spec.nodeName=%s", node),
}
pods, err := c.context.Clientset.CoreV1().Pods(clusterName.Namespace).List(context.TODO(), listOpts)
if err != nil {
return podsOnNode, errors.Wrapf(err, "failed to get pods on node %q", node)
}
for _, pod := range pods.Items {
for _, label := range appLabels {
if pod.Labels["app"] == label {
podsOnNode = append(podsOnNode, pod)
break
}
}
}
return podsOnNode, nil
}
| [
"\"ROOK_CEPH_STATUS_CHECK_INTERVAL\""
]
| []
| [
"ROOK_CEPH_STATUS_CHECK_INTERVAL"
]
| [] | ["ROOK_CEPH_STATUS_CHECK_INTERVAL"] | go | 1 | 0 | |
analytics-service/home-hits/store/update.go | package store
import (
"fmt"
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// IncomingEvent represents the expected SQS event data
type IncomingEvent struct {
ConnectionID string
CurrentPage string
PreviousPage string
Refreshed bool
EventType string
Referrer string
}
// getClient creates a DynamoDB client session used to access the datastore
func getClient() (*dynamodb.DynamoDB, error) {
sess, err := session.NewSession(&aws.Config{
Region: aws.String(os.Getenv("TABLE_REGION")),
})
if err != nil {
return nil, fmt.Errorf("Could not create a new session: %v", err)
}
return dynamodb.New(sess), nil
}
// UpdateTable updates the table with the given values from the SQS event
func UpdateTable(data IncomingEvent) error {
client, err := getClient()
if err != nil {
return fmt.Errorf("Error: %v", err)
}
if data.EventType == "homepage_view" && data.ConnectionID != "" {
var uniqueCount int
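// A refreshed page load still counts toward totalViews below, but only a
// non-refreshed load is treated as a unique view.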
if !data.Refreshed {
uniqueCount = 1
}
input := &dynamodb.UpdateItemInput{
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":totalCount": {
N: aws.String(strconv.Itoa(1)),
},
":uniqueCount": {
N: aws.String(strconv.Itoa(uniqueCount)),
},
},
TableName: aws.String(os.Getenv("TABLE_NAME")),
Key: map[string]*dynamodb.AttributeValue{
"pageName": {
S: aws.String("Home_Page"),
},
},
ReturnValues: aws.String("UPDATED_NEW"),
UpdateExpression: aws.String("ADD uniqueViews :uniqueCount, totalViews :totalCount"),
}
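// The ADD action increments the numeric uniqueViews and totalViews
// attributes atomically, creating them with the supplied value if the item
// does not have them yet.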
response, err := client.UpdateItem(input)
if err != nil {
return fmt.Errorf("Could not update table item: %v", err)
}
fmt.Println("Home_Page item updated", response)
}
return nil
}
| [
"\"TABLE_REGION\"",
"\"TABLE_NAME\""
]
| []
| [
"TABLE_NAME",
"TABLE_REGION"
]
| [] | ["TABLE_NAME", "TABLE_REGION"] | go | 2 | 0 | |
references/detection/utils.py | import datetime
import errno
import os
import time
from collections import defaultdict, deque
import torch
import torch.distributed as dist
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
data_list = [None] * world_size
dist.all_gather_object(data_list, data)
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.inference_mode():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger:
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(f"{name}: {str(meter)}")
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
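# Wrap an iterable and, every `print_freq` iterations, print the tracked
# meters together with the ETA, per-iteration time, data-loading time and
# (when CUDA is available) the peak GPU memory used so far.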
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)")
def collate_fn(batch):
return tuple(zip(*batch))
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
    This function disables printing when not in the master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
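# RANK, WORLD_SIZE and LOCAL_RANK are the variables exported by torchrun /
# torch.distributed.launch; SLURM_PROCID is used as a fallback when the job
# is scheduled through SLURM instead.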
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
| []
| []
| [
"LOCAL_RANK",
"WORLD_SIZE",
"SLURM_PROCID",
"RANK"
]
| [] | ["LOCAL_RANK", "WORLD_SIZE", "SLURM_PROCID", "RANK"] | python | 4 | 0 | |
setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "maskrcnn_benchmark", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
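# Build the CUDA extension either when a CUDA-capable setup is detected or
# when FORCE_CUDA=1 is set, typically used when building in containers or on
# CI machines where no GPU is visible at build time.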
if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
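# These defines are commonly used to keep CUDA's built-in half-precision
# operators from clashing with the half implementation in PyTorch's headers.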
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"maskrcnn_benchmark._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="maskrcnn_benchmark",
version="0.1",
author="fmassa",
url="https://github.com/facebookresearch/maskrcnn-benchmark",
description="object detection in pytorch",
packages=find_packages(exclude=("configs", "tests",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
) | []
| []
| [
"FORCE_CUDA"
]
| [] | ["FORCE_CUDA"] | python | 1 | 0 | |
src/net/http/fs_test.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
{r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
{r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
}
func TestServeFile(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
func TestServeFile_DotDot(t *testing.T) {
tests := []struct {
req string
wantStatus int
}{
{"/testdata/file", 200},
{"/../file", 400},
{"/..", 400},
{"/../", 400},
{"/../foo", 400},
{"/..\\foo", 400},
{"/file/a", 200},
{"/file/a..", 200},
{"/file/a/..", 400},
{"/file/a\\..", 400},
}
for _, tt := range tests {
req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + tt.req + " HTTP/1.1\r\nHost: foo\r\n\r\n")))
if err != nil {
t.Errorf("bad request %q: %v", tt.req, err)
continue
}
rec := httptest.NewRecorder()
ServeFile(rec, req, "testdata/file")
if rec.Code != tt.wantStatus {
t.Errorf("for request %q, status = %d; want %d", tt.req, rec.Code, tt.wantStatus)
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func TestFileServerEscapesNames(t *testing.T) {
defer afterTest(t)
const dirListPrefix = "<pre>\n"
const dirListSuffix = "\n</pre>\n"
tests := []struct {
name, escaped string
}{
{`simple_name`, `<a href="simple_name">simple_name</a>`},
{`"'<>&`, `<a href="%22%27%3C%3E&">"'<>&</a>`},
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo"><combo>?foo</a>`},
{`foo:bar`, `<a href="./foo:bar">foo:bar</a>`},
}
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
fs := make(fakeFS)
for i, test := range tests {
testFile := &fakeFileInfo{basename: test.name}
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
dir: true,
modtime: time.Unix(1000000000, 0).UTC(),
ents: []*fakeFileInfo{testFile},
}
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
for i, test := range tests {
url := fmt.Sprintf("%s/%d", ts.URL, i)
res, err := Get(url)
if err != nil {
t.Fatalf("test %q: Get: %v", test.name, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("test %q: read Body: %v", test.name, err)
}
s := string(b)
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
}
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
}
res.Body.Close()
}
}
func TestFileServerSortsNames(t *testing.T) {
defer afterTest(t)
const contents = "I am a fake file"
dirMod := time.Unix(123, 0).UTC()
fileMod := time.Unix(1000000000, 0).UTC()
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{
{
basename: "b",
modtime: fileMod,
contents: contents,
},
{
basename: "a",
modtime: fileMod,
contents: contents,
},
},
},
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("read Body: %v", err)
}
s := string(b)
if !strings.Contains(s, "<a href=\"a\">a</a>\n<a href=\"b\">b</a>") {
t.Errorf("output appears to be unsorted:\n%s", s)
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
defer afterTest(t)
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on windows")
}
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Skip("skipping test; no /etc/hosts file")
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
defer afterTest(t)
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.FormValue("override") {
case "1":
w.Header().Set("Content-Type", ctype)
case "2":
// Explicitly inhibit sniffing.
w.Header()["Content-Type"] = []string{}
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override string, want []string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
}
resp.Body.Close()
}
get("0", []string{"text/plain; charset=utf-8"})
get("1", []string{ctype})
get("2", nil)
}
func TestServeFileMimeType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
// Issue 13996
func TestServeDirWithoutTrailingSlash(t *testing.T) {
e := "/testdata/"
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, ".")
}))
defer ts.Close()
r, err := Get(ts.URL + "/testdata")
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if g := r.Request.URL.Path; g != e {
t.Errorf("got %s, want %s", g, e)
}
}
// Tests that ServeFile doesn't add a Content-Length if a Content-Encoding is
// specified.
func TestServeFileWithContentEncoding_h1(t *testing.T) { testServeFileWithContentEncoding(t, h1Mode) }
func TestServeFileWithContentEncoding_h2(t *testing.T) { testServeFileWithContentEncoding(t, h2Mode) }
func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
// Because the testdata is so small, it would fit in
// both the h1 and h2 Server's write buffers. For h1,
// sendfile is used, though, forcing a header flush at
// the io.Copy. http2 doesn't do a header flush so
// buffers all 11 bytes and then adds its own
// Content-Length. To prevent the Server's
// Content-Length and test ServeFile only, flush here.
w.(Flusher).Flush()
}))
defer cst.close()
resp, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
func TestFileServerZeroByte(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
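// fakeFileInfo and fakeFS below provide an in-memory FileSystem and
// os.FileInfo implementation so the following tests can serve synthetic
// files and directories without touching the real filesystem.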
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
err error
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
entpos int
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
limit := f.entpos + count
if count <= 0 || limit > len(f.fi.ents) {
limit = len(f.fi.ents)
}
for ; f.entpos < limit; f.entpos++ {
fis = append(fis, f.fi.ents[f.entpos])
}
if len(fis) == 0 && count > 0 {
return fis, io.EOF
} else {
return fis, nil
}
}
type fakeFS map[string]*fakeFileInfo
func (fs fakeFS) Open(name string) (File, error) {
name = path.Clean(name)
f, ok := fs[name]
if !ok {
return nil, os.ErrNotExist
}
if f.err != nil {
return nil, f.err
}
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
defer afterTest(t)
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
defer afterTest(t)
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
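	// Each test case sends exactly one serveParam on servec before issuing its
	// request; the handler below receives it and serves the described content
	// with the requested ETag and Content-Type headers.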
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
// One of file or content must be set:
file string
content io.ReadSeeker
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantContentRange string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"not_modified_etag_no_seek": {
content: panicOnSeek{nil}, // should never be called
serveETag: `W/"foo"`, // If-None-Match uses weak ETag comparison
reqHeader: map[string]string{
"If-None-Match": `"baz", W/"foo"`,
},
wantStatus: 304,
},
"if_none_match_mismatch": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"Foo"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"A"`,
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `W/"A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_no_overlap": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=10-20",
},
wantStatus: StatusRequestedRangeNotSatisfiable,
wantContentType: "text/plain; charset=utf-8",
wantContentRange: "bytes */8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_with_modtime": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_nanos": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"unix_zero_modtime": {
content: strings.NewReader("<html>foo"),
modtime: time.Unix(0, 0),
wantStatus: StatusOK,
wantContentType: "text/html; charset=utf-8",
},
"ifmatch_matches": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"Z", "A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_star": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `*`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_failed": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"B"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"ifmatch_fails_on_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"If-Match": `W/"A"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"if_unmodified_since_true": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
"if_unmodified_since_false": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.Add(-2 * time.Second).UTC().Format(TimeFormat),
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
}
for testName, tt := range tests {
var content io.ReadSeeker
if tt.file != "" {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
content = f
} else {
content = tt.content
}
servec <- serveParam{
name: filepath.Base(tt.file),
content: content,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
res, err := DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Content-Range"), tt.wantContentRange; g != e {
t.Errorf("test %q: content-range = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
}
}
}
// Issue 12991
func TestServerFileStatError(t *testing.T) {
rec := httptest.NewRecorder()
r, _ := NewRequest("GET", "http://foo/", nil)
redirect := false
name := "file.txt"
fs := issue12991FS{}
ExportServeFile(rec, r, fs, name, redirect)
if body := rec.Body.String(); !strings.Contains(body, "403") || !strings.Contains(body, "Forbidden") {
t.Errorf("wanted 403 forbidden message; got: %s", body)
}
}
type issue12991FS struct{}
func (issue12991FS) Open(string) (File, error) { return issue12991File{}, nil }
type issue12991File struct{ File }
func (issue12991File) Stat() (os.FileInfo, error) { return nil, os.ErrPermission }
func (issue12991File) Close() error { return nil }
func TestServeContentErrorMessages(t *testing.T) {
defer afterTest(t)
fs := fakeFS{
"/500": &fakeFileInfo{
err: errors.New("random error"),
},
"/403": &fakeFileInfo{
err: &os.PathError{Err: os.ErrPermission},
},
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
for _, code := range []int{403, 404, 500} {
res, err := DefaultClient.Get(fmt.Sprintf("%s/%d", ts.URL, code))
if err != nil {
t.Errorf("Error fetching /%d: %v", code, err)
continue
}
if res.StatusCode != code {
t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
}
res.Body.Close()
}
}
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
defer afterTest(t)
if runtime.GOOS != "linux" {
t.Skip("skipping; linux-only test")
}
if _, err := exec.LookPath("strace"); err != nil {
t.Skip("skipping; strace not found in path")
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
syscalls := "sendfile,sendfile64"
switch runtime.GOARCH {
case "mips64", "mips64le", "s390x":
// strace on the above platforms doesn't support sendfile64
// and will error out if we specify that with `-e trace='.
syscalls = "sendfile"
}
var buf bytes.Buffer
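	// Re-exec this test binary under strace, tracing only the sendfile
	// syscalls; the child sees GO_WANT_HELPER_PROCESS=1 and runs
	// TestLinuxSendfileChild, which serves testdata over the inherited listener.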
child := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
if err := child.Start(); err != nil {
t.Skipf("skipping; failed to start straced child: %v", err)
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Post(fmt.Sprintf("http://%s/quit", ln.Addr()), "", nil)
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
out := buf.String()
if !rx.MatchString(out) && !rxResume.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
r, err := DefaultClient.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
}
return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
func TestFileServerCleanPath(t *testing.T) {
tests := []struct {
path string
wantCode int
wantOpen []string
}{
{"/", 200, []string{"/", "/index.html"}},
{"/dir", 301, []string{"/dir"}},
{"/dir/", 200, []string{"/dir", "/dir/index.html"}},
}
for _, tt := range tests {
var log []string
rr := httptest.NewRecorder()
req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil)
FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req)
if !reflect.DeepEqual(log, tt.wantOpen) {
t.Logf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen)
}
if rr.Code != tt.wantCode {
t.Logf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode)
}
}
}
type fileServerCleanPathDir struct {
log *[]string
}
func (d fileServerCleanPathDir) Open(path string) (File, error) {
*(d.log) = append(*(d.log), path)
if path == "/" || path == "/dir" || path == "/dir/" {
// Just return back something that's a directory.
return Dir(".").Open(".")
}
return nil, os.ErrNotExist
}
type panicOnSeek struct{ io.ReadSeeker }
func Test_scanETag(t *testing.T) {
tests := []struct {
in string
wantETag string
wantRemain string
}{
{`W/"etag-1"`, `W/"etag-1"`, ""},
{`"etag-2"`, `"etag-2"`, ""},
{`"etag-1", "etag-2"`, `"etag-1"`, `, "etag-2"`},
{"", "", ""},
{"", "", ""},
{"W/", "", ""},
{`W/"truc`, "", ""},
{`w/"case-sensitive"`, "", ""},
}
for _, test := range tests {
etag, remain := ExportScanETag(test.in)
if etag != test.wantETag || remain != test.wantRemain {
t.Errorf("scanETag(%q)=%q %q, want %q %q", test.in, etag, remain, test.wantETag, test.wantRemain)
}
}
}
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
cmd/buildlet/stage0/stage0.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The stage0 command looks up the buildlet's URL from its environment
// (GCE metadata service, scaleway, etc), downloads it, and runs
// it. If not on GCE, such as when in a Linux Docker container being
// developed and tested locally, the stage0 instead looks for the
// META_BUILDLET_BINARY_URL environment to have a URL to the buildlet
// binary.
//
// The stage0 binary is typically baked into the VM or container
// images or manually copied to dedicated machines once and is typically never
// auto-updated. Changes to this binary should be rare, as it's
// difficult and slow to roll out. Any per-host-type logic to do at
// start-up should be done in x/build/cmd/buildlet instead, which is
// re-downloaded once per build, and rolls out easily.
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
"cloud.google.com/go/compute/metadata"
"golang.org/x/build/internal/httpdl"
"golang.org/x/build/internal/untar"
)
// This lets us be lazy and put the stage0 start-up in rc.local where
// it might race with the network coming up, rather than write proper
// upstart+systemd+init scripts:
var networkWait = flag.Duration("network-wait", 0, "if zero, a default is used if needed")
const osArch = runtime.GOOS + "/" + runtime.GOARCH
const attr = "buildlet-binary-url"
// untar helper, for the Windows image prep script.
var (
untarFile = flag.String("untar-file", "", "if non-empty, tar.gz to untar to --untar-dest-dir")
untarDestDir = flag.String("untar-dest-dir", "", "destination directory to untar --untar-file to")
)
// configureSerialLogOutput and closeSerialLogOutput are set non-nil
// on some platforms to configure log output to go to the serial
// console and to close the serial port, respectively.
var (
configureSerialLogOutput func()
closeSerialLogOutput func()
)
var timeStart = time.Now()
func main() {
if configureSerialLogOutput != nil {
configureSerialLogOutput()
}
log.SetPrefix("stage0: ")
flag.Parse()
if *untarFile != "" {
log.Printf("running in untar mode, untarring %q to %q", *untarFile, *untarDestDir)
untarMode()
log.Printf("done untarring; exiting")
return
}
log.Printf("bootstrap binary running")
var isMacStadiumVM bool
switch osArch {
case "linux/arm":
switch env := os.Getenv("GO_BUILDER_ENV"); env {
case "linux-arm-arm5spacemonkey", "host-linux-arm-scaleway":
// No setup currently.
default:
panic(fmt.Sprintf("unknown/unspecified $GO_BUILDER_ENV value %q", env))
}
case "linux/arm64":
switch env := os.Getenv("GO_BUILDER_ENV"); env {
case "host-linux-arm64-packet":
// No special setup.
default:
panic(fmt.Sprintf("unknown/unspecified $GO_BUILDER_ENV value %q", env))
}
case "darwin/amd64":
// The MacStadium builders' baked-in stage0.sh
// bootstrap file doesn't set GO_BUILDER_ENV
// unfortunately, so use the filename it runs its
// downloaded bootstrap URL to determine whether we're
// in that environment.
isMacStadiumVM = len(os.Args) > 0 && strings.HasSuffix(os.Args[0], "run-builder")
log.Printf("isMacStadiumVM = %v", isMacStadiumVM)
os.Setenv("GO_BUILDER_ENV", "macstadium_vm")
}
if !awaitNetwork() {
sleepFatalf("network didn't become reachable")
}
timeNetwork := time.Now()
netDelay := prettyDuration(timeNetwork.Sub(timeStart))
log.Printf("network up after %v", netDelay)
Download:
// Note: we name it ".exe" for Windows, but the name also
// works fine on Linux, etc.
target := filepath.FromSlash("./buildlet.exe")
if err := download(target, buildletURL()); err != nil {
sleepFatalf("Downloading %s: %v", buildletURL(), err)
}
if runtime.GOOS != "windows" {
if err := os.Chmod(target, 0755); err != nil {
log.Fatal(err)
}
}
downloadDelay := prettyDuration(time.Since(timeNetwork))
log.Printf("downloaded buildlet in %v", downloadDelay)
env := os.Environ()
if isUnix() && os.Getuid() == 0 {
if os.Getenv("USER") == "" {
env = append(env, "USER=root")
}
if os.Getenv("HOME") == "" {
env = append(env, "HOME=/root")
}
}
env = append(env, fmt.Sprintf("GO_STAGE0_NET_DELAY=%v", netDelay))
env = append(env, fmt.Sprintf("GO_STAGE0_DL_DELAY=%v", downloadDelay))
cmd := exec.Command(target)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = env
// buildEnv is set by some builders. It's increasingly set by new ones.
// It predates the buildtype-vs-hosttype split, so the values aren't
// always host types, but they're often host types. They should probably
// be host types in the future, or we can introduce GO_BUILD_HOST_TYPE
// to be explicit and kill off GO_BUILDER_ENV.
buildEnv := os.Getenv("GO_BUILDER_ENV")
switch buildEnv {
case "linux-arm-arm5spacemonkey":
cmd.Args = append(cmd.Args, reverseHostTypeArgs("host-linux-arm5spacemonkey")...)
cmd.Args = append(cmd.Args, os.ExpandEnv("--workdir=${WORKDIR}"))
case "host-linux-arm-scaleway":
scalewayArgs := append(
reverseHostTypeArgs(buildEnv),
"--hostname="+os.Getenv("HOSTNAME"),
)
cmd.Args = append(cmd.Args,
scalewayArgs...,
)
case "host-linux-mipsle-mengzhuo":
cmd.Args = append(cmd.Args, reverseHostTypeArgs(buildEnv)...)
cmd.Args = append(cmd.Args, os.ExpandEnv("--workdir=${WORKDIR}"))
case "host-linux-mips64le-rtrk":
cmd.Args = append(cmd.Args, reverseHostTypeArgs(buildEnv)...)
cmd.Args = append(cmd.Args, os.ExpandEnv("--workdir=${WORKDIR}"))
cmd.Args = append(cmd.Args, os.ExpandEnv("--hostname=${GO_BUILDER_ENV}"))
case "host-linux-mips64-rtrk":
cmd.Args = append(cmd.Args, reverseHostTypeArgs(buildEnv)...)
cmd.Args = append(cmd.Args, os.ExpandEnv("--workdir=${WORKDIR}"))
cmd.Args = append(cmd.Args, os.ExpandEnv("--hostname=${GO_BUILDER_ENV}"))
case "host-linux-ppc64le-power9-osu":
cmd.Args = append(cmd.Args, reverseHostTypeArgs(buildEnv)...)
case "host-linux-ppc64le-osu": // power8
cmd.Args = append(cmd.Args, reverseHostTypeArgs(buildEnv)...)
case "host-linux-ppc64-osu":
cmd.Args = append(cmd.Args, reverseHostTypeArgs(buildEnv)...)
}
switch osArch {
case "linux/s390x":
cmd.Args = append(cmd.Args, "--workdir=/data/golang/workdir")
cmd.Args = append(cmd.Args, reverseHostTypeArgs("host-linux-s390x")...)
case "linux/arm64":
switch buildEnv {
case "host-linux-arm64-packet":
hostname := os.Getenv("HOSTNAME") // if empty, docker container name is used
cmd.Args = append(cmd.Args,
"--reverse-type="+buildEnv,
"--workdir=/workdir",
"--hostname="+hostname,
"--halt=false",
"--reboot=false",
"--coordinator=farmer.golang.org:443",
)
default:
panic(fmt.Sprintf("unknown/unspecified $GO_BUILDER_ENV value %q", env))
}
case "solaris/amd64", "illumos/amd64":
hostType := buildEnv
cmd.Args = append(cmd.Args, reverseHostTypeArgs(hostType)...)
}
// Release the serial port (if we opened it) so the buildlet
// process can open & write to it. At least on Windows, only
// one process can have it open.
if closeSerialLogOutput != nil {
closeSerialLogOutput()
}
err := cmd.Run()
if isMacStadiumVM {
if err != nil {
log.Printf("error running buildlet: %v", err)
log.Printf("restarting in 2 seconds.")
time.Sleep(2 * time.Second) // in case we're spinning, slow it down
} else {
log.Printf("buildlet process exited; restarting.")
}
// Some of the MacStadium VM environments reuse their
// environment. Re-download the buildlet (if it
// changed-- httpdl does conditional downloading) and
// then re-run. At least on Sierra we never get this
// far because the buildlet will halt the machine
// before we get here. (and then cmd/makemac will
// recreate the VM)
// But if we get here, restart the process.
goto Download
}
if err != nil {
if configureSerialLogOutput != nil {
configureSerialLogOutput()
}
sleepFatalf("Error running buildlet: %v", err)
}
}
// reverseHostTypeArgs returns the default arguments for the buildlet
// for the provided host type. (one of the keys of the
// x/build/dashboard.Hosts map)
func reverseHostTypeArgs(hostType string) []string {
return []string{
"--halt=false",
"--reverse-type=" + hostType,
"--coordinator=farmer.golang.org:443",
}
}
// awaitNetwork reports whether the network came up within 30 seconds,
// determined somewhat arbitrarily via a DNS lookup for google.com.
func awaitNetwork() bool {
timeout := 30 * time.Second
if runtime.GOOS == "windows" {
timeout = 5 * time.Minute // empirically slower sometimes?
}
if *networkWait != 0 {
timeout = *networkWait
}
deadline := time.Now().Add(timeout)
var lastSpam time.Time
log.Printf("waiting for network.")
for time.Now().Before(deadline) {
t0 := time.Now()
if isNetworkUp() {
return true
}
failAfter := time.Since(t0)
if now := time.Now(); now.After(lastSpam.Add(5 * time.Second)) {
log.Printf("network still down for %v; probe failure took %v",
prettyDuration(time.Since(timeStart)),
prettyDuration(failAfter))
lastSpam = now
}
time.Sleep(1 * time.Second)
}
log.Printf("gave up waiting for network")
return false
}
// isNetworkUp reports whether the network is up by hitting an
// known-up HTTP server. It might block for a few seconds before
// returning an answer.
func isNetworkUp() bool {
const probeURL = "http://farmer.golang.org/netcheck" // 404 is fine.
c := &http.Client{
Timeout: 5 * time.Second,
Transport: &http.Transport{
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
},
}
res, err := c.Get(probeURL)
if err != nil {
return false
}
res.Body.Close()
return true
}
func buildletURL() string {
if v := os.Getenv("META_BUILDLET_BINARY_URL"); v != "" {
return v
}
switch os.Getenv("GO_BUILDER_ENV") {
case "linux-arm-arm5spacemonkey":
return "https://storage.googleapis.com/go-builder-data/buildlet.linux-arm-arm5"
}
if metadata.OnGCE() {
v, err := metadata.InstanceAttributeValue(attr)
if err == nil {
return v
}
sleepFatalf("on GCE, but no META_BUILDLET_BINARY_URL env or instance attribute %q: %v", attr, err)
}
// Fallback:
return fmt.Sprintf("https://storage.googleapis.com/go-builder-data/buildlet.%s-%s", runtime.GOOS, runtime.GOARCH)
}
func sleepFatalf(format string, args ...interface{}) {
log.Printf(format, args...)
if runtime.GOOS == "windows" {
log.Printf("(sleeping for 1 minute before failing)")
time.Sleep(time.Minute) // so user has time to see it in cmd.exe, maybe
}
os.Exit(1)
}
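// download fetches url into file, retrying a couple of times on transient
// errors before giving up.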
func download(file, url string) error {
log.Printf("downloading %s to %s ...\n", url, file)
const maxTry = 3
var lastErr error
for try := 1; try <= maxTry; try++ {
if try > 1 {
// network should be up by now per awaitNetwork, so just retry
// shortly a few times on errors.
time.Sleep(2 * time.Second)
}
err := httpdl.Download(file, url)
if err == nil {
fi, err := os.Stat(file)
if err != nil {
return err
}
log.Printf("downloaded %s (%d bytes)", file, fi.Size())
return nil
}
lastErr = err
log.Printf("try %d/%d download failure: %v", try, maxTry, err)
}
return lastErr
}
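// aptGetInstall installs the given Debian packages, logging how long the
// installation took.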
func aptGetInstall(pkgs ...string) {
t0 := time.Now()
args := append([]string{"--yes", "install"}, pkgs...)
cmd := exec.Command("apt-get", args...)
if out, err := cmd.CombinedOutput(); err != nil {
log.Fatalf("error running apt-get install: %s", out)
}
log.Printf("stage0: apt-get installed %q in %v", pkgs, time.Since(t0).Round(time.Second/10))
}
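// initBootstrapDir downloads (with conditional caching) and unpacks the Go
// bootstrap toolchain tarball for this GOOS/GOARCH into destDir.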
func initBootstrapDir(destDir, tgzCache string) {
t0 := time.Now()
if err := os.MkdirAll(destDir, 0755); err != nil {
log.Fatal(err)
}
latestURL := fmt.Sprintf("https://storage.googleapis.com/go-builder-data/gobootstrap-%s-%s.tar.gz",
runtime.GOOS, runtime.GOARCH)
if err := httpdl.Download(tgzCache, latestURL); err != nil {
log.Fatalf("dowloading %s to %s: %v", latestURL, tgzCache, err)
}
log.Printf("synced %s to %s in %v", latestURL, tgzCache, time.Since(t0).Round(time.Second/10))
t1 := time.Now()
// TODO(bradfitz): rewrite this to use Go instead of shelling
// out to tar? if this ever gets used on platforms besides
// Unix. For Windows and Plan 9 we bake in the bootstrap
// tarball into the image anyway. So this works for now.
// Solaris might require tweaking to use gtar instead or
// something.
tar := exec.Command("tar", "zxf", tgzCache)
tar.Dir = destDir
out, err := tar.CombinedOutput()
if err != nil {
log.Fatalf("error untarring %s to %s: %s", tgzCache, destDir, out)
}
log.Printf("untarred %s to %s in %v", tgzCache, destDir, time.Since(t1).Round(time.Second/10))
}
func isUnix() bool {
switch runtime.GOOS {
case "plan9", "windows":
return false
}
return true
}
func untarMode() {
if *untarDestDir == "" {
log.Fatal("--untar-dest-dir must not be empty")
}
if fi, err := os.Stat(*untarDestDir); err != nil || !fi.IsDir() {
if err != nil {
log.Fatalf("--untar-dest-dir %q: %v", *untarDestDir, err)
}
log.Fatalf("--untar-dest-dir %q not a directory.", *untarDestDir)
}
f, err := os.Open(*untarFile)
if err != nil {
log.Fatal(err)
}
defer f.Close()
if err := untar.Untar(f, *untarDestDir); err != nil {
log.Fatalf("Untarring %q to %q: %v", *untarFile, *untarDestDir, err)
}
}
func prettyDuration(d time.Duration) time.Duration {
const round = time.Second / 10
return d / round * round
}
| [
"\"GO_BUILDER_ENV\"",
"\"GO_BUILDER_ENV\"",
"\"USER\"",
"\"HOME\"",
"\"GO_BUILDER_ENV\"",
"\"HOSTNAME\"",
"\"HOSTNAME\"",
"\"META_BUILDLET_BINARY_URL\"",
"\"GO_BUILDER_ENV\""
]
| []
| [
"META_BUILDLET_BINARY_URL",
"GO_BUILDER_ENV",
"USER",
"HOSTNAME",
"HOME"
]
| [] | ["META_BUILDLET_BINARY_URL", "GO_BUILDER_ENV", "USER", "HOSTNAME", "HOME"] | go | 5 | 0 | |
database/orm/association_test.go | package orm_test
import (
"fmt"
"os"
"reflect"
"sort"
"testing"
"landzero.net/x/database/orm"
)
func TestBelongsTo(t *testing.T) {
post := Post{
Title: "post belongs to",
Body: "body belongs to",
Category: Category{Name: "Category 1"},
MainCategory: Category{Name: "Main Category 1"},
}
if err := DB.Save(&post).Error; err != nil {
t.Error("Got errors when save post", err)
}
if post.Category.ID == 0 || post.MainCategory.ID == 0 {
t.Errorf("Category's primary key should be updated")
}
if post.CategoryId.Int64 == 0 || post.MainCategoryId == 0 {
t.Errorf("post's foreign key should be updated")
}
// Query
var category1 Category
DB.Model(&post).Association("Category").Find(&category1)
if category1.Name != "Category 1" {
t.Errorf("Query belongs to relations with Association")
}
var mainCategory1 Category
DB.Model(&post).Association("MainCategory").Find(&mainCategory1)
if mainCategory1.Name != "Main Category 1" {
t.Errorf("Query belongs to relations with Association")
}
var category11 Category
DB.Model(&post).Related(&category11)
if category11.Name != "Category 1" {
t.Errorf("Query belongs to relations with Related")
}
if DB.Model(&post).Association("Category").Count() != 1 {
t.Errorf("Post's category count should be 1")
}
if DB.Model(&post).Association("MainCategory").Count() != 1 {
t.Errorf("Post's main category count should be 1")
}
// Append
var category2 = Category{
Name: "Category 2",
}
DB.Model(&post).Association("Category").Append(&category2)
if category2.ID == 0 {
t.Errorf("Category should has ID when created with Append")
}
var category21 Category
DB.Model(&post).Related(&category21)
if category21.Name != "Category 2" {
t.Errorf("Category should be updated with Append")
}
if DB.Model(&post).Association("Category").Count() != 1 {
t.Errorf("Post's category count should be 1")
}
// Replace
var category3 = Category{
Name: "Category 3",
}
DB.Model(&post).Association("Category").Replace(&category3)
if category3.ID == 0 {
t.Errorf("Category should has ID when created with Replace")
}
var category31 Category
DB.Model(&post).Related(&category31)
if category31.Name != "Category 3" {
t.Errorf("Category should be updated with Replace")
}
if DB.Model(&post).Association("Category").Count() != 1 {
t.Errorf("Post's category count should be 1")
}
// Delete
DB.Model(&post).Association("Category").Delete(&category2)
if DB.Model(&post).Related(&Category{}).RecordNotFound() {
t.Errorf("Should not delete any category when Delete a unrelated Category")
}
if post.Category.Name == "" {
t.Errorf("Post's category should not be reseted when Delete a unrelated Category")
}
DB.Model(&post).Association("Category").Delete(&category3)
if post.Category.Name != "" {
t.Errorf("Post's category should be reseted after Delete")
}
var category41 Category
DB.Model(&post).Related(&category41)
if category41.Name != "" {
t.Errorf("Category should be deleted with Delete")
}
if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after Delete, but got %v", count)
}
// Clear
DB.Model(&post).Association("Category").Append(&Category{
Name: "Category 2",
})
if DB.Model(&post).Related(&Category{}).RecordNotFound() {
t.Errorf("Should find category after append")
}
if post.Category.Name == "" {
t.Errorf("Post's category should has value after Append")
}
DB.Model(&post).Association("Category").Clear()
if post.Category.Name != "" {
t.Errorf("Post's category should be cleared after Clear")
}
if !DB.Model(&post).Related(&Category{}).RecordNotFound() {
t.Errorf("Should not find any category after Clear")
}
if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after Clear, but got %v", count)
}
// Check Association mode with soft delete
category6 := Category{
Name: "Category 6",
}
DB.Model(&post).Association("Category").Append(&category6)
if count := DB.Model(&post).Association("Category").Count(); count != 1 {
t.Errorf("Post's category count should be 1 after Append, but got %v", count)
}
DB.Delete(&category6)
if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after the category has been deleted, but got %v", count)
}
if err := DB.Model(&post).Association("Category").Find(&Category{}).Error; err == nil {
t.Errorf("Post's category is not findable after Delete")
}
if count := DB.Unscoped().Model(&post).Association("Category").Count(); count != 1 {
t.Errorf("Post's category count should be 1 when query with Unscoped, but got %v", count)
}
if err := DB.Unscoped().Model(&post).Association("Category").Find(&Category{}).Error; err != nil {
t.Errorf("Post's category should be findable when query with Unscoped, got %v", err)
}
}
func TestBelongsToOverrideForeignKey1(t *testing.T) {
type Profile struct {
orm.Model
Name string
}
type User struct {
orm.Model
Profile Profile `orm:"ForeignKey:ProfileRefer"`
ProfileRefer int
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "belongs_to" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileRefer"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestBelongsToOverrideForeignKey2(t *testing.T) {
type Profile struct {
orm.Model
Refer string
Name string
}
type User struct {
orm.Model
Profile Profile `orm:"ForeignKey:ProfileID;AssociationForeignKey:Refer"`
ProfileID int
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "belongs_to" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileID"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasOne(t *testing.T) {
user := User{
Name: "has one",
CreditCard: CreditCard{Number: "411111111111"},
}
if err := DB.Save(&user).Error; err != nil {
t.Error("Got errors when save user", err.Error())
}
if user.CreditCard.UserId.Int64 == 0 {
t.Errorf("CreditCard's foreign key should be updated")
}
// Query
var creditCard1 CreditCard
DB.Model(&user).Related(&creditCard1)
if creditCard1.Number != "411111111111" {
t.Errorf("Query has one relations with Related")
}
var creditCard11 CreditCard
DB.Model(&user).Association("CreditCard").Find(&creditCard11)
if creditCard11.Number != "411111111111" {
t.Errorf("Query has one relations with Related")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
// Append
var creditcard2 = CreditCard{
Number: "411111111112",
}
DB.Model(&user).Association("CreditCard").Append(&creditcard2)
if creditcard2.ID == 0 {
t.Errorf("Creditcard should has ID when created with Append")
}
var creditcard21 CreditCard
DB.Model(&user).Related(&creditcard21)
if creditcard21.Number != "411111111112" {
t.Errorf("CreditCard should be updated with Append")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
// Replace
var creditcard3 = CreditCard{
Number: "411111111113",
}
DB.Model(&user).Association("CreditCard").Replace(&creditcard3)
if creditcard3.ID == 0 {
t.Errorf("Creditcard should has ID when created with Replace")
}
var creditcard31 CreditCard
DB.Model(&user).Related(&creditcard31)
if creditcard31.Number != "411111111113" {
t.Errorf("CreditCard should be updated with Replace")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
// Delete
DB.Model(&user).Association("CreditCard").Delete(&creditcard2)
var creditcard4 CreditCard
DB.Model(&user).Related(&creditcard4)
if creditcard4.Number != "411111111113" {
t.Errorf("Should not delete credit card when Delete a unrelated CreditCard")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
DB.Model(&user).Association("CreditCard").Delete(&creditcard3)
if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
t.Errorf("Should delete credit card with Delete")
}
if DB.Model(&user).Association("CreditCard").Count() != 0 {
t.Errorf("User's credit card count should be 0 after Delete")
}
// Clear
var creditcard5 = CreditCard{
Number: "411111111115",
}
DB.Model(&user).Association("CreditCard").Append(&creditcard5)
if DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
t.Errorf("Should added credit card with Append")
}
if DB.Model(&user).Association("CreditCard").Count() != 1 {
t.Errorf("User's credit card count should be 1")
}
DB.Model(&user).Association("CreditCard").Clear()
if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() {
t.Errorf("Credit card should be deleted with Clear")
}
if DB.Model(&user).Association("CreditCard").Count() != 0 {
t.Errorf("User's credit card count should be 0 after Clear")
}
// Check Association mode with soft delete
var creditcard6 = CreditCard{
Number: "411111111116",
}
DB.Model(&user).Association("CreditCard").Append(&creditcard6)
if count := DB.Model(&user).Association("CreditCard").Count(); count != 1 {
t.Errorf("User's credit card count should be 1 after Append, but got %v", count)
}
DB.Delete(&creditcard6)
if count := DB.Model(&user).Association("CreditCard").Count(); count != 0 {
t.Errorf("User's credit card count should be 0 after credit card deleted, but got %v", count)
}
if err := DB.Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err == nil {
t.Errorf("User's creditcard is not findable after Delete")
}
if count := DB.Unscoped().Model(&user).Association("CreditCard").Count(); count != 1 {
t.Errorf("User's credit card count should be 1 when query with Unscoped, but got %v", count)
}
if err := DB.Unscoped().Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err != nil {
t.Errorf("User's creditcard should be findable when query with Unscoped, got %v", err)
}
}
func TestHasOneOverrideForeignKey1(t *testing.T) {
type Profile struct {
orm.Model
Name string
UserRefer uint
}
type User struct {
orm.Model
Profile Profile `orm:"ForeignKey:UserRefer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_one" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasOneOverrideForeignKey2(t *testing.T) {
type Profile struct {
orm.Model
Name string
UserID uint
}
type User struct {
orm.Model
Refer string
Profile Profile `orm:"ForeignKey:UserID;AssociationForeignKey:Refer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_one" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasMany(t *testing.T) {
post := Post{
Title: "post has many",
Body: "body has many",
Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}},
}
if err := DB.Save(&post).Error; err != nil {
t.Error("Got errors when save post", err)
}
for _, comment := range post.Comments {
if comment.PostId == 0 {
t.Errorf("comment's PostID should be updated")
}
}
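// compareComments reports whether the given comments have exactly the given
// contents, ignoring order.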
var compareComments = func(comments []Comment, contents []string) bool {
var commentContents []string
for _, comment := range comments {
commentContents = append(commentContents, comment.Content)
}
sort.Strings(commentContents)
sort.Strings(contents)
return reflect.DeepEqual(commentContents, contents)
}
// Query
if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil {
t.Errorf("Comment 1 should be saved")
}
var comments1 []Comment
DB.Model(&post).Association("Comments").Find(&comments1)
if !compareComments(comments1, []string{"Comment 1", "Comment 2"}) {
t.Errorf("Query has many relations with Association")
}
var comments11 []Comment
DB.Model(&post).Related(&comments11)
if !compareComments(comments11, []string{"Comment 1", "Comment 2"}) {
t.Errorf("Query has many relations with Related")
}
if DB.Model(&post).Association("Comments").Count() != 2 {
t.Errorf("Post's comments count should be 2")
}
// Append
DB.Model(&post).Association("Comments").Append(&Comment{Content: "Comment 3"})
var comments2 []Comment
DB.Model(&post).Related(&comments2)
if !compareComments(comments2, []string{"Comment 1", "Comment 2", "Comment 3"}) {
t.Errorf("Append new record to has many relations")
}
if DB.Model(&post).Association("Comments").Count() != 3 {
t.Errorf("Post's comments count should be 3 after Append")
}
// Delete
DB.Model(&post).Association("Comments").Delete(comments11)
var comments3 []Comment
DB.Model(&post).Related(&comments3)
if !compareComments(comments3, []string{"Comment 3"}) {
t.Errorf("Delete an existing resource for has many relations")
}
if DB.Model(&post).Association("Comments").Count() != 1 {
t.Errorf("Post's comments count should be 1 after Delete 2")
}
// Replace
DB.Model(&Post{Id: 999}).Association("Comments").Replace()
var comments4 []Comment
DB.Model(&post).Related(&comments4)
if len(comments4) == 0 {
t.Errorf("Replace for other resource should not clear all comments")
}
DB.Model(&post).Association("Comments").Replace(&Comment{Content: "Comment 4"}, &Comment{Content: "Comment 5"})
var comments41 []Comment
DB.Model(&post).Related(&comments41)
if !compareComments(comments41, []string{"Comment 4", "Comment 5"}) {
t.Errorf("Replace has many relations")
}
// Clear
DB.Model(&Post{Id: 999}).Association("Comments").Clear()
var comments5 []Comment
DB.Model(&post).Related(&comments5)
if len(comments5) == 0 {
t.Errorf("Clear should not clear all comments")
}
DB.Model(&post).Association("Comments").Clear()
var comments51 []Comment
DB.Model(&post).Related(&comments51)
if len(comments51) != 0 {
t.Errorf("Clear has many relations")
}
// Check Association mode with soft delete
var comment6 = Comment{
Content: "comment 6",
}
DB.Model(&post).Association("Comments").Append(&comment6)
if count := DB.Model(&post).Association("Comments").Count(); count != 1 {
t.Errorf("post's comments count should be 1 after Append, but got %v", count)
}
DB.Delete(&comment6)
if count := DB.Model(&post).Association("Comments").Count(); count != 0 {
t.Errorf("post's comments count should be 0 after comment been deleted, but got %v", count)
}
var comments6 []Comment
if DB.Model(&post).Association("Comments").Find(&comments6); len(comments6) != 0 {
t.Errorf("post's comments count should be 0 when find with Find, but got %v", len(comments6))
}
if count := DB.Unscoped().Model(&post).Association("Comments").Count(); count != 1 {
t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", count)
}
var comments61 []Comment
if DB.Unscoped().Model(&post).Association("Comments").Find(&comments61); len(comments61) != 1 {
t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", len(comments61))
}
}
func TestHasManyOverrideForeignKey1(t *testing.T) {
type Profile struct {
orm.Model
Name string
UserRefer uint
}
type User struct {
orm.Model
Profile []Profile `orm:"ForeignKey:UserRefer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_many" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestHasManyOverrideForeignKey2(t *testing.T) {
type Profile struct {
orm.Model
Name string
UserID uint
}
type User struct {
orm.Model
Refer string
Profile []Profile `orm:"ForeignKey:UserID;AssociationForeignKey:Refer"`
}
if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok {
if relation.Relationship.Kind != "has_many" ||
!reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) ||
!reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) {
t.Errorf("Override belongs to foreign key with tag")
}
}
}
func TestManyToMany(t *testing.T) {
DB.Raw("delete from languages")
var languages = []Language{{Name: "ZH"}, {Name: "EN"}}
user := User{Name: "Many2Many", Languages: languages}
DB.Save(&user)
// Query
var newLanguages []Language
DB.Model(&user).Related(&newLanguages, "Languages")
if len(newLanguages) != len([]string{"ZH", "EN"}) {
t.Errorf("Query many to many relations")
}
DB.Model(&user).Association("Languages").Find(&newLanguages)
if len(newLanguages) != len([]string{"ZH", "EN"}) {
t.Errorf("Should be able to find many to many relations")
}
if DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) {
t.Errorf("Count should return correct result")
}
// Append
DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"})
if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() {
t.Errorf("New record should be saved when append")
}
languageA := Language{Name: "AA"}
DB.Save(&languageA)
DB.Model(&User{Id: user.Id}).Association("Languages").Append(&languageA)
languageC := Language{Name: "CC"}
DB.Save(&languageC)
DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC})
DB.Model(&User{Id: user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}})
totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"}
if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) {
t.Errorf("All appended languages should be saved")
}
// Delete
user.Languages = []Language{}
DB.Model(&user).Association("Languages").Find(&user.Languages)
var language Language
DB.Where("name = ?", "EE").First(&language)
DB.Model(&user).Association("Languages").Delete(language, &language)
if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 {
t.Errorf("Relations should be deleted with Delete")
}
if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() {
t.Errorf("Language EE should not be deleted")
}
DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages)
user2 := User{Name: "Many2Many_User2", Languages: languages}
DB.Save(&user2)
DB.Model(&user).Association("Languages").Delete(languages, &languages)
if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 {
t.Errorf("Relations should be deleted with Delete")
}
if DB.Model(&user2).Association("Languages").Count() == 0 {
t.Errorf("Other user's relations should not be deleted")
}
// Replace
var languageB Language
DB.Where("name = ?", "BB").First(&languageB)
DB.Model(&user).Association("Languages").Replace(languageB)
if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 {
t.Errorf("Relations should be replaced")
}
DB.Model(&user).Association("Languages").Replace()
if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 {
t.Errorf("Relations should be replaced with empty")
}
DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}})
if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) {
t.Errorf("Relations should be replaced")
}
// Clear
DB.Model(&user).Association("Languages").Clear()
if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 {
t.Errorf("Relations should be cleared")
}
// Check Association mode with soft delete
var language6 = Language{
Name: "language 6",
}
DB.Model(&user).Association("Languages").Append(&language6)
if count := DB.Model(&user).Association("Languages").Count(); count != 1 {
t.Errorf("user's languages count should be 1 after Append, but got %v", count)
}
DB.Delete(&language6)
if count := DB.Model(&user).Association("Languages").Count(); count != 0 {
t.Errorf("user's languages count should be 0 after language been deleted, but got %v", count)
}
var languages6 []Language
if DB.Model(&user).Association("Languages").Find(&languages6); len(languages6) != 0 {
t.Errorf("user's languages count should be 0 when find with Find, but got %v", len(languages6))
}
if count := DB.Unscoped().Model(&user).Association("Languages").Count(); count != 1 {
t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", count)
}
var languages61 []Language
if DB.Unscoped().Model(&user).Association("Languages").Find(&languages61); len(languages61) != 1 {
t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", len(languages61))
}
}
func TestRelated(t *testing.T) {
user := User{
Name: "jinzhu",
BillingAddress: Address{Address1: "Billing Address - Address 1"},
ShippingAddress: Address{Address1: "Shipping Address - Address 1"},
Emails: []Email{{Email: "[email protected]"}, {Email: "jinzhu-2@[email protected]"}},
CreditCard: CreditCard{Number: "1234567890"},
Company: Company{Name: "company1"},
}
if err := DB.Save(&user).Error; err != nil {
t.Errorf("No error should happen when saving user")
}
if user.CreditCard.ID == 0 {
t.Errorf("After user save, credit card should have id")
}
if user.BillingAddress.ID == 0 {
t.Errorf("After user save, billing address should have id")
}
if user.Emails[0].Id == 0 {
t.Errorf("After user save, billing address should have id")
}
var emails []Email
DB.Model(&user).Related(&emails)
if len(emails) != 2 {
t.Errorf("Should have two emails")
}
var emails2 []Email
DB.Model(&user).Where("email = ?", "[email protected]").Related(&emails2)
if len(emails2) != 1 {
t.Errorf("Should have two emails")
}
var emails3 []*Email
DB.Model(&user).Related(&emails3)
if len(emails3) != 2 {
t.Errorf("Should have two emails")
}
var user1 User
DB.Model(&user).Related(&user1.Emails)
if len(user1.Emails) != 2 {
t.Errorf("Should have only one email match related condition")
}
var address1 Address
DB.Model(&user).Related(&address1, "BillingAddressId")
if address1.Address1 != "Billing Address - Address 1" {
t.Errorf("Should get billing address from user correctly")
}
user1 = User{}
DB.Model(&address1).Related(&user1, "BillingAddressId")
if DB.NewRecord(user1) {
t.Errorf("Should get user from address correctly")
}
var user2 User
DB.Model(&emails[0]).Related(&user2)
if user2.Id != user.Id || user2.Name != user.Name {
t.Errorf("Should get user from email correctly")
}
var creditcard CreditCard
var user3 User
DB.First(&creditcard, "number = ?", "1234567890")
DB.Model(&creditcard).Related(&user3)
if user3.Id != user.Id || user3.Name != user.Name {
t.Errorf("Should get user from credit card correctly")
}
if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() {
t.Errorf("RecordNotFound for Related")
}
var company Company
if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" {
t.Errorf("RecordNotFound for Related")
}
}
func TestForeignKey(t *testing.T) {
for _, structField := range DB.NewScope(&User{}).GetStructFields() {
for _, foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
for _, structField := range DB.NewScope(&Email{}).GetStructFields() {
for _, foreignKey := range []string{"UserId"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
for _, structField := range DB.NewScope(&Post{}).GetStructFields() {
for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
for _, structField := range DB.NewScope(&Comment{}).GetStructFields() {
for _, foreignKey := range []string{"PostId"} {
if structField.Name == foreignKey && !structField.IsForeignKey {
t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey))
}
}
}
}
func testForeignKey(t *testing.T, source interface{}, sourceFieldName string, target interface{}, targetFieldName string) {
if dialect := os.Getenv("ORM_DIALECT"); dialect == "" || dialect == "sqlite" {
// sqlite does not support ADD CONSTRAINT in ALTER TABLE
return
}
targetScope := DB.NewScope(target)
targetTableName := targetScope.TableName()
modelScope := DB.NewScope(source)
modelField, ok := modelScope.FieldByName(sourceFieldName)
if !ok {
t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", sourceFieldName))
}
targetField, ok := targetScope.FieldByName(targetFieldName)
if !ok {
t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", targetFieldName))
}
dest := fmt.Sprintf("%v(%v)", targetTableName, targetField.DBName)
err := DB.Model(source).AddForeignKey(modelField.DBName, dest, "CASCADE", "CASCADE").Error
if err != nil {
t.Fatalf(fmt.Sprintf("Failed to create foreign key: %v", err))
}
}
func TestLongForeignKey(t *testing.T) {
testForeignKey(t, &NotSoLongTableName{}, "ReallyLongThingID", &ReallyLongTableNameToTestMySQLNameLengthLimit{}, "ID")
}
func TestLongForeignKeyWithShortDest(t *testing.T) {
testForeignKey(t, &ReallyLongThingThatReferencesShort{}, "ShortID", &Short{}, "ID")
}
func TestHasManyChildrenWithOneStruct(t *testing.T) {
category := Category{
Name: "main",
Categories: []Category{
{Name: "sub1"},
{Name: "sub2"},
},
}
DB.Save(&category)
}
func TestAutoSaveBelongsToAssociation(t *testing.T) {
type Company struct {
orm.Model
Name string
}
type User struct {
orm.Model
Name string
CompanyID uint
Company Company `orm:"association_autoupdate:false;association_autocreate:false;"`
}
DB.Where("name = ?", "auto_save_association").Delete(&Company{})
DB.AutoMigrate(&Company{}, &User{})
DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_association"}})
if !DB.Where("name = ?", "auto_save_association").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_association should not have been saved when autosave is false")
}
// if foreign key is set, this should be saved even if association isn't
company := Company{Name: "auto_save_association"}
DB.Save(&company)
company.Name = "auto_save_association_new_name"
user := User{Name: "jinzhu", Company: company}
DB.Save(&user)
if !DB.Where("name = ?", "auto_save_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if DB.Where("id = ? AND company_id = ?", user.ID, company.ID).First(&User{}).RecordNotFound() {
t.Errorf("User's foreign key should have been saved")
}
user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_association_2"}}
DB.Set("orm:association_autocreate", true).Save(&user2)
if DB.Where("name = ?", "auto_save_association_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_association_2 should been created when autocreate is true")
}
user2.Company.Name = "auto_save_association_2_newname"
DB.Set("orm:association_autoupdate", true).Save(&user2)
if DB.Where("name = ?", "auto_save_association_2_newname").First(&Company{}).RecordNotFound() {
t.Errorf("Company should been updated")
}
}
func TestAutoSaveHasOneAssociation(t *testing.T) {
type Company struct {
orm.Model
UserID uint
Name string
}
type User struct {
orm.Model
Name string
Company Company `orm:"association_autoupdate:false;association_autocreate:false;"`
}
DB.Where("name = ?", "auto_save_has_one_association").Delete(&Company{})
DB.AutoMigrate(&Company{}, &User{})
DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_has_one_association"}})
if !DB.Where("name = ?", "auto_save_has_one_association").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_has_one_association should not have been saved when autosave is false")
}
company := Company{Name: "auto_save_has_one_association"}
DB.Save(&company)
company.Name = "auto_save_has_one_association_new_name"
user := User{Name: "jinzhu", Company: company}
DB.Save(&user)
if !DB.Where("name = ?", "auto_save_has_one_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if !DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association", user.ID).First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if user.Company.UserID == 0 {
t.Errorf("UserID should be assigned")
}
company.Name = "auto_save_has_one_association_2_new_name"
DB.Set("orm:association_autoupdate", true).Save(&user)
if DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association_new_name", user.ID).First(&Company{}).RecordNotFound() {
t.Errorf("Company should been updated")
}
user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_has_one_association_2"}}
DB.Set("orm:association_autocreate", true).Save(&user2)
if DB.Where("name = ?", "auto_save_has_one_association_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_has_one_association_2 should been created when autocreate is true")
}
}
func TestAutoSaveMany2ManyAssociation(t *testing.T) {
type Company struct {
orm.Model
Name string
}
type User struct {
orm.Model
Name string
Companies []Company `orm:"many2many:user_companies;association_autoupdate:false;association_autocreate:false;"`
}
DB.AutoMigrate(&Company{}, &User{})
DB.Save(&User{Name: "jinzhu", Companies: []Company{{Name: "auto_save_m2m_association"}}})
if !DB.Where("name = ?", "auto_save_m2m_association").First(&Company{}).RecordNotFound() {
t.Errorf("Company auto_save_m2m_association should not have been saved when autosave is false")
}
company := Company{Name: "auto_save_m2m_association"}
DB.Save(&company)
company.Name = "auto_save_m2m_association_new_name"
user := User{Name: "jinzhu", Companies: []Company{company, {Name: "auto_save_m2m_association_new_name_2"}}}
DB.Save(&user)
if !DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not have been updated")
}
if !DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company should not been created")
}
if DB.Model(&user).Association("Companies").Count() != 1 {
t.Errorf("Relationship should been saved")
}
DB.Set("orm:association_autoupdate", true).Set("orm:association_autocreate", true).Save(&user)
if DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() {
t.Errorf("Company should been updated")
}
if DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() {
t.Errorf("Company should been created")
}
if DB.Model(&user).Association("Companies").Count() != 2 {
t.Errorf("Relationship should been updated")
}
}
| [
"\"ORM_DIALECT\""
]
| []
| [
"ORM_DIALECT"
]
| [] | ["ORM_DIALECT"] | go | 1 | 0 | |
src/handlers/image.go | package handlers
import (
"errors"
"log"
"net/http"
"os"
"regexp"
ua "github.com/mileusna/useragent"
"image.it-lab.su/models"
)
const bufferSize = 64
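// ImageHandler resolves the requested image path, serves it from the on-disk
// cache (building the cached variant on a miss), and negotiates WebP output
// based on the Accept header, the User-Agent, and the APP_ALLOW_WEBP setting.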
func ImageHandler(writer http.ResponseWriter, request *http.Request) {
defer func() {
if err := recover(); err != nil {
log.Println(err)
writer.WriteHeader(http.StatusNotFound)
writer.Write([]byte("Error while image handling"))
}
}()
rePath := regexp.MustCompile(`/images/(.+)$`)
regexpPathResult := rePath.FindStringSubmatch(request.URL.String())
reWebp := regexp.MustCompile(`(?i)image/(webp)`)
regexpWebpResult := reWebp.FindStringSubmatch(request.Header.Get("Accept"))
userAgent := ua.Parse(request.Header.Get("User-Agent"))
webpSupported := os.Getenv("APP_ALLOW_WEBP") == "1" && !userAgent.IsSafari() && len(regexpWebpResult) > 1
if len(regexpPathResult) < 2 {
panic(errors.New("WrongURL"))
}
image := models.LoadImage(regexpPathResult[1], webpSupported)
imagePath := image.Parser.GetCachePath()
if _, err := os.Stat(imagePath); os.IsNotExist(err) {
var cacheErr error
imagePath, cacheErr = image.MakeCachedImage()
if cacheErr != nil {
log.Fatal("Failed to write cache: ", cacheErr.Error())
panic(cacheErr)
}
}
if image.Parser.AllowedWebp {
writer.Header().Add("Content-Type", "image/webp")
} else {
writer.Header().Add("Content-Type", "image/jpeg")
}
cachedFile, _ := os.Open(imagePath)
defer cachedFile.Close()
buf := make([]byte, bufferSize)
reads, err := cachedFile.Read(buf)
for reads > 0 && err == nil {
// Write only the bytes actually read; writing the whole buffer would pad
// the final chunk with stale data.
if _, err := writer.Write(buf[:reads]); err != nil {
log.Fatal("Failed to write to response. Error: ", err.Error())
}
reads, err = cachedFile.Read(buf)
}
}
| [
"\"APP_ALLOW_WEBP\""
]
| []
| [
"APP_ALLOW_WEBP"
]
| [] | ["APP_ALLOW_WEBP"] | go | 1 | 0 | |
pdm/termui.py | from __future__ import annotations
import atexit
import contextlib
import functools
import io
import logging
import os
import sys
from itertools import zip_longest
from tempfile import mktemp
from typing import Any, Callable, Iterator, List, Optional, Sequence, Union
import click
from click._compat import strip_ansi
from pdm._vendor import halo
from pdm._vendor.log_symbols.symbols import is_supported as supports_unicode
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
def ljust(text: str, length: int) -> str:
"""Like str.ljust() but ignore all ANSI controlling characters."""
return text + " " * (length - len(strip_ansi(text)))
def rjust(text: str, length: int) -> str:
"""Like str.rjust() but ignore all ANSI controlling characters."""
return " " * (length - len(strip_ansi(text))) + text
def centerize(text: str, length: int) -> str:
"""Centerize the text while ignoring ANSI controlling characters."""
space_num = length - len(strip_ansi(text))
left_space = space_num // 2
return " " * left_space + text + " " * (space_num - left_space)
def supports_ansi() -> bool:
"""Check if the current environment supports ANSI colors"""
if os.getenv("CI"):
return False
stream = sys.stdout
if not hasattr(stream, "fileno"):
return False
try:
return os.isatty(stream.fileno()) # type: ignore
except io.UnsupportedOperation:
return False
# Export some style shortcut helpers
green = functools.partial(click.style, fg="green")
red = functools.partial(click.style, fg="red")
yellow = functools.partial(click.style, fg="yellow")
cyan = functools.partial(click.style, fg="cyan")
blue = functools.partial(click.style, fg="blue")
bold = functools.partial(click.style, bold=True)
# Verbosity levels
NORMAL = 0
DETAIL = 1
DEBUG = 2
class DummySpinner:
"""A dummy spinner class implementing needed interfaces.
But only display text onto screen.
"""
def start(self, text: str) -> None:
click.echo(text)
def stop_and_persist(self, symbol: str = " ", text: Optional[str] = None) -> None:
click.echo(symbol + " " + (text or ""))
succeed = fail = start
text = property(lambda self: "", start)
def __enter__(self) -> DummySpinner:
return self
def __exit__(self, *args: Any) -> None:
pass
class UI:
"""Terminal UI object"""
def __init__(self, verbosity: int = NORMAL, no_ansi: Optional[bool] = None) -> None:
self.verbosity = verbosity
self._indent = ""
self.supports_ansi = not no_ansi if no_ansi is not None else supports_ansi()
def set_verbosity(self, verbosity: int) -> None:
self.verbosity = verbosity
def echo(
self,
message: str = "",
err: bool = False,
verbosity: int = NORMAL,
**kwargs: Any,
) -> None:
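"""Print a message if the current verbosity is at least the given level,
prefixed with any indentation set via ``indent()``."""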
if self.verbosity >= verbosity:
click.secho(
self._indent + str(message), err=err, color=self.supports_ansi, **kwargs
)
def display_columns(
self, rows: Sequence[Sequence[str]], header: Optional[List[str]] = None
) -> None:
"""Print rows in aligned columns.
:param rows: a rows of data to be displayed.
:param header: a list of header strings.
"""
def get_aligner(align: str) -> Callable:
if align == ">":
return rjust
if align == "^":
return centerize
else:
return ljust
sizes = list(
map(
lambda column: max(map(lambda x: len(strip_ansi(x)), column)),
zip_longest(header or [], *rows, fillvalue=""),
)
)
aligners = [ljust] * len(sizes)
if header:
aligners = []
for i, head in enumerate(header):
aligners.append(get_aligner(head[0]))
if head[0] in (">", "^", "<"):
header[i] = head[1:]
self.echo(
" ".join(
aligner(head, size)
for aligner, head, size in zip(aligners, header, sizes)
)
)
# Print a separator
self.echo(" ".join("-" * size for size in sizes))
for row in rows:
self.echo(
" ".join(
aligner(item, size)
for aligner, item, size in zip(aligners, row, sizes)
)
)
@contextlib.contextmanager
def indent(self, prefix: str) -> Iterator[None]:
"""Indent the following lines with a prefix."""
_indent = self._indent
self._indent += prefix
yield
self._indent = _indent
@contextlib.contextmanager
def logging(self, type_: str = "install") -> Iterator[logging.Logger]:
"""A context manager that opens a file for logging when verbosity is NORMAL or
print to the stdout otherwise.
"""
file_name = mktemp(".log", f"pdm-{type_}-")
if self.verbosity >= DETAIL:
handler = logging.StreamHandler()
else:
handler = logging.FileHandler(file_name, encoding="utf-8")
handler.setLevel(logging.DEBUG)
logger.handlers[1:] = [handler]
pip_logger = logging.getLogger("pip.subprocessor")
pip_logger.handlers[:] = [handler]
def cleanup() -> None:
try:
os.unlink(file_name)
except OSError:
pass
try:
yield logger
except Exception:
if self.verbosity < DETAIL:
logger.exception("Error occurs")
self.echo(yellow(f"See {file_name} for detailed debug log."))
raise
else:
atexit.register(cleanup)
finally:
logger.handlers.remove(handler)
pip_logger.handlers.remove(handler)
def open_spinner(
self, title: str, spinner: str = "dots"
) -> Union[DummySpinner, halo.Halo]:
"""Open a spinner as a context manager."""
if self.verbosity >= DETAIL or not self.supports_ansi:
return DummySpinner()
else:
return halo.Halo( # type: ignore
title, spinner=spinner, indent=self._indent
)
class Emoji:
"""A collection of emoji characters used in terminal output"""
if supports_unicode(): # type: ignore
SUCC = "🎉"
LOCK = "🔒"
else:
SUCC = ""
LOCK = ""
| []
| []
| [
"CI"
]
| [] | ["CI"] | python | 1 | 0 | |
capsule/capsule_layer.py |
import tensorflow as tf
from capsule.utils import squash
layers = tf.keras.layers
models = tf.keras.models
class Capsule(tf.keras.Model):
def __init__(self, in_capsules, in_dim, out_capsules, out_dim, stdev=0.2, routing_iterations=2, use_bias=True, name=''):
super(Capsule, self).__init__(name=name)
self.in_capsules = in_capsules
self.in_dim = in_dim
self.out_capsules = out_capsules
self.out_dim = out_dim
self.routing_iterations = routing_iterations
self.use_bias = use_bias
with tf.name_scope(self.name):
w_init = tf.random_normal_initializer(stddev=stdev)  # honor the constructor's stdev argument
self.W = tf.Variable(name="W", initial_value=w_init(shape=(1, out_capsules, in_capsules, out_dim, in_dim),
dtype='float32'),
trainable=True)
if self.use_bias:
bias_init = tf.constant_initializer(0.1)
self.bias = tf.Variable(name="bias", initial_value=bias_init(shape=(1, out_capsules, out_dim),
dtype='float32'),
trainable=True)
def call(self, u):
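# Dynamic routing-by-agreement forward pass. u is expected to have shape
# [batch, in_capsules, in_dim]; the returned v_j has shape
# [batch, out_capsules, out_dim]. (Shape notes are inferred from the tiling
# and reduction steps below, not stated in the original source.)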
batch_size = tf.shape(u)[0]
u = tf.expand_dims(u, 1)
u = tf.expand_dims(u, 3)
u = tf.tile(u, [1, self.out_capsules, 1, 1, 1])
u = tf.tile(u, [1, 1, 1, self.out_dim, 1])
w = tf.tile(self.W, [batch_size, 1, 1, 1, 1])
u_hat = tf.reduce_sum(u * w, axis=-1)
bias = tf.tile(self.bias, [batch_size, 1, 1]) if self.use_bias else 0.0
b_ij = tf.zeros(shape=[batch_size, self.out_capsules, self.in_capsules, 1])
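# b_ij holds the routing logits; the softmax below turns them into coupling
# coefficients c_ij, which are refined on each iteration by the agreement
# between the output capsules v_j and the predictions u_hat.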
for r in range(self.routing_iterations):
c_ij = tf.nn.softmax(b_ij, axis=1)
c_ij_tiled = tf.tile(c_ij, [1, 1, 1, self.out_dim])
s_j = tf.reduce_sum(c_ij_tiled * u_hat, axis=2) + bias
v_j = squash(s_j)
if(r < self.routing_iterations - 1):
v_j = tf.expand_dims(v_j, 2)
v_j = tf.tile(v_j, [1, 1, self.in_capsules, 1])
u_x_v = tf.reduce_sum(v_j * u_hat, axis=-1)
b_ij = b_ij + tf.expand_dims(u_x_v, axis=-1)
return v_j
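# Illustrative usage (a sketch; the layer sizes below are assumptions and do
# not come from the original repository):
#
#   caps = Capsule(in_capsules=1152, in_dim=8, out_capsules=10, out_dim=16, name="caps")
#   u = tf.random.normal([32, 1152, 8])   # [batch, in_capsules, in_dim]
#   v = caps(u)                           # -> shape [32, 10, 16]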
| []
| []
| []
| [] | [] | python | null | null | null |
t00ls.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import requests
import logging
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s - %(filename)s [line:%(lineno)d] - %(levelname)s: %(message)s')
# usage:
logging.debug('this is a loggging debug message')
logging.warning('this is loggging a warning message')
logging.error('this is an loggging error message')
logging.critical('this is a loggging critical message')
username = os.environ['USERNAME'] # account username
password = os.environ['PASSWORD'] # password as a 32-character lowercase MD5 hash
question_num = os.environ['QUESTION'] # security question ID, see the list below
question_answer = os.environ['ANSWER'] # answer to the security question
# 0 = no security question
# 1 = mother's name
# 2 = grandfather's name
# 3 = city where your father was born
# 4 = name of one of your teachers
# 5 = model of your personal computer
# 6 = name of your favorite restaurant
# 7 = last four digits of your driver's license
def t00ls_login(u_name, u_pass, q_num, q_ans):
"""
t00ls login helper.
:param u_name: username
:param u_pass: MD5 of the password, 32 characters, lowercase
:param q_num: security question type
:param q_ans: answer to the security question
:return: the formhash needed for check-in and the post-login cookies
"""
login_data = {
'action': 'login',
'username': u_name,
'password': u_pass,
'questionid': q_num,
'answer': q_ans
}
response_login = requests.post('https://www.t00ls.cc/login.json', data=login_data)
response_login_json = json.loads(response_login.text)
if response_login_json['status'] != 'success':
return None
else:
logging.warning("用户: {0} 登入成功!".format(username))
formhash = response_login_json['formhash']
t00ls_cookies = response_login.cookies
return formhash, t00ls_cookies
def t00ls_sign(t00ls_hash, t00ls_cookies):
"""
t00ls daily check-in helper.
:param t00ls_hash: formhash required for the check-in request
:param t00ls_cookies: cookies obtained after logging in
:return: JSON data returned by the check-in endpoint
"""
sign_data = {
'formhash': t00ls_hash,
'signsubmit': "true"
}
response_sign = requests.post('https://www.t00ls.cc/ajax-sign.json', data=sign_data, cookies=t00ls_cookies)
return json.loads(response_sign.text)
def main():
response_login = t00ls_login(username, password, question_num, question_answer)
if response_login:
response_sign = t00ls_sign(response_login[0], response_login[1])
if response_sign['status'] == 'success':
logging.warning("签到成功")
elif response_sign['message'] == 'alreadysign':
logging.warning("今日已签到")
else:
logging.error("出现玄学问题了,签到失败")
sys.exit(1)
else:
logging.error("登录失败,请检查输入资料是否正确")
sys.exit(1)
if __name__ == '__main__':
main()
| []
| []
| [
"QUESTION",
"USERNAME",
"PASSWORD",
"ANSWER"
]
| [] | ["QUESTION", "USERNAME", "PASSWORD", "ANSWER"] | python | 4 | 0 | |
test/tests_for_sdkmr/ee2_SDKMethodRunner_test_EE2Runjob_test.py | # -*- coding: utf-8 -*-
import copy
import logging
import os
import unittest
from configparser import ConfigParser
from unittest.mock import patch
import requests_mock
from mock import MagicMock
from lib.execution_engine2.db.MongoUtil import MongoUtil
from lib.execution_engine2.db.models.models import Job
from lib.execution_engine2.sdk.SDKMethodRunner import SDKMethodRunner
from lib.execution_engine2.utils.CondorTuples import SubmissionInfo, CondorResources
from test.utils_shared.test_utils import (
bootstrap,
get_example_job,
run_job_adapter,
get_example_job_as_dict,
)
from tests_for_db.mongo_test_helper import MongoTestHelper
logging.basicConfig(level=logging.INFO)
bootstrap()
from test.tests_for_sdkmr.ee2_SDKMethodRunner_test_utils import ee2_sdkmr_test_helper
class ee2_SDKMethodRunner_test(unittest.TestCase):
@classmethod
def setUpClass(cls):
config_file = os.environ.get("KB_DEPLOYMENT_CONFIG", "test/deploy.cfg")
logging.info(f"Loading config from {config_file}")
config_parser = ConfigParser()
config_parser.read(config_file)
cls.cfg = {}
for nameval in config_parser.items("execution_engine2"):
cls.cfg[nameval[0]] = nameval[1]
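# When the config defines a mongo-in-docker-compose host (i.e. the suite
# runs under docker-compose), use it as the mongo host.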
mongo_in_docker = cls.cfg.get("mongo-in-docker-compose", None)
if mongo_in_docker is not None:
cls.cfg["mongo-host"] = cls.cfg["mongo-in-docker-compose"]
cls.user_id = "wsadmin"
cls.ws_id = 9999
cls.token = "token"
cls.method_runner = SDKMethodRunner(
cls.cfg, user_id=cls.user_id, token=cls.token
)
cls.mongo_util = MongoUtil(cls.cfg)
cls.mongo_helper = MongoTestHelper(cls.cfg)
cls.test_collection = cls.mongo_helper.create_test_db(
db=cls.cfg["mongo-database"], col=cls.cfg["mongo-jobs-collection"]
)
cls.cr = CondorResources(
request_cpus="1",
request_disk="1GB",
request_memory="100M",
client_group="njs",
)
cls.sdkmr_test_helper = ee2_sdkmr_test_helper(mr=cls.method_runner)
def getRunner(self) -> SDKMethodRunner:
# Initialize these clients from None
runner = copy.copy(self.__class__.method_runner) # type : SDKMethodRunner
runner.get_jobs_status()
runner.get_runjob()
runner.get_job_logs()
return runner
def create_job_rec(self):
return self.sdkmr_test_helper.create_job_rec()
def test_init_ok(self):
class_attri = ["config", "catalog_utils", "workspace", "mongo_util", "condor"]
runner = self.getRunner()
self.assertTrue(set(class_attri) <= set(runner.__dict__.keys()))
def test_init_job_rec(self):
with self.mongo_util.mongo_engine_connection():
ori_job_count = Job.objects.count()
runner = self.getRunner()
job_params = {
"wsid": self.ws_id,
"method": "MEGAHIT.run_megahit",
"app_id": "MEGAHIT/run_megahit",
"service_ver": "2.2.1",
"params": [
{
"workspace_name": "wjriehl:1475006266615",
"read_library_refs": ["18836/5/1"],
"output_contigset_name": "rhodo_contigs",
"recipe": "auto",
"assembler": None,
"pipeline": None,
"min_contig_len": None,
}
],
"source_ws_objects": ["a/b/c", "e/d"],
"parent_job_id": "9998",
"meta": {"tag": "dev", "token_id": "12345"},
}
job_id = runner.get_runjob()._init_job_rec(self.user_id, job_params)
self.assertEqual(ori_job_count, Job.objects.count() - 1)
job = Job.objects.get(id=job_id)
self.assertEqual(job.user, self.user_id)
self.assertEqual(job.authstrat, "kbaseworkspace")
self.assertEqual(job.wsid, self.ws_id)
job_input = job.job_input
self.assertEqual(job_input.wsid, self.ws_id)
self.assertEqual(job_input.method, "MEGAHIT.run_megahit")
self.assertEqual(job_input.app_id, "MEGAHIT/run_megahit")
# TODO this is an integration test
# self.assertEqual(job_input.service_ver, "2.2.1")
self.assertEqual(
job_input.service_ver, "048baf3c2b76cb923b3b4c52008ed77dbe20292d"
)
self.assertCountEqual(job_input.source_ws_objects, ["a/b/c", "e/d"])
self.assertEqual(job_input.parent_job_id, "9998")
narrative_cell_info = job_input.narrative_cell_info
self.assertEqual(narrative_cell_info.tag, "dev")
self.assertEqual(narrative_cell_info.token_id, "12345")
self.assertFalse(narrative_cell_info.status)
self.assertFalse(job.job_output)
self.mongo_util.get_job(job_id=job_id).delete()
self.assertEqual(ori_job_count, Job.objects.count())
def test_get_job_params(self):
with self.mongo_util.mongo_engine_connection():
ori_job_count = Job.objects.count()
job_id = self.create_job_rec()
self.assertEqual(ori_job_count, Job.objects.count() - 1)
runner = self.getRunner()
runner._test_job_permissions = MagicMock(return_value=True)
params = runner.get_job_params(job_id)
expected_params_keys = [
"wsid",
"method",
"params",
"service_ver",
"app_id",
"source_ws_objects",
"parent_job_id",
]
self.assertCountEqual(params.keys(), expected_params_keys)
self.assertEqual(params["wsid"], self.ws_id)
self.assertEqual(params["method"], "MEGAHIT.run_megahit")
self.assertEqual(params["app_id"], "MEGAHIT/run_megahit")
self.assertEqual(params["service_ver"], "2.2.1")
self.assertCountEqual(params["source_ws_objects"], ["a/b/c", "e/d"])
self.assertEqual(params["parent_job_id"], "9998")
self.mongo_util.get_job(job_id=job_id).delete()
self.assertEqual(ori_job_count, Job.objects.count())
def test_start_job(self):
with self.mongo_util.mongo_engine_connection():
ori_job_count = Job.objects.count()
job_id = self.create_job_rec()
self.assertEqual(ori_job_count, Job.objects.count() - 1)
job = self.mongo_util.get_job(job_id=job_id)
self.assertEqual(job.status, "created")
self.assertFalse(job.finished)
self.assertFalse(job.running)
self.assertFalse(job.estimating)
runner = self.getRunner()
runner._test_job_permissions = MagicMock(return_value=True)
# test missing job_id input
with self.assertRaises(ValueError) as context:
runner.start_job(None)
self.assertEqual("Please provide valid job_id", str(context.exception))
# start a created job, set job to estimation status
runner.start_job(job_id, skip_estimation=False)
job = self.mongo_util.get_job(job_id=job_id)
self.assertEqual(job.status, "estimating")
self.assertFalse(job.running)
self.assertTrue(job.estimating)
# start a estimating job, set job to running status
runner.start_job(job_id, skip_estimation=False)
job = self.mongo_util.get_job(job_id=job_id)
self.assertEqual(job.status, "running")
self.assertTrue(job.running)
self.assertTrue(job.estimating)
# test start a job with invalid status
with self.assertRaises(ValueError) as context:
runner.start_job(job_id)
self.assertIn("Unexpected job status", str(context.exception))
self.mongo_util.get_job(job_id=job_id).delete()
self.assertEqual(ori_job_count, Job.objects.count())
@requests_mock.Mocker()
@patch("lib.execution_engine2.utils.Condor.Condor", autospec=True)
def test_run_job(self, rq_mock, condor_mock):
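# Route the workspace/auth HTTP calls through a mock that grants this user
# admin ("a") permission on the test workspace, and stub out Condor submission.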
rq_mock.add_matcher(
run_job_adapter(
ws_perms_info={"user_id": self.user_id, "ws_perms": {self.ws_id: "a"}}
)
)
runner = self.getRunner()
runner.get_condor = MagicMock(return_value=condor_mock)
job = get_example_job_as_dict(user=self.user_id, wsid=self.ws_id)
si = SubmissionInfo(clusterid="test", submit=job, error=None)
condor_mock.run_job = MagicMock(return_value=si)
condor_mock.extract_resources = MagicMock(return_value=self.cr)
job_id = runner.run_job(params=job)
print(f"Job id is {job_id} ")
@requests_mock.Mocker()
@patch("lib.execution_engine2.utils.Condor.Condor", autospec=True)
def test_run_job_batch(self, rq_mock, condor_mock):
"""
Test running batch jobs
"""
rq_mock.add_matcher(
run_job_adapter(
ws_perms_info={"user_id": self.user_id, "ws_perms": {self.ws_id: "a"}}
)
)
runner = self.getRunner()
runner.get_condor = MagicMock(return_value=condor_mock)
job = get_example_job_as_dict(user=self.user_id, wsid=self.ws_id)
si = SubmissionInfo(clusterid="test", submit=job, error=None)
condor_mock.run_job = MagicMock(return_value=si)
condor_mock.extract_resources = MagicMock(return_value=self.cr)
jobs = [job, job, job]
job_ids = runner.run_job_batch(params=jobs, batch_params={"wsid": self.ws_id})
assert "parent_job_id" in job_ids and isinstance(job_ids["parent_job_id"], str)
assert "child_job_ids" in job_ids and isinstance(job_ids["child_job_ids"], list)
assert len(job_ids["child_job_ids"]) == len(jobs)
# Test that you can't run a job in someone else's workspace
with self.assertRaises(PermissionError):
job_bad = get_example_job(user=self.user_id, wsid=1234).to_mongo().to_dict()
job_bad["method"] = job["job_input"]["app_id"]
job_bad["app_id"] = job["job_input"]["app_id"]
job_bad["service_ver"] = job["job_input"]["service_ver"]
jobs = [job, job_bad]
runner.run_job_batch(params=jobs, batch_params={"wsid": self.ws_id})
@requests_mock.Mocker()
@patch("lib.execution_engine2.utils.Condor.Condor", autospec=True)
def test_run_job_fail(self, rq_mock, condor_mock):
rq_mock.add_matcher(
run_job_adapter(
ws_perms_info={"user_id": self.user_id, "ws_perms": {self.ws_id: "a"}}
)
)
runner = self.getRunner()
job = get_example_job_as_dict(user=self.user_id, wsid=self.ws_id)
si = SubmissionInfo(clusterid="test", submit=job, error=None)
condor_mock.run_job = MagicMock(return_value=si)
condor_mock.extract_resources = MagicMock(return_value=self.cr)
with self.assertRaises(expected_exception=RuntimeError):
runner.run_job(params=job)
| []
| []
| [
"KB_DEPLOYMENT_CONFIG"
]
| [] | ["KB_DEPLOYMENT_CONFIG"] | python | 1 | 0 | |
commands/events/on_member_edit.py | ## -- IMPORTING -- ##
# MODULES
import disnake
import os
import random
import asyncio
import datetime
import certifi
from disnake.ext import commands
from disnake.errors import Forbidden, HTTPException
from disnake.ext.commands import errors
from pymongo import MongoClient
from extra.webhooks import Webhook
# FILES
import extra.config as config
## -- VARIABLES -- ##
mongo_login = os.environ.get("MONGO_LOGIN")
client = MongoClient(f"{mongo_login}",tlsCAFile=certifi.where())
db = client[config.database_collection]
server_data_col = db["server_data"]
muted_users_col = db["muted_users"]
user_data_col = db["user_data"]
## -- COG -- ##
class OnMemberEdit(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_edit(self, message):
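# Look up this guild's logging settings, creating a default document the
# first time the guild is seen.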
query = {
"guild_id": str(message.guild.id)
}
data = {
"guild_id": str(message.guild.id),
"event_logs_webhook": "None"
}
update = { "set": { "member_edit_logs_webhook": "None" } }
server_data = server_data_col.find_one(query)
if not server_data:
server_data_col.insert_one(data)
await self.on_member_edit(message)
return
webhook_url = server_data.get("member_edit_logs_webhook")
if webhook_url == "None" or not webhook_url:
if not webhook_url:
server_data_col.update_one(query, update)
return
return
webhook = Webhook(url=webhook_url, username="OutDash Logging", avatar_url=str(self.bot.user.avatar))
embed = disnake.Embed(description=f"", color=config.logs_embed_color)
embed.set_author(name=message.guild.name, icon_url=message.guild.icon or "https://cdn.discordapp.com/embed/avatars/1.png")
embed.timestamp = datetime.datetime.utcnow()
webhook.add_embed(embed)
webhook.post()
def setup(bot):
bot.add_cog(OnMemberEdit(bot)) | []
| []
| [
"MONGO_LOGIN"
]
| [] | ["MONGO_LOGIN"] | python | 1 | 0 | |
services/dlab-utils/src/main/java/com/epam/dlab/util/ServiceUtils.java | /*
* Copyright (c) 2018, EPAM SYSTEMS INC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.epam.dlab.util;
import com.epam.dlab.exceptions.DlabException;
import java.io.IOException;
import java.net.JarURLConnection;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
public class ServiceUtils {
    private static String includePath = null;

    static {
        includePath = System.getenv("DLAB_CONF_DIR");
        if ( includePath == null || includePath.isEmpty() ) {
            includePath = getUserDir();
        }
    }

    /** Return working directory.
     */
    public static String getUserDir() {
        return System.getProperty("user.dir");
    }

    /** Return path to DLab configuration directory.
     * @return
     */
    public static String getConfPath() {
        return includePath;
    }

    /** Return manifest for given class or empty manifest if {@link JarFile#MANIFEST_NAME} not found.
     * @param clazz class.
     * @throws IOException
     */
    private static Manifest getManifestForClass(Class<?> clazz) throws IOException {
        URL url = clazz.getClassLoader().getResource(JarFile.MANIFEST_NAME);
        return (url == null ? new Manifest() : new Manifest(url.openStream()));
    }

    /** Return manifest from JAR file.
     * @param classPath path to class in JAR file.
     * @throws IOException
     */
    private static Manifest getManifestFromJar(String classPath) throws IOException {
        URL url = new URL(classPath);
        JarURLConnection jarConnection = (JarURLConnection) url.openConnection();
        return jarConnection.getManifest();
    }

    /** Return manifest map for given class or empty map if manifest not found or cannot be read.
     * @param clazz class.
     */
    public static Map<String, String> getManifest(Class<?> clazz) {
        String className = "/" + clazz.getName().replace('.', '/') + ".class";
        String classPath = clazz.getResource(className).toString();
        Map<String, String> map = new HashMap<>();
        try {
            Manifest manifest = (classPath.startsWith("jar:file:") ? getManifestFromJar(classPath) : getManifestForClass(clazz));
            Attributes attributes = manifest.getMainAttributes();
            for (Object key : attributes.keySet()) {
                map.put(key.toString(), (String) attributes.get(key));
            }
        } catch (IOException e) {
            System.err.println("Cannot find or open manifest for class " + className);
            throw new DlabException("Cannot read manifest file", e);
        }

        return map;
    }

    /** Print to standard output the manifest info about application. If parameter <b>args</b> is not
     * <b>null</b> and one or more arguments have value -v or --version then print version and return <b>true</b>
     * otherwise <b>false</b>.
     * @param mainClass the main class of application.
     * @param args the arguments of main class function or null.
     * @return if parameter <b>args</b> is not null and one or more arguments have value -v or --version
     * then return <b>true</b> otherwise <b>false</b>.
     */
    public static boolean printAppVersion(Class<?> mainClass, String ... args) {
        boolean result = false;
        if (args != null) {
            for (String arg : args) {
                if (arg.equals("-v") ||
                    arg.equals("--version")) {
                    result = true;
                }
            }
            if (!result) {
                return result;
            }
        }

        Map<String, String> manifest = getManifest(mainClass);
        if (manifest.isEmpty()) {
            return result;
        }

        System.out.println("Title       " + manifest.get("Implementation-Title"));
        System.out.println("Version     " + manifest.get("Implementation-Version"));
        System.out.println("Created By  " + manifest.get("Created-By"));
        System.out.println("Vendor      " + manifest.get("Implementation-Vendor"));
        System.out.println("GIT-Branch  " + manifest.get("GIT-Branch"));
        System.out.println("GIT-Commit  " + manifest.get("GIT-Commit"));
        System.out.println("Build JDK   " + manifest.get("Build-Jdk"));
        System.out.println("Build OS    " + manifest.get("Build-OS"));
        System.out.println("Built Time  " + manifest.get("Build-Time"));
        System.out.println("Built By    " + manifest.get("Built-By"));

        return result;
    }
}
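
/*
 * Illustrative usage sketch (added commentary, not part of the original class).
 * A typical service entry point consults printAppVersion() first so that
 * "-v" / "--version" prints the manifest details and exits; the class name and
 * startup steps below are assumptions for illustration only.
 *
 *     public static void main(String[] args) {
 *         if (ServiceUtils.printAppVersion(SomeServiceApplication.class, args)) {
 *             return;
 *         }
 *         String confDir = ServiceUtils.getConfPath();
 *         // ... load configuration from confDir and start the service
 *     }
 */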
| ["\"DLAB_CONF_DIR\""] | [] | ["DLAB_CONF_DIR"] | [] | ["DLAB_CONF_DIR"] | java | 1 | 0 |