filename stringlengths 4-198 | content stringlengths 25-939k | environment list | variablearg list | constarg list | variableargjson stringclasses 1 value | constargjson stringlengths 2-3.9k | lang stringclasses 3 values | constargcount float64 0-129 ⌀ | variableargcount float64 0-0 ⌀ | sentence stringclasses 1 value |
---|---|---|---|---|---|---|---|---|---|---|
pictest/wsgi.py
|
"""
WSGI config for pictest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pictest.settings")
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PROJECT_DIR)
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
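A minimal way to exercise the WSGI callable defined in pictest/wsgi.py above is the standard-library wsgiref server. This is only a sketch: it assumes Django and the pictest project (including its settings module) are importable; the host and port are illustrative.
# Sketch: serve the application callable above with the stdlib WSGI server.
# Assumes Django is installed and pictest.settings resolves.
from wsgiref.simple_server import make_server
from pictest.wsgi import application

if __name__ == "__main__":
    with make_server("127.0.0.1", 8000, application) as httpd:
        print("Serving on http://127.0.0.1:8000 ...")
        httpd.serve_forever()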
domain/deploymentsRunntimeDocker.go
|
package domain
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
)
type DeploymentsRuntimeDocker struct {
ctx *context.Context
}
// Run Container
func (dr *DeploymentsRuntimeDocker) RunContainer(props ContainerProps) (container.ContainerCreateCreatedBody, error) {
containerBody := container.ContainerCreateCreatedBody{}
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return containerBody, err
}
authBytes, _ := json.Marshal(map[string]string{
"username": os.Getenv("DOCKER_USERNAME"),
"password": os.Getenv("DOCKER_TOKEN"),
})
// Pull Container Image
reader, err := cli.ImagePull(*dr.ctx, props.Image, types.ImagePullOptions{
RegistryAuth: base64.StdEncoding.EncodeToString(authBytes),
})
if err != nil {
return containerBody, err
}
io.Copy(os.Stdout, reader)
fmt.Println("")
// Host config
hostConfig := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(props.Port): []nat.PortBinding{
{
HostIP: props.HostIP,
HostPort: props.HostPort,
},
},
},
}
// Container config
containerConfig := &container.Config{
Image: props.Image,
Cmd: props.Command,
Labels: props.Label,
}
// Create Container
resp, err := cli.ContainerCreate(*dr.ctx, containerConfig, hostConfig, nil, nil, props.Name)
if err != nil {
return containerBody, err
}
// Start Container
if err := cli.ContainerStart(*dr.ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
return containerBody, err
}
return resp, err
}
// Stop Container
func (dr *DeploymentsRuntimeDocker) Stop(id string, timeout time.Duration) error {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
fmt.Println(err.Error())
return err
}
if err := cli.ContainerStop(*dr.ctx, id, &timeout); err != nil {
return err
}
return nil
}
func (dr *DeploymentsRuntimeDocker) GetContainer(id string) (types.Container, error) {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return types.Container{}, err
}
filter := filters.NewArgs()
filter.Add("id", id)
containers, err := cli.ContainerList(*dr.ctx, types.ContainerListOptions{Filters: filter})
if err != nil {
return types.Container{}, err
}
if len(containers) == 0 {
return types.Container{}, fmt.Errorf("container %s not found", id)
}
return containers[0], nil
}
// Delete Container
func (dr *DeploymentsRuntimeDocker) DeleteContainer(id string) error {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return err
}
if err := cli.ContainerRemove(*dr.ctx, id, types.ContainerRemoveOptions{}); err != nil {
return err
}
return nil
}
func NewDeploymentsRuntime(ctx *context.Context) DeploymentsRuntimeDocker {
return DeploymentsRuntimeDocker{ctx: ctx}
}
|
[
"\"DOCKER_USERNAME\"",
"\"DOCKER_TOKEN\""
] |
[] |
[
"DOCKER_USERNAME",
"DOCKER_TOKEN"
] |
[]
|
["DOCKER_USERNAME", "DOCKER_TOKEN"]
|
go
| 2 | 0 | |
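RunContainer in deploymentsRunntimeDocker.go above builds its RegistryAuth value by JSON-encoding the DOCKER_USERNAME and DOCKER_TOKEN environment variables and then base64-encoding the result, which is the format the Docker Engine API expects in its X-Registry-Auth header. A stdlib-only Python sketch of the same encoding (the variable names come from the Go code; everything else is illustrative):
# Sketch: reproduce the registry-auth encoding used by RunContainer above.
import base64
import json
import os

def registry_auth() -> str:
    # JSON auth object, then standard base64, mirroring the Go code's
    # json.Marshal + base64.StdEncoding.EncodeToString.
    auth = {
        "username": os.environ.get("DOCKER_USERNAME", ""),
        "password": os.environ.get("DOCKER_TOKEN", ""),
    }
    return base64.b64encode(json.dumps(auth).encode("utf-8")).decode("ascii")

if __name__ == "__main__":
    print(registry_auth())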
src/edge_update_function/main.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import io
import json
import boto3
import logging
import zipfile
from urllib.request import urlopen
from crhelper import CfnResource
logger = logging.getLogger()
logger.setLevel(logging.INFO)
helper = CfnResource()
lambda_client = boto3.client('lambda')
def update_function(event):
user_pool_id = event['ResourceProperties']['UserPoolId']
cognito_region = event['ResourceProperties']['CognitoRegion']
source_url = event['ResourceProperties'].get('SourceUrl')
edge_function_arn = event['ResourceProperties']['EdgeFunctionArn']
function_filename = event['ResourceProperties'].get('FunctionFilename', 'index.js')
logger.info("Downloading well-known jwks.json from Cognito")
jwks_url = f'https://cognito-idp.{cognito_region}.amazonaws.com/{user_pool_id}/.well-known/jwks.json'
with urlopen(jwks_url) as http_response:
jwks = http_response.read().decode('utf-8')
logger.debug(json.dumps(jwks, indent = 2, default = str))
if not source_url:
logger.info('SourceUrl not specified so determining code location from Lambda for "Templated" alias')
# The "Templated" alias is created when the edge auth function is deployed and represents the original
# version of the function that is templated with replacement variables.
response = lambda_client.get_function(
FunctionName = f'{edge_function_arn}:Templated'
)
source_url = response['Code']['Location']
logger.info("Building updated function zip archive")
js = None
with urlopen(source_url) as zip_resp:
with zipfile.ZipFile(io.BytesIO(zip_resp.read())) as zin:
with zipfile.ZipFile('/tmp/edge-code.zip', 'w') as zout:
zout.comment = zin.comment
for item in zin.infolist():
if item.filename == function_filename:
js = io.TextIOWrapper(io.BytesIO(zin.read(item.filename))).read()
else:
zout.writestr(item, zin.read(item.filename))
if not js:
raise Exception(f'Function code archive does not contain the file "{function_filename}"')
js = js.replace('##JWKS##', jwks)
js = js.replace('##USERPOOLID##', user_pool_id)
js = js.replace('##COGNITOREGION##', cognito_region)
logger.info('Writing updated js file %s to archive', function_filename)
with zipfile.ZipFile('/tmp/edge-code.zip', mode='a', compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr(function_filename, js)
# Load file into memory
with open('/tmp/edge-code.zip', 'rb') as file_data:
bytes_content = file_data.read()
logger.info('Updating lambda function with updated code archive')
response = lambda_client.update_function_code(
FunctionName = edge_function_arn,
ZipFile = bytes_content
)
logger.debug(response)
@helper.create
@helper.update
def create_or_update_resource(event, _):
update_function(event)
def lambda_handler(event, context):
logger.info(os.environ)
logger.info(json.dumps(event, indent = 2, default = str))
# If the event has a RequestType, we're being called by CFN as custom resource
if event.get('RequestType'):
logger.info('Function called from CloudFormation as custom resource')
helper(event, context)
else:
logger.info('Function called outside of CloudFormation')
# Call function directly (i.e. testing in Lambda console or called directly)
update_function(event)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
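update_function in src/edge_update_function/main.py reads everything it needs from event['ResourceProperties']. When the function is invoked directly (the non-CloudFormation branch in lambda_handler), a test event only has to carry those keys. A sketch of such an event; the key names come from the code above, and every value is a hypothetical placeholder:
# Sketch: a direct-invocation test event for lambda_handler above.
# Key names follow the code; every value here is a hypothetical placeholder.
test_event = {
    "ResourceProperties": {
        "UserPoolId": "us-east-1_EXAMPLE",
        "CognitoRegion": "us-east-1",
        "EdgeFunctionArn": "arn:aws:lambda:us-east-1:123456789012:function:edge-auth",
        # "SourceUrl" omitted: the code then resolves the "Templated" alias location.
        "FunctionFilename": "index.js",
    }
}
# lambda_handler(test_event, None) would download the jwks, rebuild the zip,
# and call update_function_code with the patched archive.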
airflow/contrib/example_dags/example_gcp_sql_query.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that performs a query in a Cloud SQL instance.
This DAG relies on the following OS environment variables
* GCP_PROJECT_ID - Google Cloud Platform project for the Cloud SQL instance
* GCP_REGION - Google Cloud region where the database is created
*
* GCSQL_POSTGRES_INSTANCE_NAME - Name of the postgres Cloud SQL instance
* GCSQL_POSTGRES_USER - Name of the postgres database user
* GCSQL_POSTGRES_PASSWORD - Password of the postgres database user
* GCSQL_POSTGRES_PUBLIC_IP - Public IP of the Postgres database
* GCSQL_POSTGRES_PUBLIC_PORT - Port of the postgres database
*
* GCSQL_MYSQL_INSTANCE_NAME - Name of the MySQL Cloud SQL instance
* GCSQL_MYSQL_USER - Name of the mysql database user
* GCSQL_MYSQL_PASSWORD - Password of the mysql database user
* GCSQL_MYSQL_PUBLIC_IP - Public IP of the mysql database
* GCSQL_MYSQL_PUBLIC_PORT - Port of the mysql database
"""
import os
import subprocess
from os.path import expanduser
from urllib.parse import quote_plus
import airflow
from airflow import models
from airflow.contrib.operators.gcp_sql_operator import CloudSqlQueryOperator
# [START howto_operator_cloudsql_query_arguments]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCP_REGION = os.environ.get('GCP_REGION', 'europe-west-1b')
GCSQL_POSTGRES_INSTANCE_NAME_QUERY = os.environ.get(
'GCSQL_POSTGRES_INSTANCE_NAME_QUERY',
'testpostgres')
GCSQL_POSTGRES_DATABASE_NAME = os.environ.get('GCSQL_POSTGRES_DATABASE_NAME',
'postgresdb')
GCSQL_POSTGRES_USER = os.environ.get('GCSQL_POSTGRES_USER', 'postgres_user')
GCSQL_POSTGRES_PASSWORD = os.environ.get('GCSQL_POSTGRES_PASSWORD', 'password')
GCSQL_POSTGRES_PUBLIC_IP = os.environ.get('GCSQL_POSTGRES_PUBLIC_IP', '0.0.0.0')
GCSQL_POSTGRES_PUBLIC_PORT = os.environ.get('GCSQL_POSTGRES_PUBLIC_PORT', 5432)
GCSQL_POSTGRES_CLIENT_CERT_FILE = os.environ.get('GCSQL_POSTGRES_CLIENT_CERT_FILE',
".key/postgres-client-cert.pem")
GCSQL_POSTGRES_CLIENT_KEY_FILE = os.environ.get('GCSQL_POSTGRES_CLIENT_KEY_FILE',
".key/postgres-client-key.pem")
GCSQL_POSTGRES_SERVER_CA_FILE = os.environ.get('GCSQL_POSTGRES_SERVER_CA_FILE',
".key/postgres-server-ca.pem")
GCSQL_MYSQL_INSTANCE_NAME_QUERY = os.environ.get('GCSQL_MYSQL_INSTANCE_NAME_QUERY',
'testmysql')
GCSQL_MYSQL_DATABASE_NAME = os.environ.get('GCSQL_MYSQL_DATABASE_NAME', 'mysqldb')
GCSQL_MYSQL_USER = os.environ.get('GCSQL_MYSQL_USER', 'mysql_user')
GCSQL_MYSQL_PASSWORD = os.environ.get('GCSQL_MYSQL_PASSWORD', 'password')
GCSQL_MYSQL_PUBLIC_IP = os.environ.get('GCSQL_MYSQL_PUBLIC_IP', '0.0.0.0')
GCSQL_MYSQL_PUBLIC_PORT = os.environ.get('GCSQL_MYSQL_PUBLIC_PORT', 3306)
GCSQL_MYSQL_CLIENT_CERT_FILE = os.environ.get('GCSQL_MYSQL_CLIENT_CERT_FILE',
".key/mysql-client-cert.pem")
GCSQL_MYSQL_CLIENT_KEY_FILE = os.environ.get('GCSQL_MYSQL_CLIENT_KEY_FILE',
".key/mysql-client-key.pem")
GCSQL_MYSQL_SERVER_CA_FILE = os.environ.get('GCSQL_MYSQL_SERVER_CA_FILE',
".key/mysql-server-ca.pem")
SQL = [
'CREATE TABLE IF NOT EXISTS TABLE_TEST (I INTEGER)',
'CREATE TABLE IF NOT EXISTS TABLE_TEST (I INTEGER)', # shows warnings logged
'INSERT INTO TABLE_TEST VALUES (0)',
'CREATE TABLE IF NOT EXISTS TABLE_TEST2 (I INTEGER)',
'DROP TABLE TABLE_TEST',
'DROP TABLE TABLE_TEST2',
]
# [END howto_operator_cloudsql_query_arguments]
default_args = {
'start_date': airflow.utils.dates.days_ago(1)
}
# [START howto_operator_cloudsql_query_connections]
HOME_DIR = expanduser("~")
def get_absolute_path(path):
if path.startswith("/"):
return path
else:
return os.path.join(HOME_DIR, path)
postgres_kwargs = dict(
user=quote_plus(GCSQL_POSTGRES_USER),
password=quote_plus(GCSQL_POSTGRES_PASSWORD),
public_port=GCSQL_POSTGRES_PUBLIC_PORT,
public_ip=quote_plus(GCSQL_POSTGRES_PUBLIC_IP),
project_id=quote_plus(GCP_PROJECT_ID),
location=quote_plus(GCP_REGION),
instance=quote_plus(GCSQL_POSTGRES_INSTANCE_NAME_QUERY),
database=quote_plus(GCSQL_POSTGRES_DATABASE_NAME),
client_cert_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_CLIENT_CERT_FILE)),
client_key_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_CLIENT_KEY_FILE)),
server_ca_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_SERVER_CA_FILE))
)
# The connections below are created using one of the standard approaches - via environment
# variables named AIRFLOW_CONN_* . The connections can also be created in the database
# of AIRFLOW (using command line or UI).
# Postgres: connect via proxy over TCP
os.environ['AIRFLOW_CONN_PROXY_POSTGRES_TCP'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=postgres&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=True&" \
"sql_proxy_use_tcp=True".format(**postgres_kwargs)
# Postgres: connect via proxy over UNIX socket (specific proxy version)
os.environ['AIRFLOW_CONN_PROXY_POSTGRES_SOCKET'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=postgres&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=True&" \
"sql_proxy_version=v1.13&" \
"sql_proxy_use_tcp=False".format(**postgres_kwargs)
# Postgres: connect directly via TCP (non-SSL)
os.environ['AIRFLOW_CONN_PUBLIC_POSTGRES_TCP'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=postgres&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=False&" \
"use_ssl=False".format(**postgres_kwargs)
# Postgres: connect directly via TCP (SSL)
os.environ['AIRFLOW_CONN_PUBLIC_POSTGRES_TCP_SSL'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=postgres&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=False&" \
"use_ssl=True&" \
"sslcert={client_cert_file}&" \
"sslkey={client_key_file}&" \
"sslrootcert={server_ca_file}"\
.format(**postgres_kwargs)
mysql_kwargs = dict(
user=quote_plus(GCSQL_MYSQL_USER),
password=quote_plus(GCSQL_MYSQL_PASSWORD),
public_port=GCSQL_MYSQL_PUBLIC_PORT,
public_ip=quote_plus(GCSQL_MYSQL_PUBLIC_IP),
project_id=quote_plus(GCP_PROJECT_ID),
location=quote_plus(GCP_REGION),
instance=quote_plus(GCSQL_MYSQL_INSTANCE_NAME_QUERY),
database=quote_plus(GCSQL_MYSQL_DATABASE_NAME),
client_cert_file=quote_plus(get_absolute_path(GCSQL_MYSQL_CLIENT_CERT_FILE)),
client_key_file=quote_plus(get_absolute_path(GCSQL_MYSQL_CLIENT_KEY_FILE)),
server_ca_file=quote_plus(get_absolute_path(GCSQL_MYSQL_SERVER_CA_FILE))
)
# MySQL: connect via proxy over TCP (specific proxy version)
os.environ['AIRFLOW_CONN_PROXY_MYSQL_TCP'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=mysql&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=True&" \
"sql_proxy_version=v1.13&" \
"sql_proxy_use_tcp=True".format(**mysql_kwargs)
# MySQL: connect via proxy over UNIX socket using pre-downloaded Cloud Sql Proxy binary
try:
sql_proxy_binary_path = subprocess.check_output(
['which', 'cloud_sql_proxy']).decode('utf-8').rstrip()
except subprocess.CalledProcessError:
sql_proxy_binary_path = "/tmp/anyhow_download_cloud_sql_proxy"
os.environ['AIRFLOW_CONN_PROXY_MYSQL_SOCKET'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=mysql&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=True&" \
"sql_proxy_binary_path={sql_proxy_binary_path}&" \
"sql_proxy_use_tcp=False".format(
sql_proxy_binary_path=quote_plus(sql_proxy_binary_path), **mysql_kwargs)
# MySQL: connect directly via TCP (non-SSL)
os.environ['AIRFLOW_CONN_PUBLIC_MYSQL_TCP'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=mysql&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=False&" \
"use_ssl=False".format(**mysql_kwargs)
# MySQL: connect directly via TCP (SSL) and with fixed Cloud Sql Proxy binary path
os.environ['AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=mysql&" \
"project_id={project_id}&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=False&" \
"use_ssl=True&" \
"sslcert={client_cert_file}&" \
"sslkey={client_key_file}&" \
"sslrootcert={server_ca_file}".format(**mysql_kwargs)
# Special case: MySQL: connect directly via TCP (SSL) and with fixed Cloud Sql
# Proxy binary path AND with missing project_id
os.environ['AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL_NO_PROJECT_ID'] = \
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?" \
"database_type=mysql&" \
"location={location}&" \
"instance={instance}&" \
"use_proxy=False&" \
"use_ssl=True&" \
"sslcert={client_cert_file}&" \
"sslkey={client_key_file}&" \
"sslrootcert={server_ca_file}".format(**mysql_kwargs)
# [END howto_operator_cloudsql_query_connections]
# [START howto_operator_cloudsql_query_operators]
connection_names = [
"proxy_postgres_tcp",
"proxy_postgres_socket",
"public_postgres_tcp",
"public_postgres_tcp_ssl",
"proxy_mysql_tcp",
"proxy_mysql_socket",
"public_mysql_tcp",
"public_mysql_tcp_ssl",
"public_mysql_tcp_ssl_no_project_id"
]
tasks = []
with models.DAG(
dag_id='example_gcp_sql_query',
default_args=default_args,
schedule_interval=None
) as dag:
prev_task = None
for connection_name in connection_names:
task = CloudSqlQueryOperator(
gcp_cloudsql_conn_id=connection_name,
task_id="example_gcp_sql_task_" + connection_name,
sql=SQL
)
tasks.append(task)
if prev_task:
prev_task >> task
prev_task = task
# [END howto_operator_cloudsql_query_operators]
|
[] |
[] |
[
"AIRFLOW_CONN_PUBLIC_POSTGRES_TCP_SSL",
"AIRFLOW_CONN_PUBLIC_POSTGRES_TCP",
"GCSQL_POSTGRES_CLIENT_CERT_FILE",
"GCSQL_MYSQL_SERVER_CA_FILE",
"GCSQL_MYSQL_PASSWORD",
"GCSQL_POSTGRES_USER",
"GCSQL_MYSQL_PUBLIC_IP",
"AIRFLOW_CONN_PROXY_POSTGRES_TCP",
"AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL",
"GCP_PROJECT_ID",
"AIRFLOW_CONN_PROXY_POSTGRES_SOCKET",
"AIRFLOW_CONN_PROXY_MYSQL_SOCKET",
"GCSQL_MYSQL_CLIENT_KEY_FILE",
"GCSQL_POSTGRES_SERVER_CA_FILE",
"GCSQL_MYSQL_CLIENT_CERT_FILE",
"AIRFLOW_CONN_PROXY_MYSQL_TCP",
"GCSQL_MYSQL_INSTANCE_NAME_QUERY",
"GCSQL_POSTGRES_CLIENT_KEY_FILE",
"GCSQL_POSTGRES_PASSWORD",
"GCSQL_POSTGRES_DATABASE_NAME",
"AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL_NO_PROJECT_ID",
"GCSQL_POSTGRES_PUBLIC_PORT",
"GCSQL_MYSQL_PUBLIC_PORT",
"GCSQL_POSTGRES_INSTANCE_NAME_QUERY",
"GCP_REGION",
"GCSQL_MYSQL_DATABASE_NAME",
"GCSQL_MYSQL_USER",
"GCSQL_POSTGRES_PUBLIC_IP",
"AIRFLOW_CONN_PUBLIC_MYSQL_TCP"
] |
[]
|
["AIRFLOW_CONN_PUBLIC_POSTGRES_TCP_SSL", "AIRFLOW_CONN_PUBLIC_POSTGRES_TCP", "GCSQL_POSTGRES_CLIENT_CERT_FILE", "GCSQL_MYSQL_SERVER_CA_FILE", "GCSQL_MYSQL_PASSWORD", "GCSQL_POSTGRES_USER", "GCSQL_MYSQL_PUBLIC_IP", "AIRFLOW_CONN_PROXY_POSTGRES_TCP", "AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL", "GCP_PROJECT_ID", "AIRFLOW_CONN_PROXY_POSTGRES_SOCKET", "AIRFLOW_CONN_PROXY_MYSQL_SOCKET", "GCSQL_MYSQL_CLIENT_KEY_FILE", "GCSQL_POSTGRES_SERVER_CA_FILE", "GCSQL_MYSQL_CLIENT_CERT_FILE", "AIRFLOW_CONN_PROXY_MYSQL_TCP", "GCSQL_MYSQL_INSTANCE_NAME_QUERY", "GCSQL_POSTGRES_CLIENT_KEY_FILE", "GCSQL_POSTGRES_PASSWORD", "GCSQL_POSTGRES_DATABASE_NAME", "AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL_NO_PROJECT_ID", "GCSQL_POSTGRES_PUBLIC_PORT", "GCSQL_MYSQL_PUBLIC_PORT", "GCSQL_POSTGRES_INSTANCE_NAME_QUERY", "GCP_REGION", "GCSQL_MYSQL_DATABASE_NAME", "GCSQL_MYSQL_USER", "GCSQL_POSTGRES_PUBLIC_IP", "AIRFLOW_CONN_PUBLIC_MYSQL_TCP"]
|
python
| 29 | 0 | |
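The AIRFLOW_CONN_* URIs above embed credentials, host, and file paths directly in a gcpcloudsql:// connection string, which is why every dynamic component is passed through quote_plus first. A minimal sketch of that encoding step only (names and values are illustrative; this is not a complete, usable connection definition):
# Sketch: percent-encode credentials before embedding them in an AIRFLOW_CONN_* URI.
# Reserved characters such as '@', '/' or '+' in a password would otherwise
# make the URI ambiguous.
import os
from urllib.parse import quote_plus

user = "postgres_user"
password = "p@ss/word+1"   # illustrative password containing reserved characters
host, port, database = "10.0.0.5", 5432, "postgresdb"

os.environ["AIRFLOW_CONN_MY_POSTGRES"] = (
    f"gcpcloudsql://{quote_plus(user)}:{quote_plus(password)}@{host}:{port}/"
    f"{quote_plus(database)}?database_type=postgres&use_proxy=False&use_ssl=False"
)
print(os.environ["AIRFLOW_CONN_MY_POSTGRES"])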
scripts/time_helpers.py
|
import pytz
import datetime, os
### CONVERT FROM ONE TIME ZONE TO ANOTHER
def convert_timezone(_from, to):
# samples 'Africa/Lagos', 'US/Central'
source_zone = pytz.timezone(_from)
target_zone = pytz.timezone(to)
curtime = source_zone.localize(datetime.datetime.now())
curtime = curtime.astimezone(target_zone)
return curtime
### MAKE DATETIME AWARE OF SERVER TIMEZONE
def localize_time(time):
target_zone = pytz.timezone(os.getenv("time_zone"))
localized_time = target_zone.localize(time)
return localized_time
def convert_date(date):
# RETURN DATE OBJECT FROM SET FORMAT DD-MM-YYYY HH:MM
date_object = datetime.datetime.strptime(date, "%d-%m-%Y %H:%M")
return date_object
|
[] |
[] |
[
"time_zone"
] |
[]
|
["time_zone"]
|
python
| 1 | 0 | |
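A short usage sketch for scripts/time_helpers.py above. It assumes pytz is installed, that the module is importable under this path, and that the time_zone environment variable holds a valid zone name; the concrete zone names and date are illustrative.
# Sketch: exercising the helpers above. Import path and zone names are illustrative.
import datetime
import os

os.environ.setdefault("time_zone", "US/Central")   # read by localize_time

from scripts.time_helpers import convert_timezone, localize_time, convert_date

print(convert_timezone("Africa/Lagos", "US/Central").isoformat())
print(localize_time(datetime.datetime(2023, 5, 1, 12, 30)).tzinfo)
print(convert_date("01-05-2023 12:30"))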
torrent/torrent_tracker/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tracker_backend.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
backend/b_y_o_brandz_33929/settings.py
|
"""
Django settings for b_y_o_brandz_33929 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'b_y_o_brandz_33929.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'b_y_o_brandz_33929.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|
[] |
[] |
[
"SETTINGS_NAME"
] |
[]
|
["SETTINGS_NAME"]
|
python
| 1 | 0 | |
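The Secret Manager block in settings.py above expects the django_settings secret payload to be a .env-style text blob; env.read_env(io.StringIO(payload)) then parses it exactly like a local .env file. A sketch of the shape that payload might take; the keys mirror the env.str()/env.bool() reads later in the settings file, and the values are placeholders:
# Sketch: the .env-style payload the Secret Manager block above expects.
import io
import environ  # django-environ, as imported by settings.py

payload = (
    "DEBUG=False\n"
    "SECRET_KEY=replace-me\n"
    "DATABASE_URL=postgres://user:pass@127.0.0.1:5432/app\n"
    "SENDGRID_USERNAME=apikey\n"
    "SENDGRID_PASSWORD=replace-me\n"
)
env = environ.Env()
env.read_env(io.StringIO(payload))   # same call as in settings.py
print(env.str("SECRET_KEY"), env.bool("DEBUG"))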
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 13 11:09:54 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from pathlib import Path
from lino.sphinxcontrib import configure
import lino
import lino_xl
import lino_book
sys.path.insert(0, Path(__file__).parent.absolute())
extlinks = {}
intersphinx_mapping = {}
extensions = []
configure(globals(), 'lino_book.projects.min9.settings')
language = 'en'
# extensions += ['sphinxcontrib.taglist']
extensions += ['atelier.sphinxconf.blog']
extensions += ['atelier.sphinxconf.complex_tables']
extensions += ['lino.sphinxcontrib.logo']
extensions += ['lino.sphinxcontrib.actordoc']
extensions += ['lino.sphinxcontrib.base']
# extensions += ['sphinx.ext.napoleon']
extensions += ['atelier.sphinxconf.sigal_image']
sigal_base_url = 'http://sigal.saffre-rumma.net'
extensions += ['lino.sphinxcontrib.help_texts_extractor']
help_texts_builder_targets = {
# 'lino.': 'lino.modlib.lino_startup',
'lino.': 'lino',
# 'lino.modlib.': 'lino.modlib.lino_startup',
'lino_xl.': 'lino_xl.lib.xl',
'lino_noi.': 'lino_noi.lib.noi',
'lino_tera.': 'lino_tera.lib.tera',
'lino_vilma.': 'lino_vilma.lib.vilma',
'lino_avanti.': 'lino_avanti.lib.avanti',
'lino_cosi.': 'lino_cosi.lib.cosi',
'lino_care.': 'lino_care.lib.care',
'lino_voga.': 'lino_voga.lib.voga',
# 'lino_welfare.': 'lino_welfare.modlib.welfare',
}
if False:
extensions += ['sphinxcontrib.blockdiag']
# Fontpath for blockdiag (truetype font)
blockdiag_fontpath = '/usr/share/fonts/truetype/ipafont/ipagp.ttf'
# templates_path = ['.templates']
# General information about the project.
project = "The Lino Book"
copyright = '2002-2021 Rumma & Ko Ltd'
from lino_book import SETUP_INFO
release = SETUP_INFO['version']
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_patterns = [
'blog/*',
# 'blog/2009/*',
# 'blog/2010/*',
# 'blog/2011/*',
# 'blog/2012/*',
# 'blog/2013/*',
'old/*',
'tickets/*',
# 'releases/2010/*',
# 'releases/2011/*',
'include/*',
'shared/include/*',
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "The Lino Book"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
html_static_path = ['.static']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo.png' # The logo is set in lino.sphinxcontrib.logo
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = 'favicon.ico'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
#~ last_updated = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['globaltoc.html', 'searchbox.html', 'links.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#~ html_additional_pages = {
#~ '*': 'links.html',
#~ }
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
#~ html_use_opensearch = 'http://lino.saffre-rumma.net'
html_use_opensearch = lino.SETUP_INFO['url']
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'lino'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'lino.tex', 'lino', 'Luc Saffre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#language="de"
#~ show_source = True
# setup1 = setup
# def setup(app):
# app.add_stylesheet('linodocs.css')
# app.add_stylesheet('centeredlogo.css')
# setup1(app)
#~ app.add_stylesheet('dialog.css')
#~ app.add_stylesheet('scrollwide.css')
# extlinks.update(ticket=('http://trac.lino-framework.org/ticket/%s', '#'))
#extlinks.update(ticket=('http://bugs.saffre-rumma.net/ticket/%s', '#'))
extlinks.update(ticket=('http://bugs.saffre-rumma.net/tickets/Ticket/%s', '#'))
extlinks.update({
'issue': (
'http://code.google.com/p/lino/issues/detail?id=%s', '# '),
'checkin': (
'http://code.google.com/p/lino/source/detail?r=%s', 'Checkin '),
'srcref': (lino.srcref_url, ''),
'srcref_xl': (lino_xl.srcref_url, ''),
'srcref_book': (lino_book.srcref_url, ''),
'extjs': ('http://www.sencha.com/deploy/dev/docs/?class=%s', ''),
'extux': ('http://extjs-ux.org/ext-docs/?class=%s', ''),
'djangoticket': (
'https://code.djangoproject.com/ticket/%s', 'Django ticket #'),
'welfare': ('https://welfare.lino-framework.org%s.html', ''),
# 'welfareticket': (
# 'http://welfare.lino-framework.org/tickets/%s.html', ''),
# 'welfareusermande': (
# 'http://welfare-userman.lino-framework.org/de%s.html', ''),
# 'welfareusermanfr': (
# 'http://welfare-userman.lino-framework.org/fr%s.html', ''),
})
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# if on_rtd:
# for n in """python django
# atelier lino
# lino-welfare lino-faggio lino-patrols""".split():
# intersphinx_mapping[n] = (
# 'http://%s.readthedocs.org/en/latest/' % n, None)
if False: # on_rtd:
for n in """python django lino xl""".split():
intersphinx_mapping[n] = ('https://%s.readthedocs.io/en/latest/' % n, None)
from atelier.sphinxconf import interproject
interproject.configure(
globals(), 'atelier etgen eidreader lino lino_xl',
cg=('https://community.lino-framework.org/', None),
django=('https://docs.djangoproject.com/en/3.1/', 'https://docs.djangoproject.com/en/dev/_objects/'),
sphinx=('https://www.sphinx-doc.org/en/master/', None))
#intersphinx_mapping['cg'] = ('https://community.lino-framework.org/', None)
autosummary_generate = True
#~ nitpicky = True # use -n in Makefile instead
# http://sphinx.pocoo.org/theming.html
# html_theme = "sizzle"
# html_theme_options = dict(collapsiblesidebar=True, externalrefs=True)
todo_include_todos = True
#~ New in version 1.1
gettext_compact = True
# print 20150311, extensions, templates_path
# print 20150701, autodoc_default_flags
# raise 123
# autodoc_default_flags = ['no-imported-members']
# autodoc_inherit_docstrings = False
extensions += ['sphinx.ext.inheritance_diagram']
inheritance_graph_attrs = dict(rankdir="TB")
# inheritance_graph_attrs.update(size='"12.0, 16.0"')
inheritance_graph_attrs.update(size='"48.0, 64.0"')
inheritance_graph_attrs.update(fontsize=14, ratio='compress')
suppress_warnings = ['image.nonlocal_uri']
# doctest_global_setup = """
# import sys
# sys.setdefaultencoding("UTF-8")
# """
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
testscripts/RDKB/component/TAD/TS_TAD_IPPing_GetDiagnosticsState.py
|
##########################################################################
#If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?>
<xml>
<id/>
<version>2</version>
<name>TS_TAD_IPPing_GetDiagnosticsState</name>
<primitive_test_id/>
<primitive_test_name>TADstub_Set</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To set all IPPing parameters and get the IPPing diagnostic state after a successful ping</synopsis>
<groups_id/>
<execution_time>2</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>Emulator</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_TAD_28</test_case_id>
<test_objective>To set all IPPing parameters and get the IPPing diagnostic state after a successful ping</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3,Emulator</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>TADstub_Get, TADstub_Set</api_or_interface_used>
<input_parameters>Device.IP.Diagnostics.IPPing.Interface
Device.IP.Diagnostics.IPPing.Host
Device.IP.Diagnostics.IPPing.DiagnosticsState</input_parameters>
<automation_approch>1. Load TAD modules
2. From script invoke TADstub_Get to get the IPPing Diagnostics state
3. Check whether the IPPing Diagnostics state is valid after successful ping
4. Validation of the result is done within the python script and send the result status to Test Manager.
5.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from TAD stub.</automation_approch>
<except_output>CheckPoint 1:
The output should be logged in the Agent console/Component log
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_TAD_IPPing_GetDiagnosticsState</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
import time;
import tdkutility;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("tad","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box IP and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_TAD_IPPing_GetDiagnosticsState');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
host = tdkutility.readtdkbConfigFile(obj);
tdkTestObj = obj.createTestStep('TADstub_Get');
tdkTestObj.addParameter("paramName","Device.IP.Diagnostics.TraceRoute.Host");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
if host == "NULL":
tdkTestObj.setResultStatus("FAILURE");
print "Host name not available in tdkb config file"
else:
tdkTestObj = obj.createTestStep('TADstub_Set');
tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.IPPing.Interface");
tdkTestObj.addParameter("ParamValue","Interface_erouter0");
tdkTestObj.addParameter("Type","string");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Set the interface of IPPing";
print "EXPECTED RESULT 1: Should set the interface of IPPing";
print "ACTUAL RESULT 1: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('TADstub_Set');
tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.IPPing.Host");
tdkTestObj.addParameter("ParamValue",host);
tdkTestObj.addParameter("Type","string");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Set the host of IPPing";
print "EXPECTED RESULT 2: Should set the host of IPPing";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('TADstub_Set');
tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.IPPing.DiagnosticsState");
tdkTestObj.addParameter("ParamValue","Requested");
tdkTestObj.addParameter("Type","string");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Set DiagnosticsState of IPPing as Requested";
print "EXPECTED RESULT 3: Should set DiagnosticsState of IPPing as Requested";
print "ACTUAL RESULT 3: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
time.sleep(40);
tdkTestObj = obj.createTestStep('TADstub_Get');
tdkTestObj.addParameter("paramName","Device.IP.Diagnostics.IPPing.DiagnosticsState");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult and "Complete" in details:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Get DiagnosticsState of IPPing";
print "EXPECTED RESULT 4: Should get DiagnosticsState of IPPing as Complete";
print "ACTUAL RESULT 4: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get DiagnosticsState of IPPing";
print "EXPECTED RESULT 4: Should get DiagnosticsState of IPPing as Complete";
print "ACTUAL RESULT 4: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Set DiagnosticsState of IPPing as Requested";
print "EXPECTED RESULT 3: Should set DiagnosticsState of IPPing as Requested";
print "ACTUAL RESULT 3: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Set the host of IPPing";
print "EXPECTED RESULT 2: Should set the host of IPPing";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Set the interface of IPPing";
print "EXPECTED RESULT 1: Should set the interface of IPPing";
print "ACTUAL RESULT 1: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("tad");
else:
print "Failed to load tad module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
PY_4/__init__.py
|
from . import website_stats
from . import command_parser
from . import crawler
from . import storage
from . import representation
from . import helpcode
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
teabag_app.py
|
# teabag app
from flask import Flask, render_template
import random as r
import os
app = Flask(__name__)
partsOfSpeech = {'nouns1': ['an aura', 'an accomplishment', 'the love', 'the life', 'the soul'],
'nouns2': ['respect', 'compassion', 'kindness', 'love', 'life', 'knowledge', 'strength',
'generosity', 'love', 'goodness', 'strength',
'belief', 'light', 'love', 'happiness', 'love', 'love', 'everything', 'trust', 'heart'],
'adverbs': ['righteously', 'sincerely'],
'verbs': ['live', 'sing', 'love', 'love', 'live', 'love', 'love', 'give', 'speak', 'speak', 'create',
'intend', 'intend', 'respect'],
'adjectives': ['happy', 'sacred', 'good', 'compassionate', 'giving', 'forgiving', 'loving', 'joyful',
'sincere']
}
phraseDict = {
0: f"You are {r.choice(partsOfSpeech['adjectives'])}",
1: f"{r.choice(partsOfSpeech['verbs']).title()} {r.choice(partsOfSpeech['adverbs'])}; you will build up {r.choice(partsOfSpeech['nouns1'])} of {r.choice(partsOfSpeech['nouns2'])}",
2: f"{r.choice(partsOfSpeech['verbs']).title()} to make yourself {r.choice(partsOfSpeech['adjectives'])}",
3: f"{r.choice(partsOfSpeech['nouns2']).title()} is {r.choice(partsOfSpeech['nouns1'])}",
4: f"It is not to talk of {r.choice(partsOfSpeech['nouns2'])} but to {r.choice(partsOfSpeech['verbs'])} {r.choice(partsOfSpeech['nouns2'])} that is {r.choice(partsOfSpeech['nouns2'])}",
5: f"{r.choice(partsOfSpeech['nouns2']).title()} is for now, {r.choice(partsOfSpeech['nouns2'])} is for the future",
6: f"{r.choice(partsOfSpeech['verbs']).title()} what you {r.choice(partsOfSpeech['verbs'])}, {r.choice(partsOfSpeech['verbs'])} what you {r.choice(partsOfSpeech['verbs'])}",
7: f"Your {r.choice(partsOfSpeech['nouns2'])} is your own {r.choice(partsOfSpeech['nouns2'])}",
8: f"{r.choice(partsOfSpeech['nouns2']).title()} has no limit, {r.choice(partsOfSpeech['nouns2'])} has no enemy",
9: f"{r.choice(partsOfSpeech['verbs']).title()} yourself so that you may know how to {r.choice(partsOfSpeech['verbs'])} with {r.choice(partsOfSpeech['nouns2'])}",
10: f"You don't need {r.choice(partsOfSpeech['nouns2'])} if you are {r.choice(partsOfSpeech['nouns2'])}",
11: f"{r.choice(partsOfSpeech['verbs']).title()} the sequence of {r.choice(partsOfSpeech['nouns2'])}, the consequences will always be {r.choice(partsOfSpeech['adjectives'])}",
12: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
13: f"Be {r.choice(partsOfSpeech['adjectives'])}",
14: f"{r.choice(partsOfSpeech['nouns2']).title()} is the constant state of {r.choice(partsOfSpeech['nouns2'])} for others",
15: f"{r.choice(partsOfSpeech['verbs']).title()} by your inner {r.choice(partsOfSpeech['nouns2'])}",
16: f"Develop the power of {r.choice(partsOfSpeech['nouns2'])}",
17: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
18: f"The principal ingredient of {r.choice(partsOfSpeech['nouns2'])} is {r.choice(partsOfSpeech['nouns2'])}",
19: "You're already dead",
20: f"{r.choice(partsOfSpeech['nouns1']).title()} of {r.choice(partsOfSpeech['nouns2'])}",
21: f"You are {r.choice(partsOfSpeech['adjectives'])}",
22: f"{r.choice(partsOfSpeech['verbs']).title()} {r.choice(partsOfSpeech['adverbs'])}; you will build up {r.choice(partsOfSpeech['nouns1'])} of {r.choice(partsOfSpeech['nouns2'])}",
23: f"{r.choice(partsOfSpeech['verbs']).title()} to make yourself {r.choice(partsOfSpeech['adjectives'])}",
24: f"{r.choice(partsOfSpeech['nouns2']).title()} is {r.choice(partsOfSpeech['nouns1'])}",
25: f"It is not to talk of {r.choice(partsOfSpeech['nouns2'])} but to {r.choice(partsOfSpeech['verbs'])} {r.choice(partsOfSpeech['nouns2'])} that is {r.choice(partsOfSpeech['nouns2'])}",
26: f"{r.choice(partsOfSpeech['nouns2']).title()} is for now, {r.choice(partsOfSpeech['nouns2'])} is for the future",
27: f"{r.choice(partsOfSpeech['verbs']).title()} what you {r.choice(partsOfSpeech['verbs'])}, {r.choice(partsOfSpeech['verbs'])} what you {r.choice(partsOfSpeech['verbs'])}",
28: f"Your {r.choice(partsOfSpeech['nouns2'])} is your own {r.choice(partsOfSpeech['nouns2'])}",
29: f"{r.choice(partsOfSpeech['nouns2']).title()} has no limit, {r.choice(partsOfSpeech['nouns2'])} has no enemy",
30: f"{r.choice(partsOfSpeech['verbs']).title()} yourself so that you may know how to {r.choice(partsOfSpeech['verbs'])} with {r.choice(partsOfSpeech['nouns2'])}",
31: f"You don't need {r.choice(partsOfSpeech['nouns2'])} if you are {r.choice(partsOfSpeech['nouns2'])}",
32: f"{r.choice(partsOfSpeech['verbs']).title()} the sequence of {r.choice(partsOfSpeech['nouns2'])}, the consequences will always be {r.choice(partsOfSpeech['adjectives'])}",
33: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
34: f"Be {r.choice(partsOfSpeech['adjectives'])}",
35: f"{r.choice(partsOfSpeech['nouns2']).title()} is the constant state of {r.choice(partsOfSpeech['nouns2'])} for others",
36: f"{r.choice(partsOfSpeech['verbs']).title()} by your inner {r.choice(partsOfSpeech['nouns2'])}",
37: f"Develop the power of {r.choice(partsOfSpeech['nouns2'])}",
38: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
39: f"The principal ingredient of {r.choice(partsOfSpeech['nouns2'])} is {r.choice(partsOfSpeech['nouns2'])}",
40: f"{r.choice(partsOfSpeech['nouns1']).title()} of {r.choice(partsOfSpeech['nouns2'])}",
}
@app.route('/') # endpoint of domain name
def teaBagger():
phrases = list(range(len(phraseDict)))
phraseKey = r.choice(phrases)
sentence = phraseDict[phraseKey]
return render_template('teasite.jinja2', sentence=sentence)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
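One subtlety in teabag_app.py above: the f-strings in phraseDict are evaluated once at import time, so the random choices are frozen when the app starts and only the key picked in teaBagger() varies per request. A hedged sketch of an alternative that defers the choices to call time (illustrative only, not a drop-in change to the app above):
# Sketch: store callables so the random choices happen on every call.
import random as r

parts = {
    'nouns2': ['respect', 'compassion', 'kindness', 'love'],
    'verbs': ['live', 'sing', 'love', 'give'],
    'adjectives': ['happy', 'sacred', 'good', 'joyful'],
}

phrase_templates = [
    lambda: f"You are {r.choice(parts['adjectives'])}",
    lambda: f"{r.choice(parts['verbs']).title()} what you {r.choice(parts['verbs'])}",
    lambda: f"The principal ingredient of {r.choice(parts['nouns2'])} is {r.choice(parts['nouns2'])}",
]

def make_sentence() -> str:
    # Each call re-runs r.choice, so repeated calls vary.
    return r.choice(phrase_templates)()

print(make_sentence())
print(make_sentence())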
examples/simple/main.go
|
// app example generated using Mercurius
package main
import (
"gopkg.in/macaron.v1"
"log"
"os"
"strconv"
config "github.com/novatrixtech/mercurius/examples/simple/conf"
conf "github.com/novatrixtech/mercurius/examples/simple/conf/app"
)
func main() {
app := macaron.New()
conf.SetupMiddlewares(app)
conf.SetupRoutes(app)
/*
Generated using http://www.kammerl.de/ascii/AsciiSignature.php - (Font: 'starwars')
All signatures are made with FIGlet (c) 1991, 1993, 1994 Glenn Chappell and Ian Chai
All fonts are taken from figlet.org and jave.de.
Please check for Font Credits the figlet font database!
Figlet Frontend - Written by Julius Kammerl - 2005
*/
log.Println(".___ ___. _______ .______ ______ __ __ .______ __ __ __ _______. ___ ___ ")
log.Println("| \\/ | | ____|| _ \\ / || | | | | _ \\ | | | | | | / | / _ \\ / _ ")
log.Println("| \\ / | | |__ | |_) | | ,----'| | | | | |_) | | | | | | | | (----` | | | | | (_) |")
log.Println("| |\\/| | | __| | / | | | | | | | / | | | | | | \\ \\ | | | | > _ < ")
log.Println("| | | | | |____ | |\\ \\----.| `----.| `--' | | |\\ \\----.| | | `--' | .----) | | |_| | _| (_) |")
log.Println("|__| |__| |_______|| _| `._____| \\______| \\______/ | _| `._____||__| \\______/ |_______/ \\___/ (__)___/ ")
app.Run(port())
}
func port() int {
port, err := config.Cfg.Section("").Key("http_port").Int()
if err != nil {
log.Fatal(err)
}
if forceLocal, _ := config.Cfg.Section("").Key("force_local_http_port").Bool(); forceLocal == false {
if i, err := strconv.Atoi(os.Getenv("PORT")); err == nil {
port = i
}
}
return port
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
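port() in examples/simple/main.go above prefers the ini-file value but lets a PORT environment variable override it unless force_local_http_port is set. The same precedence expressed as a small Python sketch (the config dict stands in for the ini section; names are illustrative):
# Sketch: config-file port with an optional PORT env override, mirroring port() above.
import os

def resolve_port(config: dict) -> int:
    port = int(config["http_port"])
    if not config.get("force_local_http_port", False):
        env_port = os.getenv("PORT")
        if env_port and env_port.isdigit():
            port = int(env_port)
    return port

print(resolve_port({"http_port": 8080, "force_local_http_port": False}))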
torchrec/distributed/tests/test_train_pipeline.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from dataclasses import dataclass
from typing import Tuple, List, Optional, Dict
import torch
import torch.distributed as dist
from torch import nn, optim
from torchrec.distributed import DistributedModelParallel
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embedding_types import (
SparseFeaturesList,
)
from torchrec.distributed.embeddingbag import (
ShardedEmbeddingBagCollection,
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.test_utils.test_model import (
TestSparseNN,
ModelInput,
TestEBCSharder,
)
from torchrec.distributed.train_pipeline import (
TrainPipelineBase,
TrainPipelineSparseDist,
)
from torchrec.distributed.types import (
Awaitable,
ParameterSharding,
ShardedModuleContext,
ShardingEnv,
)
from torchrec.distributed.types import (
ShardingType,
)
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.optim.keyed import KeyedOptimizerWrapper
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Pipelineable
from torchrec.test_utils import get_free_port, init_distributed_single_host
class TestShardedEmbeddingBagCollection(ShardedEmbeddingBagCollection):
def input_dist(
self,
ctx: ShardedModuleContext,
features: KeyedJaggedTensor,
) -> Awaitable[SparseFeaturesList]:
return super().input_dist(ctx, features)
class TestCustomEBCSharder(EmbeddingBagCollectionSharder[EmbeddingBagCollection]):
def shard(
self,
module: EmbeddingBagCollection,
params: Dict[str, ParameterSharding],
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> TestShardedEmbeddingBagCollection:
return TestShardedEmbeddingBagCollection(
module, params, env, self.fused_params, device
)
def sharding_types(self, compute_device_type: str) -> List[str]:
return [
ShardingType.TABLE_WISE.value,
]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
return [EmbeddingComputeKernel.DENSE.value]
@dataclass
class ModelInputSimple(Pipelineable):
float_features: torch.Tensor
label: torch.Tensor
def to(self, device: torch.device, non_blocking: bool) -> "ModelInputSimple":
return ModelInputSimple(
float_features=self.float_features.to(
device=device, non_blocking=non_blocking
),
label=self.label.to(device=device, non_blocking=non_blocking),
)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self.float_features.record_stream(stream)
self.label.record_stream(stream)
class TestModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.model = nn.Linear(10, 1)
self.loss_fn = nn.BCEWithLogitsLoss()
def forward(
self, model_input: ModelInputSimple
) -> Tuple[torch.Tensor, torch.Tensor]:
pred = self.model(model_input.float_features)
loss = self.loss_fn(pred, model_input.label)
return (loss, pred)
class TrainPipelineBaseTest(unittest.TestCase):
def setUp(self) -> None:
self.device = torch.device("cuda:0")
torch.backends.cudnn.allow_tf32 = False
torch.backends.cuda.matmul.allow_tf32 = False
# pyre-fixme[56]: Pyre was not able to infer the type of argument
@unittest.skipIf(
torch.cuda.device_count() <= 1,
"Not enough GPUs, this test requires at least two GPUs",
)
def test_equal_to_non_pipelined(self) -> None:
model_cpu = TestModule()
model_gpu = TestModule().to(self.device)
model_gpu.load_state_dict(model_cpu.state_dict())
optimizer_cpu = optim.SGD(model_cpu.model.parameters(), lr=0.01)
optimizer_gpu = optim.SGD(model_gpu.model.parameters(), lr=0.01)
data = [
ModelInputSimple(
float_features=torch.rand((10,)),
label=torch.randint(2, (1,), dtype=torch.float32),
)
for b in range(5)
]
dataloader = iter(data)
pipeline = TrainPipelineBase(model_gpu, optimizer_gpu, self.device)
for example in data[:-1]:
optimizer_cpu.zero_grad()
loss, pred = model_cpu(example)
loss.backward()
optimizer_cpu.step()
pred_gpu = pipeline.progress(dataloader)
self.assertEqual(pred_gpu.device, self.device)
self.assertTrue(torch.isclose(pred_gpu.cpu(), pred))
class TrainPipelineSparseDistTest(unittest.TestCase):
def setUp(self) -> None:
os.environ["MASTER_ADDR"] = str("localhost")
os.environ["MASTER_PORT"] = str(get_free_port())
self.pg = init_distributed_single_host(backend="gloo", rank=0, world_size=1)
num_features = 4
num_weighted_features = 2
self.tables = [
EmbeddingBagConfig(
num_embeddings=(i + 1) * 100,
embedding_dim=(i + 1) * 4,
name="table_" + str(i),
feature_names=["feature_" + str(i)],
)
for i in range(num_features)
]
self.weighted_tables = [
EmbeddingBagConfig(
num_embeddings=(i + 1) * 100,
embedding_dim=(i + 1) * 4,
name="weighted_table_" + str(i),
feature_names=["weighted_feature_" + str(i)],
)
for i in range(num_weighted_features)
]
self.device = torch.device("cuda:0")
def tearDown(self) -> None:
super().tearDown()
dist.destroy_process_group(self.pg)
def _test_move_cpu_gpu_helper(
self, distributed_model: DistributedModelParallel
) -> None:
model_cpu = TestSparseNN(
tables=self.tables, weighted_tables=self.weighted_tables
)
optimizer_cpu = optim.SGD(model_cpu.parameters(), lr=0.1)
optimizer_distributed = KeyedOptimizerWrapper(
dict(distributed_model.named_parameters()),
lambda params: optim.SGD(params, lr=0.1),
)
pipeline = TrainPipelineSparseDist(
distributed_model, optimizer_distributed, self.device
)
data = [
ModelInput.generate(
tables=self.tables,
weighted_tables=self.weighted_tables,
batch_size=1,
world_size=1,
num_float_features=10,
)[0]
for i in range(5)
]
dataloader = iter(data)
for example in data[:-2]:
optimizer_cpu.zero_grad()
loss, pred = model_cpu(example)
loss.backward()
optimizer_cpu.step()
pred_gpu = pipeline.progress(dataloader)
self.assertEqual(pred_gpu.device, self.device)
self.assertEqual(pred_gpu.cpu().size(), pred.size())
self.assertEqual(len(pipeline._pipelined_modules), 2)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
@unittest.skipIf(
torch.cuda.device_count() <= 1,
"Not enough GPUs, this test requires at least two GPUs",
)
def test_move_cpu_gpu(self) -> None:
unsharded_model = TestSparseNN(
tables=self.tables,
weighted_tables=self.weighted_tables,
dense_device=self.device,
sparse_device=torch.device("meta"),
)
distributed_model = DistributedModelParallel(
unsharded_model,
env=ShardingEnv.from_process_group(self.pg),
init_data_parallel=False,
device=self.device,
# pyre-ignore [6]
sharders=[
TestEBCSharder(
sharding_type=ShardingType.TABLE_WISE.value,
kernel_type=EmbeddingComputeKernel.DENSE.value,
)
],
)
self._test_move_cpu_gpu_helper(distributed_model)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
@unittest.skipIf(
torch.cuda.device_count() <= 1,
"Not enough GPUs, this test requires at least two GPUs",
)
def test_pipelining(self) -> None:
unsharded_model = TestSparseNN(
tables=self.tables,
weighted_tables=self.weighted_tables,
dense_device=self.device,
sparse_device=torch.device("meta"),
)
distributed_model = DistributedModelParallel(
unsharded_model,
env=ShardingEnv.from_process_group(self.pg),
init_data_parallel=False,
device=self.device,
# pyre-fixme [6]
sharders=[TestCustomEBCSharder()],
)
self._test_move_cpu_gpu_helper(distributed_model)
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT"]
|
python
| 2 | 0 | |
src/debugpy/_vendored/pydevd/build_tools/build_binaries_osx.py
|
from __future__ import unicode_literals
import os
import subprocess
import sys
miniconda64_envs = os.getenv('MINICONDA64_ENVS')
python_installations = [
r'%s/py26_64/bin/python' % miniconda64_envs,
r'%s/py27_64/bin/python' % miniconda64_envs,
r'%s/py34_64/bin/python' % miniconda64_envs,
r'%s/py35_64/bin/python' % miniconda64_envs,
r'%s/py36_64/bin/python' % miniconda64_envs,
r'%s/py37_64/bin/python' % miniconda64_envs,
]
root_dir = os.path.dirname(os.path.dirname(__file__))
def list_binaries():
for f in os.listdir(os.path.join(root_dir, '_pydevd_bundle')):
if f.endswith('.so'):
yield f
def extract_version(python_install):
return python_install.split('/')[-3][2:]
def main():
from generate_code import generate_dont_trace_files
from generate_code import generate_cython_module
# First, make sure that our code is up to date.
generate_dont_trace_files()
generate_cython_module()
for python_install in python_installations:
assert os.path.exists(python_install)
from build import remove_binaries
remove_binaries(['.so'])
for f in list_binaries():
raise AssertionError('Binary not removed: %s' % (f,))
for i, python_install in enumerate(python_installations):
new_name = 'pydevd_cython_%s_%s' % (sys.platform, extract_version(python_install))
args = [
python_install, os.path.join(root_dir, 'build_tools', 'build.py'), '--no-remove-binaries', '--target-pyd-name=%s' % new_name, '--force-cython']
if i != 0:
args.append('--no-regenerate-files')
version_number = extract_version(python_install)
if version_number.startswith('36') or version_number.startswith('37'):
name_frame_eval = 'pydevd_frame_evaluator_%s_%s' % (sys.platform, extract_version(python_install))
args.append('--target-pyd-frame-eval=%s' % name_frame_eval)
print('Calling: %s' % (' '.join(args)))
subprocess.check_call(args)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"MINICONDA64_ENVS"
] |
[]
|
["MINICONDA64_ENVS"]
|
python
| 1 | 0 | |
tools/jsrun.py
|
import time, os, sys, logging
from subprocess import Popen, PIPE, STDOUT
TRACK_PROCESS_SPAWNS = True if (os.getenv('EM_BUILD_VERBOSE') and int(os.getenv('EM_BUILD_VERBOSE')) >= 3) else False
def timeout_run(proc, timeout=None, note='unnamed process', full_output=False):
start = time.time()
if timeout is not None:
while time.time() - start < timeout and proc.poll() is None:
time.sleep(0.1)
if proc.poll() is None:
proc.kill() # XXX bug: killing emscripten.py does not kill its child process!
raise Exception("Timed out: " + note)
out = proc.communicate()
out = ['' if o is None else o for o in out]
if TRACK_PROCESS_SPAWNS:
logging.info('Process ' + str(proc.pid) + ' finished after ' + str(time.time() - start) + ' seconds.')
return '\n'.join(out) if full_output else out[0]
def run_js(filename, engine=None, args=[], check_timeout=False, stdin=None, stdout=PIPE, stderr=None, cwd=None, full_output=False):
if type(engine) is not list:
engine = [engine]
command = engine + [filename] + (['--'] if 'd8' in engine[0] or 'jsc' in engine[0] else []) + args
proc = Popen(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd)
timeout = 15*60 if check_timeout else None
if TRACK_PROCESS_SPAWNS:
logging.info('Blocking on process ' + str(proc.pid) + ': ' + str(command) + (' for ' + str(timeout) + ' seconds' if timeout else ' until it finishes.'))
return timeout_run(
proc,
timeout,
'Execution',
full_output=full_output)
|
[] |
[] |
[
"EM_BUILD_VERBOSE"
] |
[]
|
["EM_BUILD_VERBOSE"]
|
python
| 1 | 0 | |
train.py
|
from __future__ import print_function
import sys
if len(sys.argv) != 4:
print('Usage:')
print('python3 train.py datacfg cfgfile weightfile')
exit()
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
from torch.autograd import Variable
import dataset
import random
import math
import os
from utils import *
from cfg import parse_cfg
from region_loss import RegionLoss
from darknet import Darknet
from models.tiny_yolo import TinyYoloNet
# Training settings
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
data_options = read_data_cfg(datacfg)
net_options = parse_cfg(cfgfile)[0]
trainlist = data_options['train']
testlist = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus'] # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
#Train parameters
max_epochs = max_batches*batch_size//nsamples+1
use_cuda = True
seed = int(time.time())
eps = 1e-5
save_interval = 10 # epoches
dot_interval = 70 # batches
# Test parameters
conf_thresh = 0.25
nms_thresh = 0.4
iou_thresh = 0.5
if not os.path.exists(backupdir):
os.mkdir(backupdir)
###############
torch.manual_seed(seed)
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
model = Darknet(cfgfile)
region_loss = model.loss
model.load_weights(weightfile)
model.print_network()
region_loss.seen = model.seen
processed_batches = model.seen//batch_size
init_width = model.width
init_height = model.height
init_epoch = model.seen//nsamples
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(testlist, shape=(init_width, init_height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=False),
batch_size=batch_size, shuffle=False, **kwargs)
if use_cuda:
if ngpus > 1:
model = torch.nn.DataParallel(model).cuda()
else:
model = model.cuda()
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if key.find('.bn') >= 0 or key.find('.bias') >= 0:
params += [{'params': [value], 'weight_decay': 0.0}]
else:
params += [{'params': [value], 'weight_decay': decay*batch_size}]
"""
for name,child in model.named_children():
for name,param in child.named_parameters():
if not (29 <= int(name.split('.')[0]) <= 30):
param.requires_grad = False
"""
optimizer = optim.SGD(model.parameters(), lr=learning_rate/batch_size, momentum=momentum, dampening=0, weight_decay=decay*batch_size)
# optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate/batch_size, momentum=momentum, dampening=0, weight_decay=decay*batch_size)
def adjust_learning_rate(optimizer, batch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = learning_rate
for i in range(len(steps)):
scale = scales[i] if i < len(scales) else 1
if batch >= steps[i]:
lr = lr * scale
if batch == steps[i]:
break
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr/batch_size
return lr
def train(epoch):
global processed_batches
t0 = time.time()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
train_loader = torch.utils.data.DataLoader(
dataset.listDataset(trainlist, shape=(init_width, init_height),
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
train=True,
seen=cur_model.seen,
batch_size=batch_size,
num_workers=num_workers),
batch_size=batch_size, shuffle=False, **kwargs)
lr = adjust_learning_rate(optimizer, processed_batches)
logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
model.train()
t1 = time.time()
avg_time = torch.zeros(9)
for batch_idx, (data, target) in enumerate(train_loader):
t2 = time.time()
adjust_learning_rate(optimizer, processed_batches)
processed_batches = processed_batches + 1
#if (batch_idx+1) % dot_interval == 0:
# sys.stdout.write('.')
if use_cuda:
data = data.cuda()
#target= target.cuda()
t3 = time.time()
data, target = Variable(data), Variable(target)
t4 = time.time()
optimizer.zero_grad()
t5 = time.time()
output = model(data)
t6 = time.time()
region_loss.seen = region_loss.seen + data.data.size(0)
loss = region_loss(output, target)
t7 = time.time()
loss.backward()
t8 = time.time()
optimizer.step()
t9 = time.time()
if False and batch_idx > 1:
avg_time[0] = avg_time[0] + (t2-t1)
avg_time[1] = avg_time[1] + (t3-t2)
avg_time[2] = avg_time[2] + (t4-t3)
avg_time[3] = avg_time[3] + (t5-t4)
avg_time[4] = avg_time[4] + (t6-t5)
avg_time[5] = avg_time[5] + (t7-t6)
avg_time[6] = avg_time[6] + (t8-t7)
avg_time[7] = avg_time[7] + (t9-t8)
avg_time[8] = avg_time[8] + (t9-t1)
print('-------------------------------')
print(' load data : %f' % (avg_time[0]/(batch_idx)))
print(' cpu to cuda : %f' % (avg_time[1]/(batch_idx)))
print('cuda to variable : %f' % (avg_time[2]/(batch_idx)))
print(' zero_grad : %f' % (avg_time[3]/(batch_idx)))
print(' forward feature : %f' % (avg_time[4]/(batch_idx)))
print(' forward loss : %f' % (avg_time[5]/(batch_idx)))
print(' backward : %f' % (avg_time[6]/(batch_idx)))
print(' step : %f' % (avg_time[7]/(batch_idx)))
print(' total : %f' % (avg_time[8]/(batch_idx)))
t1 = time.time()
print('')
t1 = time.time()
logging('training with %f samples/s' % (len(train_loader.dataset)/(t1-t0)))
if (epoch+1) % save_interval == 0 or 25 <= epoch+1 <= 35:
logging('save weights to %s/%06d.weights' % (backupdir, epoch+1))
cur_model.seen = (epoch + 1) * len(train_loader.dataset)
cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch+1))
def test(epoch):
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
model.eval()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
num_classes = cur_model.num_classes
anchors = cur_model.anchors
num_anchors = cur_model.num_anchors
total = 0.0
proposals = 0.0
correct = 0.0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if use_cuda:
data = data.cuda()
data = Variable(data)
output = model(data).data
all_boxes = get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors)
for i in range(output.size(0)):
boxes = all_boxes[i]
boxes = nms(boxes, nms_thresh)
truths = target[i].view(-1, 5)
num_gts = truths_length(truths)
total = total + num_gts
for i in range(len(boxes)):
if boxes[i][4] > conf_thresh:
proposals = proposals+1
for i in range(num_gts):
box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0, 1.0, truths[i][0]]
best_iou = 0
best_j = -1
for j in range(len(boxes)):
iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
if iou > best_iou:
best_j = j
best_iou = iou
if best_iou > iou_thresh and boxes[best_j][6] == box_gt[6]:
correct = correct+1
precision = 1.0*correct/(proposals+eps)
recall = 1.0*correct/(total+eps)
fscore = 2.0*precision*recall/(precision+recall+eps)
logging("precision: %f, recall: %f, fscore: %f" % (precision, recall, fscore))
evaluate = False
if evaluate:
logging('evaluating ...')
test(0)
else:
for epoch in range(init_epoch, max_epochs):
train(epoch)
test(epoch)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tests/framework/installer/installer.go
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package installer
import (
"fmt"
"os"
"strings"
"testing"
"github.com/coreos/pkg/capnslog"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
// VersionMaster tag for the latest manifests
VersionMaster = "master"
// test suite names
CassandraTestSuite = "cassandra"
CephTestSuite = "ceph"
CockroachDBTestSuite = "cockroachdb"
EdgeFSTestSuite = "edgefs"
NFSTestSuite = "nfs"
YugabyteDBTestSuite = "yugabytedb"
)
var (
// ** Variables that might need to be changed depending on the dev environment. The init function below will modify some of them automatically. **
baseTestDir string
createBaseTestDir = true
// ** end of Variables to modify
logger = capnslog.NewPackageLogger("github.com/rook/rook", "installer")
createArgs = []string{"create", "-f"}
createFromStdinArgs = append(createArgs, "-")
deleteArgs = []string{"delete", "-f"}
deleteFromStdinArgs = append(deleteArgs, "-")
)
type TestSuite interface {
Setup()
Teardown()
}
func SkipTestSuite(name string) bool {
testsToRun := os.Getenv("STORAGE_PROVIDER_TESTS")
// jenkins passes "null" if the env var is not set.
if testsToRun == "" || testsToRun == "null" {
// run all test suites
return false
}
if strings.EqualFold(testsToRun, name) {
// this suite was requested
return false
}
logger.Infof("skipping test suite since only %s should be tested rather than %s", testsToRun, name)
return true
}
func init() {
// If the base test directory is actively set to empty (as in CI), we use the current working directory.
baseTestDir = Env.BaseTestDir
if baseTestDir == "" {
baseTestDir, _ = os.Getwd()
}
if baseTestDir == "/data" {
createBaseTestDir = false
}
}
func SystemNamespace(namespace string) string {
return fmt.Sprintf("%s-system", namespace)
}
func checkError(t *testing.T, err error, message string) {
// During cleanup the resource might not be found because the test might have failed before it created the resource
if err == nil || errors.IsNotFound(err) {
return
}
assert.NoError(t, err, "%s. %+v", message, err)
}
func concatYaml(first, second string) string {
return first + `
---
` + second
}
|
[
"\"STORAGE_PROVIDER_TESTS\""
] |
[] |
[
"STORAGE_PROVIDER_TESTS"
] |
[]
|
["STORAGE_PROVIDER_TESTS"]
|
go
| 1 | 0 | |
pkg/controller/kubelet_client.go
|
/*
Copyright 2021 Juicedata Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
corev1 "k8s.io/api/core/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport"
"k8s.io/klog"
)
const (
defaultKubeletTimeout = 10
serviceAccountTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)
type kubeletClient struct {
host string
port int
client *http.Client
}
// KubeletClientConfig defines config parameters for the kubelet client
type KubeletClientConfig struct {
// Address specifies the kubelet address
Address string
// Port specifies the default port - used if no information about Kubelet port can be found in Node.NodeStatus.DaemonEndpoints.
Port int
// TLSClientConfig contains settings to enable transport layer security
restclient.TLSClientConfig
// Server requires Bearer authentication
BearerToken string
// HTTPTimeout is used by the client to timeout http requests to Kubelet.
HTTPTimeout time.Duration
}
// makeTransport creates a RoundTripper for HTTP Transport.
func makeTransport(config *KubeletClientConfig, insecureSkipTLSVerify bool) (http.RoundTripper, error) {
// do the insecureSkipTLSVerify on the pre-transport *before* we go get a potentially cached connection.
// transportConfig always produces a new struct pointer.
preTLSConfig := config.transportConfig()
if insecureSkipTLSVerify && preTLSConfig != nil {
preTLSConfig.TLS.Insecure = true
preTLSConfig.TLS.CAData = nil
preTLSConfig.TLS.CAFile = ""
}
tlsConfig, err := transport.TLSConfigFor(preTLSConfig)
if err != nil {
return nil, err
}
rt := http.DefaultTransport
if tlsConfig != nil {
// Use a transport configured with the provided TLS settings
rt = utilnet.SetOldTransportDefaults(&http.Transport{
TLSClientConfig: tlsConfig,
})
}
return transport.HTTPWrappersForConfig(config.transportConfig(), rt)
}
// transportConfig converts a client config to an appropriate transport config.
func (c *KubeletClientConfig) transportConfig() *transport.Config {
cfg := &transport.Config{
TLS: transport.TLSConfig{
CAFile: c.CAFile,
CAData: c.CAData,
CertFile: c.CertFile,
CertData: c.CertData,
KeyFile: c.KeyFile,
KeyData: c.KeyData,
},
BearerToken: c.BearerToken,
}
if !cfg.HasCA() {
cfg.TLS.Insecure = true
}
return cfg
}
func newKubeletClient(host string, port int) (*kubeletClient, error) {
var token string
var err error
kubeletClientCert := os.Getenv("KUBELET_CLIENT_CERT")
kubeletClientKey := os.Getenv("KUBELET_CLIENT_KEY")
if kubeletClientCert == "" && kubeletClientKey == "" {
// get CSI sa token
tokenByte, err := ioutil.ReadFile(serviceAccountTokenFile)
if err != nil {
return nil, fmt.Errorf("in cluster mode, find token failed: %v", err)
}
token = string(tokenByte)
}
kubeletTimeout := defaultKubeletTimeout
if os.Getenv("KUBELET_TIMEOUT") != "" {
if kubeletTimeout, err = strconv.Atoi(os.Getenv("KUBELET_TIMEOUT")); err != nil {
return nil, fmt.Errorf("got error when parsing kubelet timeout: %v", err)
}
}
config := &KubeletClientConfig{
Address: host,
Port: port,
TLSClientConfig: rest.TLSClientConfig{
ServerName: "kubelet",
Insecure: true,
CertFile: kubeletClientCert,
KeyFile: kubeletClientKey,
},
BearerToken: token,
HTTPTimeout: time.Duration(kubeletTimeout) * time.Second,
}
trans, err := makeTransport(config, config.Insecure)
if err != nil {
return nil, err
}
return &kubeletClient{
host: config.Address,
port: config.Port,
client: &http.Client{
Transport: trans,
Timeout: config.HTTPTimeout,
},
}, nil
}
func (kc *kubeletClient) GetNodeRunningPods() (*corev1.PodList, error) {
resp, err := kc.client.Get(fmt.Sprintf("https://%v:%d/pods/", kc.host, kc.port))
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
podLists := &corev1.PodList{}
if err = json.Unmarshal(body, &podLists); err != nil {
klog.V(5).Infof("GetNodeRunningPods err: %s", body)
return nil, err
}
return podLists, err
}
|
[
"\"KUBELET_CLIENT_CERT\"",
"\"KUBELET_CLIENT_KEY\"",
"\"KUBELET_TIMEOUT\"",
"\"KUBELET_TIMEOUT\""
] |
[] |
[
"KUBELET_TIMEOUT",
"KUBELET_CLIENT_CERT",
"KUBELET_CLIENT_KEY"
] |
[]
|
["KUBELET_TIMEOUT", "KUBELET_CLIENT_CERT", "KUBELET_CLIENT_KEY"]
|
go
| 3 | 0 | |
example/bucket/delete.go
|
package main
import (
"context"
"net/url"
"os"
"net/http"
"github.com/huanght1997/cos-go-sdk-v5"
"github.com/huanght1997/cos-go-sdk-v5/debug"
)
func main() {
u, _ := url.Parse("https://testdelete-1253846586.cos.ap-guangzhou.myqcloud.com")
b := &cos.BaseURL{
BucketURL: u,
}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
Transport: &debug.DebugRequestTransport{
RequestHeader: true,
RequestBody: true,
ResponseHeader: true,
ResponseBody: true,
},
},
})
_, err := c.Bucket.Delete(context.Background())
if err != nil {
panic(err)
}
}
|
[
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
] |
[] |
[
"COS_SECRETKEY",
"COS_SECRETID"
] |
[]
|
["COS_SECRETKEY", "COS_SECRETID"]
|
go
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'where_to_go.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vote/app.py
|
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "Portland")
option_b = os.getenv('OPTION_B', "Kings Island")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
redis_host = os.getenv('REDIS_HOST')
g.redis = Redis(host=redis_host, db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
voter_id = hex(random.getrandbits(64))[2:-1]
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
resp.set_cookie('voter_id', voter_id)
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
|
[] |
[] |
[
"OPTION_A",
"REDIS_HOST",
"OPTION_B"
] |
[]
|
["OPTION_A", "REDIS_HOST", "OPTION_B"]
|
python
| 3 | 0 | |
lib-python/3/test/test_importlib/util.py
|
import builtins
import contextlib
import errno
import functools
import importlib
from importlib import machinery, util, invalidate_caches
import os
import os.path
from test import support
import unittest
import sys
import tempfile
import types
BUILTINS = types.SimpleNamespace()
BUILTINS.good_name = None
BUILTINS.bad_name = None
if 'errno' in sys.builtin_module_names:
BUILTINS.good_name = 'errno'
if 'importlib' not in sys.builtin_module_names:
BUILTINS.bad_name = 'importlib'
EXTENSIONS = types.SimpleNamespace()
EXTENSIONS.path = None
EXTENSIONS.ext = None
EXTENSIONS.filename = None
EXTENSIONS.file_path = None
EXTENSIONS.name = '_testcapi'
def _extension_details():
global EXTENSIONS
# we need this hack on PyPy because _testcapi is built lazily
import _testcapi
import _pypy_testcapi
lib_pypy_dir = os.path.dirname(_pypy_testcapi.__file__)
c_file = os.path.join(lib_pypy_dir, '_testcapimodule.c')
for path in [_pypy_testcapi.get_hashed_dir(c_file)]:
for ext in machinery.EXTENSION_SUFFIXES:
filename = EXTENSIONS.name + ext
file_path = os.path.join(path, filename)
if os.path.exists(file_path):
EXTENSIONS.path = path
EXTENSIONS.ext = ext
EXTENSIONS.filename = filename
EXTENSIONS.file_path = file_path
return
_extension_details()
def import_importlib(module_name):
"""Import a module from importlib both w/ and w/o _frozen_importlib."""
fresh = ('importlib',) if '.' in module_name else ()
frozen = support.import_fresh_module(module_name)
source = support.import_fresh_module(module_name, fresh=fresh,
blocked=('_frozen_importlib', '_frozen_importlib_external'))
return {'Frozen': frozen, 'Source': source}
def specialize_class(cls, kind, base=None, **kwargs):
# XXX Support passing in submodule names--load (and cache) them?
# That would clean up the test modules a bit more.
if base is None:
base = unittest.TestCase
elif not isinstance(base, type):
base = base[kind]
name = '{}_{}'.format(kind, cls.__name__)
bases = (cls, base)
specialized = types.new_class(name, bases)
specialized.__module__ = cls.__module__
specialized._NAME = cls.__name__
specialized._KIND = kind
for attr, values in kwargs.items():
value = values[kind]
setattr(specialized, attr, value)
return specialized
def split_frozen(cls, base=None, **kwargs):
frozen = specialize_class(cls, 'Frozen', base, **kwargs)
source = specialize_class(cls, 'Source', base, **kwargs)
return frozen, source
def test_both(test_class, base=None, **kwargs):
return split_frozen(test_class, base, **kwargs)
CASE_INSENSITIVE_FS = True
# Windows is the only OS that is *always* case-insensitive
# (OS X *can* be case-sensitive).
if sys.platform not in ('win32', 'cygwin'):
changed_name = __file__.upper()
if changed_name == __file__:
changed_name = __file__.lower()
if not os.path.exists(changed_name):
CASE_INSENSITIVE_FS = False
source_importlib = import_importlib('importlib')['Source']
__import__ = {'Frozen': staticmethod(builtins.__import__),
'Source': staticmethod(source_importlib.__import__)}
def case_insensitive_tests(test):
"""Class decorator that nullifies tests requiring a case-insensitive
file system."""
return unittest.skipIf(not CASE_INSENSITIVE_FS,
"requires a case-insensitive filesystem")(test)
def submodule(parent, name, pkg_dir, content=''):
path = os.path.join(pkg_dir, name + '.py')
with open(path, 'w') as subfile:
subfile.write(content)
return '{}.{}'.format(parent, name), path
@contextlib.contextmanager
def uncache(*names):
"""Uncache a module from sys.modules.
A basic sanity check is performed to prevent uncaching modules that either
cannot or shouldn't be uncached.
"""
for name in names:
if name in ('sys', 'marshal', 'imp'):
raise ValueError(
"cannot uncache {0}".format(name))
try:
del sys.modules[name]
except KeyError:
pass
try:
yield
finally:
for name in names:
try:
del sys.modules[name]
except KeyError:
pass
@contextlib.contextmanager
def temp_module(name, content='', *, pkg=False):
conflicts = [n for n in sys.modules if n.partition('.')[0] == name]
with support.temp_cwd(None) as cwd:
with uncache(name, *conflicts):
with support.DirsOnSysPath(cwd):
invalidate_caches()
location = os.path.join(cwd, name)
if pkg:
modpath = os.path.join(location, '__init__.py')
os.mkdir(name)
else:
modpath = location + '.py'
if content is None:
# Make sure the module file gets created.
content = ''
if content is not None:
# not a namespace package
with open(modpath, 'w') as modfile:
modfile.write(content)
yield location
@contextlib.contextmanager
def import_state(**kwargs):
"""Context manager to manage the various importers and stored state in the
sys module.
The 'modules' attribute is not supported as the interpreter state stores a
pointer to the dict that the interpreter uses internally;
reassigning to sys.modules does not have the desired effect.
"""
originals = {}
try:
for attr, default in (('meta_path', []), ('path', []),
('path_hooks', []),
('path_importer_cache', {})):
originals[attr] = getattr(sys, attr)
if attr in kwargs:
new_value = kwargs[attr]
del kwargs[attr]
else:
new_value = default
setattr(sys, attr, new_value)
if len(kwargs):
raise ValueError(
'unrecognized arguments: {0}'.format(kwargs.keys()))
yield
finally:
for attr, value in originals.items():
setattr(sys, attr, value)
class _ImporterMock:
"""Base class to help with creating importer mocks."""
def __init__(self, *names, module_code={}):
self.modules = {}
self.module_code = {}
for name in names:
if not name.endswith('.__init__'):
import_name = name
else:
import_name = name[:-len('.__init__')]
if '.' not in name:
package = None
elif import_name == name:
package = name.rsplit('.', 1)[0]
else:
package = import_name
module = types.ModuleType(import_name)
module.__loader__ = self
module.__file__ = '<mock __file__>'
module.__package__ = package
module.attr = name
if import_name != name:
module.__path__ = ['<mock __path__>']
self.modules[import_name] = module
if import_name in module_code:
self.module_code[import_name] = module_code[import_name]
def __getitem__(self, name):
return self.modules[name]
def __enter__(self):
self._uncache = uncache(*self.modules.keys())
self._uncache.__enter__()
return self
def __exit__(self, *exc_info):
self._uncache.__exit__(None, None, None)
class mock_modules(_ImporterMock):
"""Importer mock using PEP 302 APIs."""
def find_module(self, fullname, path=None):
if fullname not in self.modules:
return None
else:
return self
def load_module(self, fullname):
if fullname not in self.modules:
raise ImportError
else:
sys.modules[fullname] = self.modules[fullname]
if fullname in self.module_code:
try:
self.module_code[fullname]()
except Exception:
del sys.modules[fullname]
raise
return self.modules[fullname]
class mock_spec(_ImporterMock):
"""Importer mock using PEP 451 APIs."""
def find_spec(self, fullname, path=None, parent=None):
try:
module = self.modules[fullname]
except KeyError:
return None
spec = util.spec_from_file_location(
fullname, module.__file__, loader=self,
submodule_search_locations=getattr(module, '__path__', None))
return spec
def create_module(self, spec):
if spec.name not in self.modules:
raise ImportError
return self.modules[spec.name]
def exec_module(self, module):
try:
self.module_code[module.__spec__.name]()
except KeyError:
pass
def writes_bytecode_files(fxn):
"""Decorator to protect sys.dont_write_bytecode from mutation and to skip
tests that require it to be set to False."""
if sys.dont_write_bytecode:
return lambda *args, **kwargs: None
@functools.wraps(fxn)
def wrapper(*args, **kwargs):
original = sys.dont_write_bytecode
sys.dont_write_bytecode = False
try:
to_return = fxn(*args, **kwargs)
finally:
sys.dont_write_bytecode = original
return to_return
return wrapper
def ensure_bytecode_path(bytecode_path):
"""Ensure that the __pycache__ directory for PEP 3147 pyc file exists.
:param bytecode_path: File system path to PEP 3147 pyc file.
"""
try:
os.mkdir(os.path.dirname(bytecode_path))
except OSError as error:
if error.errno != errno.EEXIST:
raise
@contextlib.contextmanager
def create_modules(*names):
"""Temporarily create each named module with an attribute (named 'attr')
that contains the name passed into the context manager that caused the
creation of the module.
All files are created in a temporary directory returned by
tempfile.mkdtemp(). This directory is inserted at the beginning of
sys.path. When the context manager exits all created files (source and
bytecode) are explicitly deleted.
No magic is performed when creating packages! This means that if you create
a module within a package you must also create the package's __init__ as
well.
"""
source = 'attr = {0!r}'
created_paths = []
mapping = {}
state_manager = None
uncache_manager = None
try:
temp_dir = tempfile.mkdtemp()
mapping['.root'] = temp_dir
import_names = set()
for name in names:
if not name.endswith('__init__'):
import_name = name
else:
import_name = name[:-len('.__init__')]
import_names.add(import_name)
if import_name in sys.modules:
del sys.modules[import_name]
name_parts = name.split('.')
file_path = temp_dir
for directory in name_parts[:-1]:
file_path = os.path.join(file_path, directory)
if not os.path.exists(file_path):
os.mkdir(file_path)
created_paths.append(file_path)
file_path = os.path.join(file_path, name_parts[-1] + '.py')
with open(file_path, 'w') as file:
file.write(source.format(name))
created_paths.append(file_path)
mapping[name] = file_path
uncache_manager = uncache(*import_names)
uncache_manager.__enter__()
state_manager = import_state(path=[temp_dir])
state_manager.__enter__()
yield mapping
finally:
if state_manager is not None:
state_manager.__exit__(None, None, None)
if uncache_manager is not None:
uncache_manager.__exit__(None, None, None)
support.rmtree(temp_dir)
def mock_path_hook(*entries, importer):
"""A mock sys.path_hooks entry."""
def hook(entry):
if entry not in entries:
raise ImportError
return importer
return hook
class CASEOKTestBase:
def caseok_env_changed(self, *, should_exist):
possibilities = b'PYTHONCASEOK', 'PYTHONCASEOK'
if any(x in self.importlib._bootstrap_external._os.environ
for x in possibilities) != should_exist:
self.skipTest('os.environ changes not reflected in _os.environ')
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
nats_test.go
|
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nats
////////////////////////////////////////////////////////////////////////////////
// Package scoped specific tests here..
////////////////////////////////////////////////////////////////////////////////
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/nats-io/gnatsd/server"
gnatsd "github.com/nats-io/gnatsd/test"
"github.com/nats-io/nkeys"
)
func TestVersion(t *testing.T) {
// Semantic versioning
verRe := regexp.MustCompile(`\d+.\d+.\d+(-\S+)?`)
if !verRe.MatchString(Version) {
t.Fatalf("Version not compatible with semantic versioning: %q", Version)
}
}
// Dumb wait program to sync on callbacks, etc... Will timeout
func Wait(ch chan bool) error {
return WaitTime(ch, 5*time.Second)
}
func WaitTime(ch chan bool, timeout time.Duration) error {
select {
case <-ch:
return nil
case <-time.After(timeout):
}
return errors.New("timeout")
}
func stackFatalf(t *testing.T, f string, args ...interface{}) {
lines := make([]string, 0, 32)
msg := fmt.Sprintf(f, args...)
lines = append(lines, msg)
// Generate the Stack of callers: Skip us and verify* frames.
for i := 1; true; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
msg := fmt.Sprintf("%d - %s:%d", i, file, line)
lines = append(lines, msg)
}
t.Fatalf("%s", strings.Join(lines, "\n"))
}
func TestVersionMatchesTag(t *testing.T) {
tag := os.Getenv("TRAVIS_TAG")
if tag == "" {
t.SkipNow()
}
// We expect a tag of the form vX.Y.Z. If that's not the case,
// we need someone to have a look. So fail if first letter is not
// a `v`
if tag[0] != 'v' {
t.Fatalf("Expect tag to start with `v`, tag is: %s", tag)
}
// Strip the `v` from the tag for the version comparison.
if Version != tag[1:] {
t.Fatalf("Version (%s) does not match tag (%s)", Version, tag[1:])
}
}
////////////////////////////////////////////////////////////////////////////////
// Reconnect tests
////////////////////////////////////////////////////////////////////////////////
const TEST_PORT = 8368
var reconnectOpts = Options{
Url: fmt.Sprintf("nats://localhost:%d", TEST_PORT),
AllowReconnect: true,
MaxReconnect: 10,
ReconnectWait: 100 * time.Millisecond,
Timeout: DefaultTimeout,
}
func RunServerOnPort(port int) *server.Server {
opts := gnatsd.DefaultTestOptions
opts.Port = port
return RunServerWithOptions(opts)
}
func RunServerWithOptions(opts server.Options) *server.Server {
return gnatsd.RunServer(&opts)
}
func TestReconnectServerStats(t *testing.T) {
ts := RunServerOnPort(TEST_PORT)
opts := reconnectOpts
nc, _ := opts.Connect()
defer nc.Close()
nc.Flush()
ts.Shutdown()
// server is stopped here...
ts = RunServerOnPort(TEST_PORT)
defer ts.Shutdown()
if err := nc.FlushTimeout(5 * time.Second); err != nil {
t.Fatalf("Error on Flush: %v", err)
}
// Make sure the server who is reconnected has the reconnects stats reset.
nc.mu.Lock()
_, cur := nc.currentServer()
nc.mu.Unlock()
if cur.reconnects != 0 {
t.Fatalf("Current Server's reconnects should be 0 vs %d\n", cur.reconnects)
}
}
func TestParseStateReconnectFunctionality(t *testing.T) {
ts := RunServerOnPort(TEST_PORT)
ch := make(chan bool)
opts := reconnectOpts
dch := make(chan bool)
opts.DisconnectedCB = func(_ *Conn) {
dch <- true
}
nc, errc := opts.Connect()
if errc != nil {
t.Fatalf("Failed to create a connection: %v\n", errc)
}
ec, errec := NewEncodedConn(nc, DEFAULT_ENCODER)
if errec != nil {
nc.Close()
t.Fatalf("Failed to create an encoded connection: %v\n", errec)
}
defer ec.Close()
testString := "bar"
ec.Subscribe("foo", func(s string) {
if s != testString {
t.Fatal("String doesn't match")
}
ch <- true
})
ec.Flush()
// Got a RACE condition with Travis build. The locking below does not
// really help because the parser running in the readLoop accesses
// nc.ps without the connection lock. Sleeping may help better since
// it would make the memory write in parse.go (when processing the
// pong) further away from the modification below.
time.Sleep(1 * time.Second)
// Simulate partialState, this needs to be cleared
nc.mu.Lock()
nc.ps.state = OP_PON
nc.mu.Unlock()
ts.Shutdown()
// server is stopped here...
if err := Wait(dch); err != nil {
t.Fatal("Did not get the DisconnectedCB")
}
if err := ec.Publish("foo", testString); err != nil {
t.Fatalf("Failed to publish message: %v\n", err)
}
ts = RunServerOnPort(TEST_PORT)
defer ts.Shutdown()
if err := ec.FlushTimeout(5 * time.Second); err != nil {
t.Fatalf("Error on Flush: %v", err)
}
if err := Wait(ch); err != nil {
t.Fatal("Did not receive our message")
}
expectedReconnectCount := uint64(1)
reconnectedCount := ec.Conn.Stats().Reconnects
if reconnectedCount != expectedReconnectCount {
t.Fatalf("Reconnect count incorrect: %d vs %d\n",
reconnectedCount, expectedReconnectCount)
}
}
////////////////////////////////////////////////////////////////////////////////
// ServerPool tests
////////////////////////////////////////////////////////////////////////////////
var testServers = []string{
"nats://localhost:1222",
"nats://localhost:1223",
"nats://localhost:1224",
"nats://localhost:1225",
"nats://localhost:1226",
"nats://localhost:1227",
"nats://localhost:1228",
}
func TestSimplifiedURLs(t *testing.T) {
opts := GetDefaultOptions()
opts.NoRandomize = true
opts.Servers = []string{
"nats://host1:1234",
"nats://host2:",
"nats://host3",
"host4:1234",
"host5:",
"host6",
"nats://[1:2:3:4]:1234",
"nats://[5:6:7:8]:",
"nats://[9:10:11:12]",
"[13:14:15:16]:",
"[17:18:19:20]:1234",
}
// We expect the result in the server pool to be:
expected := []string{
"nats://host1:1234",
"nats://host2:4222",
"nats://host3:4222",
"nats://host4:1234",
"nats://host5:4222",
"nats://host6:4222",
"nats://[1:2:3:4]:1234",
"nats://[5:6:7:8]:4222",
"nats://[9:10:11:12]:4222",
"nats://[13:14:15:16]:4222",
"nats://[17:18:19:20]:1234",
}
nc := &Conn{Opts: opts}
if err := nc.setupServerPool(); err != nil {
t.Fatalf("Problem setting up Server Pool: %v\n", err)
}
// Check server pool directly
for i, u := range nc.srvPool {
if u.url.String() != expected[i] {
t.Fatalf("Expected url %q, got %q", expected[i], u.url.String())
}
}
}
func TestServersRandomize(t *testing.T) {
opts := GetDefaultOptions()
opts.Servers = testServers
nc := &Conn{Opts: opts}
if err := nc.setupServerPool(); err != nil {
t.Fatalf("Problem setting up Server Pool: %v\n", err)
}
// Build []string from srvPool
clientServers := []string{}
for _, s := range nc.srvPool {
clientServers = append(clientServers, s.url.String())
}
// In theory this could happen..
if reflect.DeepEqual(testServers, clientServers) {
t.Fatalf("ServerPool list not randomized\n")
}
// Now test that we do not randomize if proper flag is set.
opts = GetDefaultOptions()
opts.Servers = testServers
opts.NoRandomize = true
nc = &Conn{Opts: opts}
if err := nc.setupServerPool(); err != nil {
t.Fatalf("Problem setting up Server Pool: %v\n", err)
}
// Build []string from srvPool
clientServers = []string{}
for _, s := range nc.srvPool {
clientServers = append(clientServers, s.url.String())
}
if !reflect.DeepEqual(testServers, clientServers) {
t.Fatalf("ServerPool list should not be randomized\n")
}
// Although the original intent was that if Opts.Url is
// set, Opts.Servers is not (and vice versa), the behavior
// is that Opts.Url is always first, even when randomization
// is enabled. So make sure that this is still the case.
opts = GetDefaultOptions()
opts.Url = DefaultURL
opts.Servers = testServers
nc = &Conn{Opts: opts}
if err := nc.setupServerPool(); err != nil {
t.Fatalf("Problem setting up Server Pool: %v\n", err)
}
// Build []string from srvPool
clientServers = []string{}
for _, s := range nc.srvPool {
clientServers = append(clientServers, s.url.String())
}
// In theory this could happen..
if reflect.DeepEqual(testServers, clientServers) {
t.Fatalf("ServerPool list not randomized\n")
}
if clientServers[0] != DefaultURL {
t.Fatalf("Options.Url should be first in the array, got %v", clientServers[0])
}
}
func TestSelectNextServer(t *testing.T) {
opts := GetDefaultOptions()
opts.Servers = testServers
opts.NoRandomize = true
nc := &Conn{Opts: opts}
if err := nc.setupServerPool(); err != nil {
t.Fatalf("Problem setting up Server Pool: %v\n", err)
}
if nc.current != nc.srvPool[0] {
t.Fatalf("Wrong default selection: %v\n", nc.current.url)
}
sel, err := nc.selectNextServer()
if err != nil {
t.Fatalf("Got an err: %v\n", err)
}
// Check that we are now looking at #2, and current is now last.
if len(nc.srvPool) != len(testServers) {
t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers))
}
if nc.current.url.String() != testServers[1] {
t.Fatalf("Selection incorrect: %v vs %v\n", nc.current.url, testServers[1])
}
if nc.srvPool[len(nc.srvPool)-1].url.String() != testServers[0] {
t.Fatalf("Did not push old to last position\n")
}
if sel != nc.srvPool[0] {
t.Fatalf("Did not return correct server: %v vs %v\n", sel.url, nc.srvPool[0].url)
}
// Test that we do not keep servers where we have tried to reconnect past our limit.
nc.srvPool[0].reconnects = int(opts.MaxReconnect)
if _, err := nc.selectNextServer(); err != nil {
t.Fatalf("Got an err: %v\n", err)
}
// Check that we are now looking at #3, and current is not in the list.
if len(nc.srvPool) != len(testServers)-1 {
t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers)-1)
}
if nc.current.url.String() != testServers[2] {
t.Fatalf("Selection incorrect: %v vs %v\n", nc.current.url, testServers[2])
}
if nc.srvPool[len(nc.srvPool)-1].url.String() == testServers[1] {
t.Fatalf("Did not throw away the last server correctly\n")
}
}
// This will test that comma separated url strings work properly for
// the Connect() command.
func TestUrlArgument(t *testing.T) {
check := func(url string, expected []string) {
if !reflect.DeepEqual(processUrlString(url), expected) {
t.Fatalf("Got wrong response processing URL: %q, RES: %#v\n", url, processUrlString(url))
}
}
// This is normal case
oneExpected := []string{"nats://localhost:1222"}
check("nats://localhost:1222", oneExpected)
check("nats://localhost:1222 ", oneExpected)
check(" nats://localhost:1222", oneExpected)
check(" nats://localhost:1222 ", oneExpected)
var multiExpected = []string{
"nats://localhost:1222",
"nats://localhost:1223",
"nats://localhost:1224",
}
check("nats://localhost:1222,nats://localhost:1223,nats://localhost:1224", multiExpected)
check("nats://localhost:1222, nats://localhost:1223, nats://localhost:1224", multiExpected)
check(" nats://localhost:1222, nats://localhost:1223, nats://localhost:1224 ", multiExpected)
check("nats://localhost:1222, nats://localhost:1223 ,nats://localhost:1224", multiExpected)
}
func TestParserPing(t *testing.T) {
c := &Conn{}
fake := &bytes.Buffer{}
c.bw = bufio.NewWriterSize(fake, c.Opts.ReconnectBufSize)
c.ps = &parseState{}
if c.ps.state != OP_START {
t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
}
ping := []byte("PING\r\n")
err := c.parse(ping[:1])
if err != nil || c.ps.state != OP_P {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(ping[1:2])
if err != nil || c.ps.state != OP_PI {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(ping[2:3])
if err != nil || c.ps.state != OP_PIN {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(ping[3:4])
if err != nil || c.ps.state != OP_PING {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(ping[4:5])
if err != nil || c.ps.state != OP_PING {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(ping[5:6])
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(ping)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Should tolerate spaces
ping = []byte("PING \r")
err = c.parse(ping)
if err != nil || c.ps.state != OP_PING {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
c.ps.state = OP_START
ping = []byte("PING \r \n")
err = c.parse(ping)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
}
func TestParserErr(t *testing.T) {
c := &Conn{}
c.status = CLOSED
fake := &bytes.Buffer{}
c.bw = bufio.NewWriterSize(fake, c.Opts.ReconnectBufSize)
c.ps = &parseState{}
// This test focuses on the parser only, not how the error is
// actually processed by the upper layer.
if c.ps.state != OP_START {
t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
}
expectedError := "'Any kind of error'"
errProto := []byte("-ERR " + expectedError + "\r\n")
err := c.parse(errProto[:1])
if err != nil || c.ps.state != OP_MINUS {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[1:2])
if err != nil || c.ps.state != OP_MINUS_E {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[2:3])
if err != nil || c.ps.state != OP_MINUS_ER {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[3:4])
if err != nil || c.ps.state != OP_MINUS_ERR {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[4:5])
if err != nil || c.ps.state != OP_MINUS_ERR_SPC {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[5:6])
if err != nil || c.ps.state != OP_MINUS_ERR_SPC {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Check with split arg buffer
err = c.parse(errProto[6:7])
if err != nil || c.ps.state != MINUS_ERR_ARG {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[7:10])
if err != nil || c.ps.state != MINUS_ERR_ARG {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[10 : len(errProto)-2])
if err != nil || c.ps.state != MINUS_ERR_ARG {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
if c.ps.argBuf == nil {
t.Fatal("ArgBuf should not be nil")
}
s := string(c.ps.argBuf)
if s != expectedError {
t.Fatalf("Expected %v, got %v", expectedError, s)
}
err = c.parse(errProto[len(errProto)-2:])
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Check without split arg buffer
errProto = []byte("-ERR 'Any error'\r\n")
err = c.parse(errProto)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
}
func TestParserOK(t *testing.T) {
c := &Conn{}
c.ps = &parseState{}
if c.ps.state != OP_START {
t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
}
errProto := []byte("+OKay\r\n")
err := c.parse(errProto[:1])
if err != nil || c.ps.state != OP_PLUS {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[1:2])
if err != nil || c.ps.state != OP_PLUS_O {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[2:3])
if err != nil || c.ps.state != OP_PLUS_OK {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(errProto[3:])
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
}
func TestParserShouldFail(t *testing.T) {
c := &Conn{}
c.ps = &parseState{}
if err := c.parse([]byte(" PING")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("POO")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("Px")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("PIx")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("PINx")); err == nil {
t.Fatal("Should have received a parse error")
}
// Stop here because 'PING' protos are tolerant of anything between PING and \n
c.ps.state = OP_START
if err := c.parse([]byte("POx")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("PONx")); err == nil {
t.Fatal("Should have received a parse error")
}
// Stop here because 'PONG' protos are tolerant of anything between PONG and \n
c.ps.state = OP_START
if err := c.parse([]byte("ZOO")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("Mx\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSx\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSGx\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSG foo\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSG \r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSG foo 1\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSG foo bar 1\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSG foo bar 1 baz\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("MSG foo 1 bar baz\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("+x\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("+Ox\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("-x\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("-Ex\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("-ERx\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
c.ps.state = OP_START
if err := c.parse([]byte("-ERRx\r\n")); err == nil {
t.Fatal("Should have received a parse error")
}
}
func TestParserSplitMsg(t *testing.T) {
nc := &Conn{}
nc.ps = &parseState{}
buf := []byte("MSG a\r\n")
err := nc.parse(buf)
if err == nil {
t.Fatal("Expected an error")
}
nc.ps = &parseState{}
buf = []byte("MSG a b c\r\n")
err = nc.parse(buf)
if err == nil {
t.Fatal("Expected an error")
}
nc.ps = &parseState{}
expectedCount := uint64(1)
expectedSize := uint64(3)
buf = []byte("MSG a")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if nc.ps.argBuf == nil {
t.Fatal("Arg buffer should have been created")
}
buf = []byte(" 1 3\r\nf")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if nc.ps.ma.size != 3 {
t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size)
}
if nc.ps.ma.sid != 1 {
t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
}
if string(nc.ps.ma.subject) != "a" {
t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
}
if nc.ps.msgBuf == nil {
t.Fatal("Msg buffer should have been created")
}
buf = []byte("oo\r\n")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
}
if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
t.Fatal("Buffers should be nil now")
}
buf = []byte("MSG a 1 3\r\nfo")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if nc.ps.ma.size != 3 {
t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size)
}
if nc.ps.ma.sid != 1 {
t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
}
if string(nc.ps.ma.subject) != "a" {
t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
}
if nc.ps.argBuf == nil {
t.Fatal("Arg buffer should have been created")
}
if nc.ps.msgBuf == nil {
t.Fatal("Msg buffer should have been created")
}
expectedCount++
expectedSize += 3
buf = []byte("o\r\n")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
}
if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
t.Fatal("Buffers should be nil now")
}
buf = []byte("MSG a 1 6\r\nfo")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if nc.ps.ma.size != 6 {
t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size)
}
if nc.ps.ma.sid != 1 {
t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
}
if string(nc.ps.ma.subject) != "a" {
t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
}
if nc.ps.argBuf == nil {
t.Fatal("Arg buffer should have been created")
}
if nc.ps.msgBuf == nil {
t.Fatal("Msg buffer should have been created")
}
buf = []byte("ob")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
expectedCount++
expectedSize += 6
buf = []byte("ar\r\n")
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
}
if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
t.Fatal("Buffers should be nil now")
}
// Let's have a msg that is bigger than the parser's scratch size.
// Since we prepopulate the msg with 'foo', adding 3 to the size.
msgSize := cap(nc.ps.scratch) + 100 + 3
buf = []byte(fmt.Sprintf("MSG a 1 b %d\r\nfoo", msgSize))
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if nc.ps.ma.size != msgSize {
t.Fatalf("Wrong msg size: %d instead of %d", nc.ps.ma.size, msgSize)
}
if nc.ps.ma.sid != 1 {
t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid)
}
if string(nc.ps.ma.subject) != "a" {
t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject))
}
if string(nc.ps.ma.reply) != "b" {
t.Fatalf("Wrong reply: '%s' instead of 'b'", string(nc.ps.ma.reply))
}
if nc.ps.argBuf == nil {
t.Fatal("Arg buffer should have been created")
}
if nc.ps.msgBuf == nil {
t.Fatal("Msg buffer should have been created")
}
expectedCount++
expectedSize += uint64(msgSize)
bufSize := msgSize - 3
buf = make([]byte, bufSize)
for i := 0; i < bufSize; i++ {
buf[i] = byte('a' + (i % 26))
}
err = nc.parse(buf)
if err != nil {
t.Fatalf("Parser error: %v", err)
}
if nc.ps.state != MSG_PAYLOAD {
t.Fatalf("Wrong state: %v instead of %v", nc.ps.state, MSG_PAYLOAD)
}
if nc.ps.ma.size != msgSize {
t.Fatalf("Wrong (ma) msg size: %d instead of %d", nc.ps.ma.size, msgSize)
}
if len(nc.ps.msgBuf) != msgSize {
t.Fatalf("Wrong msg size: %d instead of %d", len(nc.ps.msgBuf), msgSize)
}
// Check content:
if string(nc.ps.msgBuf[0:3]) != "foo" {
t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf))
}
for k := 3; k < nc.ps.ma.size; k++ {
if nc.ps.msgBuf[k] != byte('a'+((k-3)%26)) {
t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf))
}
}
buf = []byte("\r\n")
if err := nc.parse(buf); err != nil {
t.Fatalf("Unexpected error during parsing: %v", err)
}
if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) {
t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize)
}
if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) {
t.Fatal("Buffers should be nil now")
}
if nc.ps.state != OP_START {
t.Fatalf("Wrong state: %v", nc.ps.state)
}
}
func TestNormalizeError(t *testing.T) {
expected := "Typical Error"
if s := normalizeErr("-ERR '" + expected + "'"); s != expected {
t.Fatalf("Expected '%s', got '%s'", expected, s)
}
expected = "Trim Surrounding Spaces"
if s := normalizeErr("-ERR '" + expected + "' "); s != expected {
t.Fatalf("Expected '%s', got '%s'", expected, s)
}
expected = "Trim Surrounding Spaces Without Quotes"
if s := normalizeErr("-ERR " + expected + " "); s != expected {
t.Fatalf("Expected '%s', got '%s'", expected, s)
}
expected = "Error Without Quotes"
if s := normalizeErr("-ERR " + expected); s != expected {
t.Fatalf("Expected '%s', got '%s'", expected, s)
}
expected = "Error With Quote Only On Left"
if s := normalizeErr("-ERR '" + expected); s != expected {
t.Fatalf("Expected '%s', got '%s'", expected, s)
}
expected = "Error With Quote Only On Right"
if s := normalizeErr("-ERR " + expected + "'"); s != expected {
t.Fatalf("Expected '%s', got '%s'", expected, s)
}
}
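// TestAsyncINFO drives the parser through asynchronous INFO protocol lines, byte by byte and in
// larger chunks, and checks that connect_urls discovered this way are appended to the server pool
// without disturbing the existing pool order.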
func TestAsyncINFO(t *testing.T) {
opts := GetDefaultOptions()
c := &Conn{Opts: opts}
c.ps = &parseState{}
if c.ps.state != OP_START {
t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
}
info := []byte("INFO {}\r\n")
if c.ps.state != OP_START {
t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
}
err := c.parse(info[:1])
if err != nil || c.ps.state != OP_I {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(info[1:2])
if err != nil || c.ps.state != OP_IN {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(info[2:3])
if err != nil || c.ps.state != OP_INF {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(info[3:4])
if err != nil || c.ps.state != OP_INFO {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(info[4:5])
if err != nil || c.ps.state != OP_INFO_SPC {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
err = c.parse(info[5:])
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// All at once
err = c.parse(info)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Server pool needs to be setup
c.setupServerPool()
// Partials requiring argBuf
expectedServer := serverInfo{
Id: "test",
Host: "localhost",
Port: 4222,
Version: "1.2.3",
AuthRequired: true,
TLSRequired: true,
MaxPayload: 2 * 1024 * 1024,
ConnectURLs: []string{"localhost:5222", "localhost:6222"},
}
// Set NoRandomize so that the check with expectedServer info
// matches.
c.Opts.NoRandomize = true
b, _ := json.Marshal(expectedServer)
info = []byte(fmt.Sprintf("INFO %s\r\n", b))
if c.ps.state != OP_START {
t.Fatalf("Expected OP_START vs %d\n", c.ps.state)
}
err = c.parse(info[:9])
if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil {
t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf)
}
err = c.parse(info[9:11])
if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil {
t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf)
}
err = c.parse(info[11:])
if err != nil || c.ps.state != OP_START || c.ps.argBuf != nil {
t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf)
}
if !reflect.DeepEqual(c.info, expectedServer) {
t.Fatalf("Expected server info to be: %v, got: %v", expectedServer, c.info)
}
// Good INFOs
good := []string{"INFO {}\r\n", "INFO {}\r\n", "INFO {} \r\n", "INFO { \"server_id\": \"test\" } \r\n", "INFO {\"connect_urls\":[]}\r\n"}
for _, gi := range good {
c.ps = &parseState{}
err = c.parse([]byte(gi))
if err != nil || c.ps.state != OP_START {
t.Fatalf("Protocol %q should be fine. Err=%v state=%v", gi, err, c.ps.state)
}
}
// Wrong INFOs
wrong := []string{"IxNFO {}\r\n", "INxFO {}\r\n", "INFxO {}\r\n", "INFOx {}\r\n", "INFO{}\r\n", "INFO {}"}
for _, wi := range wrong {
c.ps = &parseState{}
err = c.parse([]byte(wi))
if err == nil && c.ps.state == OP_START {
t.Fatalf("Protocol %q should have failed", wi)
}
}
checkPool := func(urls ...string) {
// Check both pool and urls map
if len(c.srvPool) != len(urls) {
stackFatalf(t, "Pool should have %d elements, has %d", len(urls), len(c.srvPool))
}
if len(c.urls) != len(urls) {
stackFatalf(t, "Map should have %d elements, has %d", len(urls), len(c.urls))
}
for _, url := range urls {
if _, present := c.urls[url]; !present {
stackFatalf(t, "Pool should have %q", url)
}
}
}
// Now test the decoding of "connect_urls"
// Reset the pool
c.setupServerPool()
// Reinitialize the parser
c.ps = &parseState{}
info = []byte("INFO {\"connect_urls\":[\"localhost:4222\", \"localhost:5222\"]}\r\n")
err = c.parse(info)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Pool now should contain localhost:4222 (the default URL) and localhost:5222
checkPool("localhost:4222", "localhost:5222")
// Make sure that if client receives the same, it is not added again.
err = c.parse(info)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Pool should still contain localhost:4222 (the default URL) and localhost:5222
checkPool("localhost:4222", "localhost:5222")
// Receive a new URL
info = []byte("INFO {\"connect_urls\":[\"localhost:4222\", \"localhost:5222\", \"localhost:6222\"]}\r\n")
err = c.parse(info)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Pool now should contain localhost:4222 (the default URL) localhost:5222 and localhost:6222
checkPool("localhost:4222", "localhost:5222", "localhost:6222")
// Check that pool may be randomized on setup, but new URLs are always
// added at end of pool.
c.Opts.NoRandomize = false
c.Opts.Servers = testServers
// Reset the pool
c.setupServerPool()
// Reinitialize the parser
c.ps = &parseState{}
// Capture the pool sequence after randomization
urlsAfterPoolSetup := make([]string, 0, len(c.srvPool))
for _, srv := range c.srvPool {
urlsAfterPoolSetup = append(urlsAfterPoolSetup, srv.url.Host)
}
checkPoolOrderDidNotChange := func() {
for i := 0; i < len(urlsAfterPoolSetup); i++ {
if c.srvPool[i].url.Host != urlsAfterPoolSetup[i] {
stackFatalf(t, "Pool should have %q at index %q, has %q", urlsAfterPoolSetup[i], i, c.srvPool[i].url.Host)
}
}
}
// Add new urls
newURLs := []string{
"localhost:6222",
"localhost:7222",
"localhost:8222\", \"localhost:9222",
"localhost:10222\", \"localhost:11222\", \"localhost:12222,",
}
for _, newURL := range newURLs {
info = []byte("INFO {\"connect_urls\":[\"" + newURL + "]}\r\n")
err = c.parse(info)
if err != nil || c.ps.state != OP_START {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Check that pool order does not change up to the new addition(s).
checkPoolOrderDidNotChange()
}
}
func TestConnServers(t *testing.T) {
opts := GetDefaultOptions()
c := &Conn{Opts: opts}
c.ps = &parseState{}
c.setupServerPool()
validateURLs := func(serverUrls []string, expectedUrls ...string) {
var found bool
if len(serverUrls) != len(expectedUrls) {
stackFatalf(t, "Array should have %d elements, has %d", len(expectedUrls), len(serverUrls))
}
for _, ev := range expectedUrls {
found = false
for _, av := range serverUrls {
if ev == av {
found = true
break
}
}
if !found {
stackFatalf(t, "array is missing %q in %v", ev, serverUrls)
}
}
}
// check the default url
validateURLs(c.Servers(), "nats://localhost:4222")
if len(c.DiscoveredServers()) != 0 {
t.Fatalf("Expected no discovered servers")
}
// Add a new URL
err := c.parse([]byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n"))
if err != nil {
t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err)
}
// Server list should now contain both the default and the new url.
validateURLs(c.Servers(), "nats://localhost:4222", "nats://localhost:5222")
// Discovered servers should only contain the new url.
validateURLs(c.DiscoveredServers(), "nats://localhost:5222")
// verify user credentials are stripped out.
opts.Servers = []string{"nats://user:pass@localhost:4333", "nats://token@localhost:4444"}
c = &Conn{Opts: opts}
c.ps = &parseState{}
c.setupServerPool()
validateURLs(c.Servers(), "nats://localhost:4333", "nats://localhost:4444")
}
func TestConnAsyncCBDeadlock(t *testing.T) {
s := RunServerOnPort(TEST_PORT)
defer s.Shutdown()
ch := make(chan bool)
o := GetDefaultOptions()
o.Url = fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT)
o.ClosedCB = func(_ *Conn) {
ch <- true
}
o.AsyncErrorCB = func(nc *Conn, sub *Subscription, err error) {
// do something with nc that requires locking behind the scenes
_ = nc.LastError()
}
nc, err := o.Connect()
if err != nil {
t.Fatalf("Should have connected ok: %v", err)
}
total := 300
wg := &sync.WaitGroup{}
wg.Add(total)
for i := 0; i < total; i++ {
go func() {
// overwhelm asyncCB with errors
nc.processErr(AUTHORIZATION_ERR)
wg.Done()
}()
}
wg.Wait()
nc.Close()
if e := Wait(ch); e != nil {
t.Fatal("Deadlock")
}
}
func TestPingTimerLeakedOnClose(t *testing.T) {
s := RunServerOnPort(TEST_PORT)
defer s.Shutdown()
nc, err := Connect(fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
nc.Close()
// There was a bug (issue #338) that if connection
// was created and closed quickly, the pinger would
// be created from a go-routine and would cause the
// connection object to be retained until the ping
// timer fired.
// Wait a little bit and check if the timer is set.
// With the defect it would be.
time.Sleep(100 * time.Millisecond)
nc.mu.Lock()
pingTimerSet := nc.ptmr != nil
nc.mu.Unlock()
if pingTimerSet {
t.Fatal("Pinger timer should not be set")
}
}
func TestNoEcho(t *testing.T) {
s := RunServerOnPort(TEST_PORT)
defer s.Shutdown()
url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT)
nc, err := Connect(url, NoEcho())
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc.Close()
r := int32(0)
_, err = nc.Subscribe("foo", func(m *Msg) {
atomic.AddInt32(&r, 1)
})
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
err = nc.Publish("foo", []byte("Hello World"))
if err != nil {
t.Fatalf("Error on publish: %v", err)
}
nc.Flush()
nc.Flush()
if nr := atomic.LoadInt32(&r); nr != 0 {
t.Fatalf("Expected no messages echoed back, received %d\n", nr)
}
}
func TestNoEchoOldServer(t *testing.T) {
opts := GetDefaultOptions()
opts.Url = DefaultURL
opts.NoEcho = true
nc := &Conn{Opts: opts}
if err := nc.setupServerPool(); err != nil {
t.Fatalf("Problem setting up Server Pool: %v\n", err)
}
// Old style with no proto, meaning 0. We need Proto:1 for NoEcho support.
oldInfo := "{\"server_id\":\"22\",\"version\":\"1.1.0\",\"go\":\"go1.10.2\",\"port\":4222,\"max_payload\":1048576}"
err := nc.processInfo(oldInfo)
if err != nil {
t.Fatalf("Error processing old style INFO: %v\n", err)
}
// Make sure connectProto generates an error.
_, err = nc.connectProto()
if err == nil {
t.Fatalf("Expected an error but got none\n")
}
}
// Trust Server Tests
var (
oSeed = []byte("SOAL7GTNI66CTVVNXBNQMG6V2HTDRWC3HGEP7D2OUTWNWSNYZDXWFOX4SU")
aSeed = []byte("SAAASUPRY3ONU4GJR7J5RUVYRUFZXG56F4WEXELLLORQ65AEPSMIFTOJGE")
uSeed = []byte("SUAMK2FG4MI6UE3ACF3FK3OIQBCEIEZV7NSWFFEW63UXMRLFM2XLAXK4GY")
aJWT = "eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJLWjZIUVRXRlY3WkRZSFo3NklRNUhPM0pINDVRNUdJS0JNMzJTSENQVUJNNk5PNkU3TUhRIiwiaWF0IjoxNTQ0MDcxODg5LCJpc3MiOiJPRDJXMkk0TVZSQTVUR1pMWjJBRzZaSEdWTDNPVEtGV1FKRklYNFROQkVSMjNFNlA0NlMzNDVZWSIsInN1YiI6IkFBUFFKUVVQS1ZYR1c1Q1pINUcySEZKVUxZU0tERUxBWlJWV0pBMjZWRFpPN1dTQlVOSVlSRk5RIiwidHlwZSI6ImFjY291bnQiLCJuYXRzIjp7ImxpbWl0cyI6eyJzdWJzIjotMSwiY29ubiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.8o35JPQgvhgFT84Bi2Z-zAeSiLrzzEZn34sgr1DIBEDTwa-EEiMhvTeos9cvXxoZVCCadqZxAWVwS6paAMj8Bg"
uJWT = "eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJBSFQzRzNXRElDS1FWQ1FUWFJUTldPRlVVUFRWNE00RFZQV0JGSFpJQUROWEZIWEpQR0FBIiwiaWF0IjoxNTQ0MDcxODg5LCJpc3MiOiJBQVBRSlFVUEtWWEdXNUNaSDVHMkhGSlVMWVNLREVMQVpSVldKQTI2VkRaTzdXU0JVTklZUkZOUSIsInN1YiI6IlVBVDZCV0NTQ1dMVUtKVDZLNk1CSkpPRU9UWFo1QUpET1lLTkVWUkZDN1ZOTzZPQTQzTjRUUk5PIiwidHlwZSI6InVzZXIiLCJuYXRzIjp7InB1YiI6e30sInN1YiI6e319fQ._8A1XM88Q2kp7XVJZ42bQuO9E3QPsNAGKtVjAkDycj8A5PtRPby9UpqBUZzBwiJQQO3TUcD5GGqSvsMm6X8hCQ"
chained = `
-----BEGIN NATS USER JWT-----
eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJBSFQzRzNXRElDS1FWQ1FUWFJUTldPRlVVUFRWNE00RFZQV0JGSFpJQUROWEZIWEpQR0FBIiwiaWF0IjoxNTQ0MDcxODg5LCJpc3MiOiJBQVBRSlFVUEtWWEdXNUNaSDVHMkhGSlVMWVNLREVMQVpSVldKQTI2VkRaTzdXU0JVTklZUkZOUSIsInN1YiI6IlVBVDZCV0NTQ1dMVUtKVDZLNk1CSkpPRU9UWFo1QUpET1lLTkVWUkZDN1ZOTzZPQTQzTjRUUk5PIiwidHlwZSI6InVzZXIiLCJuYXRzIjp7InB1YiI6e30sInN1YiI6e319fQ._8A1XM88Q2kp7XVJZ42bQuO9E3QPsNAGKtVjAkDycj8A5PtRPby9UpqBUZzBwiJQQO3TUcD5GGqSvsMm6X8hCQ
------END NATS USER JWT------
************************* IMPORTANT *************************
NKEY Seed printed below can be used sign and prove identity.
NKEYs are sensitive and should be treated as secrets.
-----BEGIN USER NKEY SEED-----
SUAMK2FG4MI6UE3ACF3FK3OIQBCEIEZV7NSWFFEW63UXMRLFM2XLAXK4GY
------END USER NKEY SEED------
`
)
func runTrustServer() *server.Server {
kp, _ := nkeys.FromSeed(oSeed)
pub, _ := kp.PublicKey()
opts := gnatsd.DefaultTestOptions
opts.Port = TEST_PORT
opts.TrustedKeys = []string{string(pub)}
s := RunServerWithOptions(opts)
mr := &server.MemAccResolver{}
akp, _ := nkeys.FromSeed(aSeed)
apub, _ := akp.PublicKey()
mr.Store(string(apub), aJWT)
s.SetAccountResolver(mr)
return s
}
func TestBasicUserJWTAuth(t *testing.T) {
if server.VERSION[0] == '1' {
t.Skip()
}
ts := runTrustServer()
defer ts.Shutdown()
url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT)
_, err := Connect(url)
if err == nil {
t.Fatalf("Expecting an error on connect")
}
jwtCB := func() (string, error) {
return uJWT, nil
}
sigCB := func(nonce []byte) ([]byte, error) {
kp, _ := nkeys.FromSeed(uSeed)
sig, _ := kp.Sign(nonce)
return sig, nil
}
// Try with user jwt but no sig
_, err = Connect(url, UserJWT(jwtCB, nil))
if err == nil {
t.Fatalf("Expecting an error on connect")
}
// Try with user callback
_, err = Connect(url, UserJWT(nil, sigCB))
if err == nil {
t.Fatalf("Expecting an error on connect")
}
nc, err := Connect(url, UserJWT(jwtCB, sigCB))
if err != nil {
t.Fatalf("Expected to connect, got %v", err)
}
nc.Close()
}
func TestUserCredentialsTwoFiles(t *testing.T) {
if server.VERSION[0] == '1' {
t.Skip()
}
ts := runTrustServer()
defer ts.Shutdown()
userJWTFile := createTmpFile(t, []byte(uJWT))
defer os.Remove(userJWTFile)
userSeedFile := createTmpFile(t, uSeed)
defer os.Remove(userSeedFile)
url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT)
nc, err := Connect(url, UserCredentials(userJWTFile, userSeedFile))
if err != nil {
t.Fatalf("Expected to connect, got %v", err)
}
nc.Close()
}
func TestUserCredentialsChainedFile(t *testing.T) {
if server.VERSION[0] == '1' {
t.Skip()
}
ts := runTrustServer()
defer ts.Shutdown()
chainedFile := createTmpFile(t, []byte(chained))
defer os.Remove(chainedFile)
url := fmt.Sprintf("nats://127.0.0.1:%d", TEST_PORT)
nc, err := Connect(url, UserCredentials(chainedFile))
if err != nil {
t.Fatalf("Expected to connect, got %v", err)
}
nc.Close()
}
func TestNkeyAuth(t *testing.T) {
if server.VERSION[0] == '1' {
t.Skip()
}
seed := []byte("SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY")
kp, _ := nkeys.FromSeed(seed)
pub, _ := kp.PublicKey()
sopts := gnatsd.DefaultTestOptions
sopts.Port = TEST_PORT
sopts.Nkeys = []*server.NkeyUser{&server.NkeyUser{Nkey: string(pub)}}
ts := RunServerWithOptions(sopts)
defer ts.Shutdown()
opts := reconnectOpts
if _, err := opts.Connect(); err == nil {
t.Fatalf("Expected to fail with no nkey auth defined")
}
opts.Nkey = string(pub)
if _, err := opts.Connect(); err != ErrNkeyButNoSigCB {
t.Fatalf("Expected to fail with nkey defined but no signature callback, got %v", err)
}
badSign := func(nonce []byte) ([]byte, error) {
return []byte("VALID?"), nil
}
opts.SignatureCB = badSign
if _, err := opts.Connect(); err == nil {
t.Fatalf("Expected to fail with nkey and bad signature callback")
}
goodSign := func(nonce []byte) ([]byte, error) {
sig, err := kp.Sign(nonce)
if err != nil {
t.Fatalf("Failed signing nonce: %v", err)
}
return sig, nil
}
opts.SignatureCB = goodSign
nc, err := opts.Connect()
if err != nil {
t.Fatalf("Expected to succeed but got %v", err)
}
// Now disconnect by killing the server and restarting.
ts.Shutdown()
ts = RunServerWithOptions(sopts)
defer ts.Shutdown()
if err := nc.FlushTimeout(5 * time.Second); err != nil {
t.Fatalf("Error on Flush: %v", err)
}
}
func createTmpFile(t *testing.T, content []byte) string {
t.Helper()
conf, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("Error creating conf file: %v", err)
}
fName := conf.Name()
conf.Close()
if err := ioutil.WriteFile(fName, content, 0666); err != nil {
os.Remove(fName)
t.Fatalf("Error writing conf file: %v", err)
}
return fName
}
func TestNKeyOptionFromSeed(t *testing.T) {
if _, err := NkeyOptionFromSeed("file_that_does_not_exist"); err == nil {
t.Fatal("Expected error got none")
}
seedFile := createTmpFile(t, []byte(`
# No seed
THIS_NOT_A_NKEY_SEED
`))
defer os.Remove(seedFile)
if _, err := NkeyOptionFromSeed(seedFile); err == nil || !strings.Contains(err.Error(), "seed found") {
t.Fatalf("Expected error about seed not found, got %v", err)
}
os.Remove(seedFile)
seedFile = createTmpFile(t, []byte(`
# Invalid seed
SUBADSEED
`))
// Make sure that we detect SU (trim space) but it still fails because
// this is not a valid NKey.
if _, err := NkeyOptionFromSeed(seedFile); err == nil || strings.Contains(err.Error(), "seed found") {
t.Fatalf("Expected error about invalid key, got %v", err)
}
os.Remove(seedFile)
kp, _ := nkeys.CreateUser()
seed, _ := kp.Seed()
seedFile = createTmpFile(t, seed)
opt, err := NkeyOptionFromSeed(seedFile)
if err != nil {
t.Fatalf("Error: %v", err)
}
l, e := net.Listen("tcp", "127.0.0.1:0")
if e != nil {
t.Fatal("Could not listen on an ephemeral port")
}
tl := l.(*net.TCPListener)
defer tl.Close()
addr := tl.Addr().(*net.TCPAddr)
wg := sync.WaitGroup{}
wg.Add(1)
ch := make(chan bool, 1)
rs := func(ch chan bool) {
defer wg.Done()
conn, err := l.Accept()
if err != nil {
t.Fatalf("Error accepting client connection: %v\n", err)
}
defer conn.Close()
info := "INFO {\"server_id\":\"foobar\",\"nonce\":\"anonce\"}\r\n"
conn.Write([]byte(info))
// Read connect and ping commands sent from the client
br := bufio.NewReaderSize(conn, 10*1024)
line, _, err := br.ReadLine()
if err != nil {
t.Fatalf("Expected CONNECT and PING from client, got: %s", err)
}
// If client got an error reading the seed, it will not send it
if bytes.Contains(line, []byte(`"sig":`)) {
conn.Write([]byte("PONG\r\n"))
} else {
conn.Write([]byte(`-ERR go away\r\n`))
conn.Close()
}
// Now wait to be notified that we can finish
<-ch
}
go rs(ch)
nc, err := Connect(fmt.Sprintf("nats://127.0.0.1:%d", addr.Port), opt)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
nc.Close()
close(ch)
wg.Wait()
// Now that option is already created, change content of file
ioutil.WriteFile(seedFile, []byte(`xxxxx`), 0666)
ch = make(chan bool, 1)
wg.Add(1)
go rs(ch)
if _, err := Connect(fmt.Sprintf("nats://127.0.0.1:%d", addr.Port), opt); err == nil {
t.Fatal("Expected error, got none")
}
close(ch)
wg.Wait()
}
func TestLookupHostResultIsRandomized(t *testing.T) {
orgAddrs, err := net.LookupHost("localhost")
if err != nil {
t.Fatalf("Error looking up host: %v", err)
}
if len(orgAddrs) < 2 {
t.Skip("localhost resolves to less than 2 addresses, so test not relevant")
}
opts := gnatsd.DefaultTestOptions
// For this test, important to be able to listen on both IPv4/v6
// because it is likely that in local tests, localhost will resolve
// to ::1 and 127.0.0.1
opts.Host = "0.0.0.0"
opts.Port = TEST_PORT
s := RunServerWithOptions(opts)
defer s.Shutdown()
for i := 0; i < 10; i++ {
nc, err := Connect(fmt.Sprintf("localhost:%d", TEST_PORT))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
nc.mu.Lock()
host, _, _ := net.SplitHostPort(nc.conn.LocalAddr().String())
nc.mu.Unlock()
isFirst := host == orgAddrs[0]
nc.Close()
if !isFirst {
// We used one that is not the first of the resolved addresses,
// so we consider the test good in that IPs were randomized.
return
}
}
t.Fatalf("Always used first address returned by LookupHost")
}
| ["\"TRAVIS_TAG\""] | [] | ["TRAVIS_TAG"] | [] | ["TRAVIS_TAG"] | go | 1 | 0 | |
internal/pwsia/sqlite.go
|
package pwsia
import (
"github.com/jmoiron/sqlx"
"github.com/thoas/go-funk"
"github.com/xo/dburl"
"io/ioutil"
"log"
"os"
)
func OpenDB(dsn string) *sqlx.DB {
// Connect to SQLite3 with WAL enabled.
// SEE: https://github.com/mattn/go-sqlite3#connection-string
// SEE: [go-sqlite3 with journal_mode=WAL gives 'database is locked' error - Stack Overflow](https://stackoverflow.com/questions/57118674/go-sqlite3-with-journal-mode-wal-gives-database-is-locked-error)
// SEE: [Support vfs for Open by mattn · Pull Request #877 · mattn/go-sqlite3](https://github.com/mattn/go-sqlite3/pull/877)
// SEE: https://stackoverflow.com/a/42492845/9998350, at [linux - Why does sqlite3 not work on Amazon Elastic File System? - Stack Overflow](https://stackoverflow.com/questions/42070214/why-does-sqlite3-not-work-on-amazon-elastic-file-system)
u, err := dburl.Parse(dsn)
if err != nil {
return nil
}
db, err := sqlx.Open(u.Driver, u.DSN)
if err != nil {
return nil
}
var tableNames []string
db.Select(&tableNames, "SELECT name FROM sqlite_master\nWHERE type='table'\nORDER BY name;")
// Set busy_timeout for Litestream.
// Litestream recommends "busy_timeout = 5000", but for NFS usage we need to make the timeout value much longer.
db.MustExec("PRAGMA busy_timeout = 30000;")
db.MustExec("PRAGMA synchronous = NORMAL;\n")
initialDBSchemaPath := os.Getenv("INITIAL_DB_SCHEMA_PATH")
if len(initialDBSchemaPath) == 0 {
log.Print("INITIAL_DB_SCHEMA_PATH not specified")
return db
}
isInitialized := funk.ContainsString(tableNames, "page_views")
if !isInitialized {
// Run initial migration
file, err := ioutil.ReadFile(initialDBSchemaPath)
if err != nil {
log.Fatal(err)
return nil
}
db.MustExec(string(file))
}
return db
}
| ["\"INITIAL_DB_SCHEMA_PATH\""] | [] | ["INITIAL_DB_SCHEMA_PATH"] | [] | ["INITIAL_DB_SCHEMA_PATH"] | go | 1 | 0 | |
test/init_test.go
|
//go:build conformance || e2e || examples
// +build conformance e2e examples
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains initialization logic for the tests, such as special magical global state that needs to be initialized.
package test
import (
"context"
"flag"
"fmt"
"os"
"strings"
"sync"
"testing"
"github.com/tektoncd/resolution/pkg/names"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Mysteriously by k8s libs, or they fail to create `KubeClient`s when using oidc authentication. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/345
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
knativetest "knative.dev/pkg/test"
"knative.dev/pkg/test/logging" // Mysteriously by k8s libs, or they fail to create `KubeClient`s from config. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242
"knative.dev/pkg/test/logstream"
"sigs.k8s.io/yaml"
)
var initMetrics sync.Once
var skipRootUserTests = false
func init() {
flag.BoolVar(&skipRootUserTests, "skipRootUserTests", false, "Skip tests that require root user")
}
func setup(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string) {
t.Helper()
namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arendelle")
initializeLogsAndMetrics(t)
// Inline controller logs from SYSTEM_NAMESPACE into the t.Log output.
cancel := logstream.Start(t)
t.Cleanup(cancel)
c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace)
createNamespace(ctx, t, namespace, c.KubeClient)
for _, f := range fn {
f(ctx, t, c, namespace)
}
return c, namespace
}
func header(t *testing.T, text string) {
t.Helper()
left := "### "
right := " ###"
txt := left + text + right
bar := strings.Repeat("#", len(txt))
t.Logf(bar)
t.Logf(txt)
t.Logf(bar)
}
func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string) {
t.Helper()
if cs.KubeClient == nil {
return
}
if t.Failed() {
header(t, fmt.Sprintf("Dumping objects from %s", namespace))
bs, err := getCRDYaml(ctx, cs, namespace)
if err != nil {
t.Error(err)
} else {
t.Log(string(bs))
}
}
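// Keep the namespace around for debugging when the test failed or TEST_KEEP_NAMESPACES is set.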
if os.Getenv("TEST_KEEP_NAMESPACES") == "" && !t.Failed() {
t.Logf("Deleting namespace %s", namespace)
if err := cs.KubeClient.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil {
t.Errorf("Failed to delete namespace %s: %s", namespace, err)
}
}
}
func initializeLogsAndMetrics(t *testing.T) {
initMetrics.Do(func() {
flag.Parse()
flag.Set("alsologtostderr", "true")
logging.InitializeLogger()
// if knativetest.Flags.EmitMetrics {
logging.InitializeMetricExporter(t.Name())
//}
})
}
func createNamespace(ctx context.Context, t *testing.T, namespace string, kubeClient kubernetes.Interface) {
t.Logf("Create namespace %s to deploy to", namespace)
labels := map[string]string{
"tekton.dev/test-e2e": "true",
}
if _, err := kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Labels: labels,
},
}, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err)
}
}
func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) {
var output []byte
printOrAdd := func(i interface{}) {
bs, err := yaml.Marshal(i)
if err != nil {
return
}
output = append(output, []byte("\n---\n")...)
output = append(output, bs...)
}
rrs, err := cs.ResolutionRequestClient.List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("could not get pipeline: %w", err)
}
for _, i := range rrs.Items {
printOrAdd(i)
}
return output, nil
}
// TestMain initializes anything global needed by the tests. Right now this is just log and metric
// setup since the log and metric libs we're using use global state :(
func TestMain(m *testing.M) {
flag.Parse()
c := m.Run()
fmt.Fprintf(os.Stderr, "Using kubeconfig at `%s` with cluster `%s`\n", knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster)
os.Exit(c)
}
| ["\"TEST_KEEP_NAMESPACES\""] | [] | ["TEST_KEEP_NAMESPACES"] | [] | ["TEST_KEEP_NAMESPACES"] | go | 1 | 0 | |
citrixadc/helpers_test.go
|
package citrixadc
import (
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"regexp"
"runtime"
"strings"
"testing"
"github.com/chiradeep/go-nitro/config/system"
"github.com/chiradeep/go-nitro/netscaler"
)
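// uploadTestdataFile reads a file from the local testdata directory, base64-encodes it and uploads
// it as a systemfile to targetDir on the ADC, replacing any existing file with the same name.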
func uploadTestdataFile(c *NetScalerNitroClient, t *testing.T, filename, targetDir string) error {
nsClient := c.client
// Get here path
_, here_filename, _, _ := runtime.Caller(1)
b, err := ioutil.ReadFile(path.Join(path.Dir(here_filename), "testdata", filename))
if err != nil {
return err
}
sf := system.Systemfile{
Filename: filename,
Filecontent: base64.StdEncoding.EncodeToString(b),
Filelocation: targetDir,
}
_, err = nsClient.AddResource(netscaler.Systemfile.Type(), filename, &sf)
if err != nil && strings.Contains(err.Error(), "File already exists") {
url_args := map[string]string{"filelocation": strings.Replace(targetDir, "/", "%2F", -1)}
err := nsClient.DeleteResourceWithArgsMap(netscaler.Systemfile.Type(), filename, url_args)
if err != nil {
return err
}
_, err = nsClient.AddResource(netscaler.Systemfile.Type(), filename, &sf)
if err != nil {
return err
}
}
return nil
}
var helperClient *NetScalerNitroClient
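// testHelperInstantiateClient returns a cached NITRO client, falling back to the NS_URL, NS_LOGIN
// and NS_PASSWORD environment variables (and finally the "nsroot" defaults) when explicit values
// are not supplied.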
func testHelperInstantiateClient(nsUrl, username, password string, sslVerify bool) (*NetScalerNitroClient, error) {
if helperClient != nil {
log.Printf("Returning existing helper client\n")
return helperClient, nil
}
if nsUrl == "" {
if nsUrl = os.Getenv("NS_URL"); nsUrl == "" {
return nil, errors.New("No nsUrl defined")
}
}
if username == "" {
if username = os.Getenv("NS_LOGIN"); username == "" {
username = "nsroot"
}
}
if password == "" {
if password = os.Getenv("NS_PASSWORD"); password == "" {
password = "nsroot"
}
}
c := NetScalerNitroClient{
Username: username,
Password: password,
Endpoint: nsUrl,
}
params := netscaler.NitroParams{
Url: nsUrl,
Username: username,
Password: password,
//ProxiedNs: d.Get("proxied_ns").(string),
SslVerify: sslVerify,
}
client, err := netscaler.NewNitroClientFromParams(params)
if err != nil {
return nil, err
}
c.client = client
helperClient = &c
log.Printf("Helper client instantiated\n")
return helperClient, nil
}
func testHelperEnsureResourceDeletion(c *NetScalerNitroClient, t *testing.T, resourceType, resourceName string, deleteArgsMap map[string]string) {
if _, err := c.client.FindResource(resourceType, resourceName); err != nil {
targetSubstring := fmt.Sprintf("No resource %s of type %s found", resourceName, resourceType)
actualError := err.Error()
t.Logf("targetSubstring \"%s\"", targetSubstring)
t.Logf("actualError \"%s\"", actualError)
if strings.Contains(err.Error(), targetSubstring) {
t.Logf("Ensure delete found no remaining resource %s", resourceName)
return
} else {
t.Fatalf("Unexpected error while ensuring delete of resource %v. %v", resourceName, err)
return
}
}
// Fallthrough
if deleteArgsMap == nil {
if err := c.client.DeleteResource(resourceType, resourceName); err != nil {
t.Logf("Ensuring delete failed for resource %s.", resourceName)
t.Fatal(err)
return
} else {
t.Logf("Ensuring deletion of %s successful", resourceName)
}
} else {
if err := c.client.DeleteResourceWithArgsMap(resourceType, resourceName, deleteArgsMap); err != nil {
t.Logf("Ensuring delete failed for resource %s with argsMap %v", resourceName, deleteArgsMap)
t.Fatal(err)
return
} else {
t.Logf("Ensuring deletion of %s successful", resourceName)
}
}
}
func testHelperVerifyImmutabilityFunc(c *NetScalerNitroClient, t *testing.T, resourceType, resourceName string, resourceInstance interface{}, attribute string) {
if _, err := c.client.UpdateResource(resourceType, resourceName, resourceInstance); err != nil {
r := regexp.MustCompile(fmt.Sprintf("errorcode.*278.*Invalid argument \\[%s\\]", attribute))
if r.Match([]byte(err.Error())) {
t.Logf("Succesfully verified immutability of attribute \"%s\"", attribute)
} else {
t.Errorf("Error while assesing immutability of attribute \"%s\"", attribute)
t.Fatal(err)
}
} else {
t.Fatalf("Error (no error) while assesing immutability of attribute \"%s\"", attribute)
}
}
| ["\"NS_URL\"", "\"NS_LOGIN\"", "\"NS_PASSWORD\""] | [] | ["NS_URL", "NS_LOGIN", "NS_PASSWORD"] | [] | ["NS_URL", "NS_LOGIN", "NS_PASSWORD"] | go | 3 | 0 | |
edge/cmd/edgecore/app/server.go
|
package app
import (
"errors"
"fmt"
"os"
"github.com/mitchellh/go-ps"
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/cli/globalflag"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
"github.com/kubeedge/beehive/pkg/core"
"github.com/kubeedge/kubeedge/edge/cmd/edgecore/app/options"
"github.com/kubeedge/kubeedge/edge/pkg/common/dbm"
"github.com/kubeedge/kubeedge/edge/pkg/devicetwin"
"github.com/kubeedge/kubeedge/edge/pkg/edged"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub"
"github.com/kubeedge/kubeedge/edge/pkg/edgestream"
"github.com/kubeedge/kubeedge/edge/pkg/eventbus"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager"
"github.com/kubeedge/kubeedge/edge/pkg/servicebus"
"github.com/kubeedge/kubeedge/edge/test"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1/validation"
"github.com/kubeedge/kubeedge/pkg/util"
"github.com/kubeedge/kubeedge/pkg/util/flag"
"github.com/kubeedge/kubeedge/pkg/version"
"github.com/kubeedge/kubeedge/pkg/version/verflag"
)
// NewEdgeCoreCommand create edgecore cmd
func NewEdgeCoreCommand() *cobra.Command {
opts := options.NewEdgeCoreOptions()
cmd := &cobra.Command{
Use: "edgecore",
Long: `Edgecore is the core edge part of KubeEdge, which contains six modules: devicetwin, edged,
edgehub, eventbus, metamanager, and servicebus. DeviceTwin is responsible for storing device status
and syncing device status to the cloud. It also provides query interfaces for applications. Edged is an
agent that runs on edge nodes and manages containerized applications and devices. Edgehub is a web socket
client responsible for interacting with Cloud Service for the edge computing (like Edge Controller as in the KubeEdge
Architecture). This includes syncing cloud-side resource updates to the edge, and reporting
edge-side host and device status changes to the cloud. EventBus is a MQTT client to interact with MQTT
servers (mosquitto), offering publish and subscribe capabilities to other components. MetaManager
is the message processor between edged and edgehub. It is also responsible for storing/retrieving metadata
to/from a lightweight database (SQLite). ServiceBus is an HTTP client to interact with HTTP servers (REST),
offering HTTP client capabilities to components of cloud to reach HTTP servers running at edge. `,
Run: func(cmd *cobra.Command, args []string) {
verflag.PrintAndExitIfRequested()
flag.PrintMinConfigAndExitIfRequested(v1alpha1.NewMinEdgeCoreConfig())
flag.PrintDefaultConfigAndExitIfRequested(v1alpha1.NewDefaultEdgeCoreConfig())
flag.PrintFlags(cmd.Flags())
if errs := opts.Validate(); len(errs) > 0 {
klog.Fatal(util.SpliceErrors(errs))
}
config, err := opts.Config()
if err != nil {
klog.Fatal(err)
}
if errs := validation.ValidateEdgeCoreConfiguration(config); len(errs) > 0 {
klog.Fatal(util.SpliceErrors(errs.ToAggregate().Errors()))
}
// To help debugging, immediately log version
klog.Infof("Version: %+v", version.Get())
// Check the running environment by default
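// Setting CHECK_EDGECORE_ENVIRONMENT=false disables the kubelet/kube-proxy conflict check below.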
checkEnv := os.Getenv("CHECK_EDGECORE_ENVIRONMENT")
// Force skip check if enable metaserver
if config.Modules.MetaManager.MetaServer.Enable {
checkEnv = "false"
}
if checkEnv != "false" {
// Check running environment before run edge core
if err := environmentCheck(); err != nil {
klog.Fatal(fmt.Errorf("failed to check the running environment: %v", err))
}
}
// get edge node local ip
if config.Modules.Edged.NodeIP == "" {
hostnameOverride := util.GetHostname()
localIP, _ := util.GetLocalIP(hostnameOverride)
config.Modules.Edged.NodeIP = localIP
}
registerModules(config)
// start all modules
core.Run()
},
}
fs := cmd.Flags()
namedFs := opts.Flags()
flag.AddFlags(namedFs.FlagSet("global"))
verflag.AddFlags(namedFs.FlagSet("global"))
globalflag.AddGlobalFlags(namedFs.FlagSet("global"), cmd.Name())
for _, f := range namedFs.FlagSets {
fs.AddFlagSet(f)
}
usageFmt := "Usage:\n %s\n"
cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
cmd.SetUsageFunc(func(cmd *cobra.Command) error {
fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
cliflag.PrintSections(cmd.OutOrStderr(), namedFs, cols)
return nil
})
cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
cliflag.PrintSections(cmd.OutOrStdout(), namedFs, cols)
})
return cmd
}
// environmentCheck check the environment before edgecore start
// if Check failed, return errors
func environmentCheck() error {
processes, err := ps.Processes()
if err != nil {
return err
}
for _, process := range processes {
// if kubelet is running, return error
if process.Executable() == "kubelet" {
return errors.New("kubelet should not running on edge node when running edgecore")
}
// if kube-proxy is running, return error
if process.Executable() == "kube-proxy" {
return errors.New("kube-proxy should not running on edge node when running edgecore")
}
}
return nil
}
// registerModules register all the modules started in edgecore
func registerModules(c *v1alpha1.EdgeCoreConfig) {
devicetwin.Register(c.Modules.DeviceTwin, c.Modules.Edged.HostnameOverride)
edged.Register(c.Modules.Edged)
edgehub.Register(c.Modules.EdgeHub, c.Modules.Edged.HostnameOverride)
eventbus.Register(c.Modules.EventBus, c.Modules.Edged.HostnameOverride)
metamanager.Register(c.Modules.MetaManager)
servicebus.Register(c.Modules.ServiceBus)
edgestream.Register(c.Modules.EdgeStream, c.Modules.Edged.HostnameOverride, c.Modules.Edged.NodeIP)
test.Register(c.Modules.DBTest)
// Note: Need to put it at the end, and wait for all modules to register before executing
dbm.InitDBConfig(c.DataBase.DriverName, c.DataBase.AliasName, c.DataBase.DataSource)
}
| ["\"CHECK_EDGECORE_ENVIRONMENT\""] | [] | ["CHECK_EDGECORE_ENVIRONMENT"] | [] | ["CHECK_EDGECORE_ENVIRONMENT"] | go | 1 | 0 | |
common/common.go
|
package common
import (
"database/sql"
"fmt"
"github.com/DataDog/datadog-go/statsd"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
"github.com/jonas747/discordgo"
"github.com/mediocregopher/radix.v3"
"github.com/sirupsen/logrus"
"github.com/volatiletech/sqlboiler/boil"
stdlog "log"
"os"
)
const (
VERSIONMAJOR = 1
VERSIONMINOR = 11
VERSIONPATCH = 6
)
var (
VERSIONNUMBER = fmt.Sprintf("%d.%d.%d", VERSIONMAJOR, VERSIONMINOR, VERSIONPATCH)
VERSION = VERSIONNUMBER + " Life as a bot is hard"
GORM *gorm.DB
PQ *sql.DB
RedisPool *radix.Pool
BotSession *discordgo.Session
BotUser *discordgo.User
Conf *CoreConfig
RedisPoolSize = 25
Statsd *statsd.Client
Testing = os.Getenv("YAGPDB_TESTING") != ""
CurrentRunCounter int64
)
// Initializes all database connections, config loading and so on
func Init() error {
stdlog.SetOutput(&STDLogProxy{})
stdlog.SetFlags(0)
if Testing {
logrus.SetLevel(logrus.DebugLevel)
}
config, err := LoadConfig()
if err != nil {
return err
}
Conf = config
err = setupGlobalDGoSession()
if err != nil {
return err
}
ConnectDatadog()
err = connectRedis(config.Redis)
if err != nil {
return err
}
err = connectDB(config.PQHost, config.PQUsername, config.PQPassword, "yagpdb")
if err != nil {
panic(err)
}
BotUser, err = BotSession.UserMe()
if err != nil {
panic(err)
}
BotSession.State.User = &discordgo.SelfUser{
User: BotUser,
}
err = RedisPool.Do(radix.Cmd(&CurrentRunCounter, "INCR", "yagpdb_run_counter"))
if err != nil {
panic(err)
}
return err
}
func setupGlobalDGoSession() (err error) {
BotSession, err = discordgo.New(Conf.BotToken)
if err != nil {
return err
}
BotSession.MaxRestRetries = 3
BotSession.Ratelimiter.MaxConcurrentRequests = 25
return nil
}
func ConnectDatadog() {
if Conf.DogStatsdAddress == "" {
logrus.Warn("No datadog info provided, not connecting to datadog aggregator")
return
}
client, err := statsd.New(Conf.DogStatsdAddress)
if err != nil {
logrus.WithError(err).Error("Failed connecting to dogstatsd, datadog integration disabled")
return
}
Statsd = client
currentTransport := BotSession.Client.HTTPClient.Transport
BotSession.Client.HTTPClient.Transport = &LoggingTransport{Inner: currentTransport}
}
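// InitTest connects to the local Postgres test database named by the YAGPDB_TEST_DB environment
// variable; it is a no-op when the variable is unset.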
func InitTest() {
testDB := os.Getenv("YAGPDB_TEST_DB")
if testDB == "" {
return
}
err := connectDB("localhost", "postgres", "123", testDB)
if err != nil {
panic(err)
}
}
func connectRedis(addr string) (err error) {
RedisPool, err = radix.NewPool("tcp", addr, RedisPoolSize, radix.PoolOnEmptyWait())
if err != nil {
logrus.WithError(err).Fatal("Failed intitializing redis pool")
}
return
}
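// connectDB opens the Postgres connection shared by GORM and sqlboiler, defaulting the host to localhost.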
func connectDB(host, user, pass, dbName string) error {
if host == "" {
host = "localhost"
}
db, err := gorm.Open("postgres", fmt.Sprintf("host=%s user=%s dbname=%s sslmode=disable password='%s'", host, user, dbName, pass))
GORM = db
PQ = db.DB()
boil.SetDB(PQ)
if err == nil {
PQ.SetMaxOpenConns(5)
}
GORM.SetLogger(&GORMLogger{})
return err
}
| ["\"YAGPDB_TESTING\"", "\"YAGPDB_TEST_DB\""] | [] | ["YAGPDB_TESTING", "YAGPDB_TEST_DB"] | [] | ["YAGPDB_TESTING", "YAGPDB_TEST_DB"] | go | 2 | 0 | |
python-hand-movements-classifier/convert_database_to_new_format.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Coding at 6:48 and listening to:
Rock is Dead - Marylin Manson
Dance D'Amour - The 69 Eyes
Wake Up - Rage Against the Machine
Clubbed to Death - Robert D.
@author: italo
"""
#%% Importing the libraries
import pandas as pd # reading files
import numpy as np # handling numerical data
import matplotlib.pyplot as plt # Plotting
from scipy import signal
###############################
#%% Adding the path to datasets
###############################
# Description of the analysed movements:
# Movement Number - Movement Name
# 1 -> Supinar (supinate)
# 2 -> Pronar (pronate)
# 3 -> Pinçar (pinch)
# 4 -> Fechar (close)
# 5 -> Estender (extend)
# 6 -> Flexionar (flex)
# This should be the output of the classifier. It should classify each movement
# into one of these classes.
#########################
#%% Importing the dataset
#########################
# The file names are given relative to the folder where this script is located
# - emg-movements-classifier
# - datasets
# - coletas
# - Eber
# - LH
# - Miguel... etc
# - python-hand_moviments-classifier
# - app_procedimentos
# - app_procedures.py
# Opening a file and reading it to a dataFrame object
# sep means separator, the files have no headers
# After reading it, we add the names of each column in the dataset.
# At end, we select the 4 channels as a numpy vector and we save it in
# emg_channels.
# The trigger is saved in emg_trigger.
volunteer_id = 'Insira aqui'  # placeholder ("insert here"): replace with the volunteer's identifier
volunteer_id_number = 1
print("Opening files of volunteer %d - %s" % (volunteer_id_number, volunteer_id))
print("Opening part 1.1...")
dataset_pt11 = pd.read_table('datasets/coletas/'+volunteer_id+'11-Final.txt', sep=';', header=None)
dataset_pt11.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 1.2...")
dataset_pt12 = pd.read_table('datasets/coletas/'+volunteer_id+'12-Final.txt', sep=';', header=None)
dataset_pt12.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 1.3...")
dataset_pt13 = pd.read_table('datasets/coletas/'+volunteer_id+'13-Final.txt', sep=';', header=None)
dataset_pt13.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 1.4...")
dataset_pt14 = pd.read_table('datasets/coletas/'+volunteer_id+'14-Final.txt', sep=';', header=None)
dataset_pt14.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.1...")
dataset_pt21 = pd.read_table('datasets/coletas/'+volunteer_id+'21-Final.txt', sep=';', header=None)
dataset_pt21.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.2...")
dataset_pt22 = pd.read_table('datasets/coletas/'+volunteer_id+'22-Final.txt', sep=';', header=None)
dataset_pt22.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.3...")
dataset_pt23 = pd.read_table('datasets/coletas/'+volunteer_id+'23-Final.txt', sep=';', header=None)
dataset_pt23.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.4...")
dataset_pt24 = pd.read_table('datasets/coletas/'+volunteer_id+'24-Final.txt', sep=';', header=None)
dataset_pt24.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print('*'*30)
dt_frames = [dataset_pt11, dataset_pt12, dataset_pt13, dataset_pt14,
dataset_pt21, dataset_pt22, dataset_pt23, dataset_pt24]
dataset = pd.concat(dt_frames)
emg_channels = dataset.iloc[:, :-2].values
emg_trigger = dataset.iloc[:, -2].values
dataset_pt11 = None
dataset_pt12 = None
dataset_pt13 = None
dataset_pt14 = None
dataset_pt21 = None
dataset_pt22 = None
dataset_pt23 = None
dataset_pt24 = None
dt_frames = None
dataset = None
# Here we do the same for obtaining a numpy vector with the movements
# executed in each peak of the trigger.
# targets contains the movements as a number from 1 to 6
# and targets_str as a string(name)
print("Reading targets...")
targets_pt11 = pd.read_table('datasets/coletas/'+volunteer_id+'11-Resposta.txt', header=None)
targets_pt12 = pd.read_table('datasets/coletas/'+volunteer_id+'12-Resposta.txt', header=None)
targets_pt13 = pd.read_table('datasets/coletas/'+volunteer_id+'13-Resposta.txt', header=None)
targets_pt14 = pd.read_table('datasets/coletas/'+volunteer_id+'14-Resposta.txt', header=None)
targets_pt21 = pd.read_table('datasets/coletas/'+volunteer_id+'21-Resposta.txt', header=None)
targets_pt22 = pd.read_table('datasets/coletas/'+volunteer_id+'22-Resposta.txt', header=None)
targets_pt23 = pd.read_table('datasets/coletas/'+volunteer_id+'23-Resposta.txt', header=None)
targets_pt24 = pd.read_table('datasets/coletas/'+volunteer_id+'24-Resposta.txt', header=None)
targets_frames = [targets_pt11, targets_pt12, targets_pt13, targets_pt14,
targets_pt21, targets_pt22, targets_pt23, targets_pt24]
targets = pd.concat(targets_frames)
targets_pt11 = None
targets_pt12 = None
targets_pt13 = None
targets_pt14 = None
targets_pt21 = None
targets_pt22 = None
targets_pt23 = None
targets_pt24 = None
targets_frames = None
targets = targets.iloc[:, :].values.ravel()
print('*'*30)
#####################
#%% Signal constants
#####################
# The empirical delay time between the signal saying to execute a movement and
# the start of some movement by the volunteer.
# We assume a delay of 250 ms, which corresponds to 500 data points at a sampling
# frequency of 2 kHz.
# This is a delay applied to the TRIGGER, necessary to sync the TRIGGER with the SIGNAL
delay_trigger = 500 # amount of points to delay
fs = 2000 # Sampling frequency in Hz
#########################
#%% Correcting the triger
#########################
# Illustration of why syncing the signals is necessary
# Before correction:
# emg signal: __________. .||||||||-.._____________
# ''||||||||-''
# trigger signal: ________________
# _____| |_____________
#
# After Correction:
# emg signal: __________. .||||||||-.._____________
# ''||||||||-''
# trigger signal: ________________
# _________| |_____________
#
# np.append concatenates values to an array.
# Here we insert an array of zeros at the beginning of the trigger,
# in order to shift the signal in time.
# We also exclude the last 'delay_trigger' points of the signal
# to guarantee that the new array has the same size as emg_trigger.
print("Correcting Trigger...")
emg_trigger_corrected = np.append(arr = np.zeros(delay_trigger),
values = emg_trigger[:-delay_trigger])
print('*'*30)
###########################
#%% downsampling to 1000Hz
###########################
print("downsampling...")
emg_channels = emg_channels[range(0,len(emg_channels),2),:]
emg_trigger = emg_trigger[range(0,len(emg_trigger),2)]
emg_trigger_corrected = emg_trigger_corrected[range(0,len(emg_trigger_corrected),2)]
print('*'*30)
###########################
#%% Normalizing
###########################
print("Normalizing")
maximum = max([abs(emg_channels.max()), abs(emg_channels.min())])
emg_channels = emg_channels / maximum
emg_trigger = np.array(emg_trigger > 0.7, dtype=np.uint8)
emg_trigger_corrected = np.array(emg_trigger_corrected > 0.7, dtype=np.uint8)
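# The 0.7 threshold binarizes the trigger into a 0/1 (uint8) signal.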
print('*'*30)
#####################
#%% Contraction sites
#####################
print("Calculating Contraction Sites")
s3= np.array(emg_trigger_corrected, dtype=np.int8)
s3[s3==0] = -1 # replace zeros with -1
s4=np.where(np.diff(s3))[0]+1
contractions_onsets = s4[np.arange(0,len(s4),2)]
contractions_offsets = s4[np.arange(1,len(s4),2)]
s3 = None
s4 = None
print('*'*30)
###############################
#%% OUTPUT SIGNAL
###############################
print("Generating output signal...")
output_signal = emg_trigger_corrected
contractions_lenght = contractions_offsets - contractions_onsets
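# Relabel each contraction window with its movement class (1-6), turning the binary trigger
# into a per-sample target signal.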
for n in range(len(contractions_onsets)):
    cont_index = np.arange(contractions_onsets[n],contractions_offsets[n])
    cont_values = targets[n] * np.ones(contractions_lenght[n])
    output_signal[cont_index] = cont_values
print('*'*30)
###############################
#%% creating new file
###############################
print("Creating new dataframe...")
output_data_frame = pd.DataFrame(columns=['CH1', 'CH2', 'CH3', 'CH4', 'OUTPUT'])
output_data_frame['CH1'] = emg_channels[:,0]
output_data_frame['CH2'] = emg_channels[:,1]
output_data_frame['CH3'] = emg_channels[:,2]
output_data_frame['CH4'] = emg_channels[:,3]
output_data_frame['OUTPUT'] = output_signal
print('*'*30)
print("Writing new dataframe to file..")
file_name_output = 'datasets/volunteer_'+str(volunteer_id_number)+'.csv'
output_data_frame.to_csv(path_or_buf=file_name_output,header=True)
print('*'*30)
# TODO: add SQLAlchemy support
###############################
#%% Optional: Plotting the data
###############################
# Here we use the matplotlib library to plot a small window of the signal
# And verify if everything is all right
print("Done!")
print("Now plotting!")
print(":)")
fig = plt.figure()
fig.suptitle("Voluntario: " + str(volunteer_id_number))
axes = [None for i in range(4)]
for i in range(4):
    axes[i] = plt.subplot(4,1,i+1)
    plt.plot(emg_channels[12000:80000,i])
    plt.plot(output_signal[12000:80000]/10.0)
    plt.title('Ch ' + str(i+1))
    plt.ylim((-1,1))
    plt.grid()
axes[0].get_shared_x_axes().join(axes[0],axes[1],axes[2],axes[3])
axes[0].get_shared_y_axes().join(axes[0],axes[1],axes[2],axes[3])
axes[0].set_xticklabels([])
axes[1].set_xticklabels([])
axes[2].set_xticklabels([])
plt.show()
| [] | [] | [] | [] | [] | python | null | null | null |
config.py
|
import os
class Config:
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SECRET_KEY = os.environ.get('SECRET_KEY')
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465  # 465 is the SMTP-over-SSL port used by Gmail
    MAIL_USE_TLS = False
    MAIL_USE_SSL = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
    DEBUG = True
config_options = {
    'development': DevConfig,
    'production': ProdConfig
}
| [] | [] | ["MAIL_PASSWORD", "SECRET_KEY", "DATABASE_URL", "MAIL_USERNAME"] | [] | ["MAIL_PASSWORD", "SECRET_KEY", "DATABASE_URL", "MAIL_USERNAME"] | python | 4 | 0 | |
StudentInfo/StudentInfo/asgi.py
|
"""
ASGI config for StudentInfo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StudentInfo.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
chain/store/store.go
|
package store
import (
"context"
"encoding/json"
"errors"
"os"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/sync/errgroup"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/metrics"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"go.uber.org/multierr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/pubsub"
lru "github.com/hashicorp/golang-lru"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
)
var log = logging.Logger("chainstore")
var (
chainHeadKey = dstore.NewKey("head")
checkpointKey = dstore.NewKey("/chain/checks")
blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
)
var DefaultTipSetCacheSize = 8192
var DefaultMsgMetaCacheSize = 2048
var ErrNotifeeDone = errors.New("notifee is done and should be removed")
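// init allows the default ARC cache sizes to be overridden through the LOTUS_CHAIN_TIPSET_CACHE
// and LOTUS_CHAIN_MSGMETA_CACHE environment variables.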
func init() {
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
tscs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err)
}
DefaultTipSetCacheSize = tscs
}
if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" {
mmcs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err)
}
DefaultMsgMetaCacheSize = mmcs
}
}
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee = func(rev, app []*types.TipSet) error
// Journal event types.
const (
evtTypeHeadChange = iota
)
type HeadChangeEvt struct {
From types.TipSetKey
FromHeight abi.ChainEpoch
To types.TipSetKey
ToHeight abi.ChainEpoch
RevertCount int
ApplyCount int
}
type WeightFunc func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error)
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
// latest head tipset references) being tracked in the Datastore (key-value
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
chainBlockstore bstore.Blockstore
stateBlockstore bstore.Blockstore
metadataDs dstore.Batching
weight WeightFunc
chainLocalBlockstore bstore.Blockstore
heaviestLk sync.RWMutex
heaviest *types.TipSet
checkpoint *types.TipSet
bestTips *pubsub.PubSub
pubLk sync.Mutex
tstLk sync.Mutex
tipsets map[abi.ChainEpoch][]cid.Cid
cindex *ChainIndex
reorgCh chan<- reorg
reorgNotifeeCh chan ReorgNotifee
mmCache *lru.ARCCache // msg meta cache (mh.Messages -> secp, bls []cid)
tsCache *lru.ARCCache
evtTypes [1]journal.EventType
journal journal.Journal
cancelFn context.CancelFunc
wg sync.WaitGroup
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
ctx, cancel := context.WithCancel(context.Background())
// unwraps the fallback store in case one is configured.
// some methods _need_ to operate on a local blockstore only.
localbs, _ := bstore.UnwrapFallbackStore(chainBs)
cs := &ChainStore{
chainBlockstore: chainBs,
stateBlockstore: stateBs,
chainLocalBlockstore: localbs,
weight: weight,
metadataDs: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
mmCache: c,
tsCache: tsc,
cancelFn: cancel,
journal: j,
}
cs.evtTypes = [1]journal.EventType{
evtTypeHeadChange: j.RegisterEventType("sync", "head_change"),
}
ci := NewChainIndex(cs.LoadTipSet)
cs.cindex = ci
hcnf := func(rev, app []*types.TipSet) error {
cs.pubLk.Lock()
defer cs.pubLk.Unlock()
notif := make([]*api.HeadChange, len(rev)+len(app))
for i, r := range rev {
notif[i] = &api.HeadChange{
Type: HCRevert,
Val: r,
}
}
for i, r := range app {
notif[i+len(rev)] = &api.HeadChange{
Type: HCApply,
Val: r,
}
}
cs.bestTips.Pub(notif, "headchange")
return nil
}
hcmetric := func(rev, app []*types.TipSet) error {
for _, r := range app {
stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}
cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})
return cs
}
func (cs *ChainStore) Close() error {
cs.cancelFn()
cs.wg.Wait()
return nil
}
func (cs *ChainStore) Load(ctx context.Context) error {
if err := cs.loadHead(ctx); err != nil {
return err
}
if err := cs.loadCheckpoint(ctx); err != nil {
return err
}
return nil
}
func (cs *ChainStore) loadHead(ctx context.Context) error {
head, err := cs.metadataDs.Get(ctx, chainHeadKey)
if err == dstore.ErrNotFound {
log.Warn("no previous chain state found")
return nil
}
if err != nil {
return xerrors.Errorf("failed to load chain state from datastore: %w", err)
}
var tscids []cid.Cid
if err := json.Unmarshal(head, &tscids); err != nil {
return xerrors.Errorf("failed to unmarshal stored chain head: %w", err)
}
ts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(tscids...))
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.heaviest = ts
return nil
}
func (cs *ChainStore) loadCheckpoint(ctx context.Context) error {
tskBytes, err := cs.metadataDs.Get(ctx, checkpointKey)
if err == dstore.ErrNotFound {
return nil
}
if err != nil {
return xerrors.Errorf("failed to load checkpoint from datastore: %w", err)
}
var tsk types.TipSetKey
err = json.Unmarshal(tskBytes, &tsk)
if err != nil {
return err
}
ts, err := cs.LoadTipSet(ctx, tsk)
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.checkpoint = ts
return nil
}
func (cs *ChainStore) writeHead(ctx context.Context, ts *types.TipSet) error {
data, err := json.Marshal(ts.Cids())
if err != nil {
return xerrors.Errorf("failed to marshal tipset: %w", err)
}
if err := cs.metadataDs.Put(ctx, chainHeadKey, data); err != nil {
return xerrors.Errorf("failed to write chain head to datastore: %w", err)
}
return nil
}
const (
HCRevert = "revert"
HCApply = "apply"
HCCurrent = "current"
)
func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange {
cs.pubLk.Lock()
subch := cs.bestTips.Sub("headchange")
head := cs.GetHeaviestTipSet()
cs.pubLk.Unlock()
out := make(chan []*api.HeadChange, 16)
out <- []*api.HeadChange{{
Type: HCCurrent,
Val: head,
}}
go func() {
defer func() {
// Tell the caller we're done first, the following may block for a bit.
close(out)
// Unsubscribe.
cs.bestTips.Unsub(subch)
// Drain the channel.
for range subch {
}
}()
for {
select {
case val, ok := <-subch:
if !ok {
// Shutting down.
return
}
select {
case out <- val.([]*api.HeadChange):
default:
log.Errorf("closing head change subscription due to slow reader")
return
}
if len(out) > 5 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
case <-ctx.Done():
return
}
}
}()
return out
}
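// Illustrative consumer sketch for SubHeadChanges (hypothetical caller, not part of the
// original file):
//
//	for changes := range cs.SubHeadChanges(ctx) {
//		for _, hc := range changes {
//			switch hc.Type {
//			case HCCurrent, HCApply: // hc.Val is the current or newly applied head
//			case HCRevert: // hc.Val was rolled back
//			}
//		}
//	}
//
// The first message always carries HCCurrent with the head at subscription time, and a
// slow reader has its channel closed, as implemented above.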
func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
cs.reorgNotifeeCh <- f
}
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
return cs.metadataDs.Has(ctx, key)
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.metadataDs.Put(ctx, key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
return nil
}
func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.metadataDs.Delete(ctx, key); err != nil {
return xerrors.Errorf("removing from valid block cache: %w", err)
}
return nil
}
func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
return err
}
if err := cs.PutTipSet(ctx, ts); err != nil {
return err
}
return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
}
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
for _, b := range ts.Blocks() {
if err := cs.PersistBlockHeaders(ctx, b); err != nil {
return err
}
}
expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
if err != nil {
return xerrors.Errorf("errored while expanding tipset: %w", err)
}
log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
}
return nil
}
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
// head and does not exceed the maximum fork length.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
for {
cs.heaviestLk.Lock()
if len(cs.reorgCh) < reorgChBuf/2 {
break
}
cs.heaviestLk.Unlock()
log.Errorf("reorg channel is heavily backlogged, waiting a bit before trying to take process new tipsets")
select {
case <-time.After(time.Second / 2):
case <-ctx.Done():
return ctx.Err()
}
}
defer cs.heaviestLk.Unlock()
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
if err != nil {
return err
}
heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
if err != nil {
return err
}
heavier := w.GreaterThan(heaviestW)
if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
heavier = breakWeightTie(ts, cs.heaviest)
}
if heavier {
// TODO: don't do this for initial sync. Now that we don't have a
// difference between 'bootstrap sync' and 'caught up' sync, we need
// some other heuristic.
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
if err != nil {
return err
}
if exceeds {
return nil
}
return cs.takeHeaviestTipSet(ctx, ts)
}
return nil
}
// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
// `synced` is the head of the chain we are currently synced to and `external`
// is the incoming tipset potentially belonging to a forked chain. It assumes
// the external chain has already been validated and is available in the ChainStore.
// The "fast forward" case is covered in this logic as a valid fork of length 0.
//
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {
if synced == nil || external == nil {
// FIXME: If `cs.heaviest` is nil we should just bypass the entire
// `MaybeTakeHeavierTipSet` logic (instead of each of the called
// functions having to handle the nil case on their own).
return false, nil
}
var err error
// `forkLength`: number of tipsets we need to walk back from our `synced`
// chain to the common ancestor with the new `external` head in order to
// adopt the fork.
for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
// First walk back as many tipsets in the external chain to match the
// `synced` height to compare them. If we go past the `synced` height
// the subsequent match will fail but it will still be useful to get
// closer to the `synced` head parent's height in the next loop.
for external.Height() > synced.Height() {
if external.Height() == 0 {
// We reached the genesis of the external chain without a match;
// this is considered a fork outside the allowed limit (of "infinite"
// length).
return true, nil
}
external, err = cs.LoadTipSet(ctx, external.Parents())
if err != nil {
return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
}
}
// Now check if we arrived at the common ancestor.
if synced.Equals(external) {
return false, nil
}
// Now check to see if we've walked back to the checkpoint.
if synced.Equals(cs.checkpoint) {
return true, nil
}
// If we didn't, go back *one* tipset on the `synced` side (incrementing
// the `forkLength`).
if synced.Height() == 0 {
// Same check as the `external` side, if we reach the start (genesis)
// there is no common ancestor.
return true, nil
}
synced, err = cs.LoadTipSet(ctx, synced.Parents())
if err != nil {
return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
}
}
// We traversed the fork length allowed without finding a common ancestor.
return true, nil
}
// ForceHeadSilent forces a chain head tipset without triggering a reorg
// operation.
//
// CAUTION: Use it only for testing, such as to teleport the chain to a
// particular tipset to carry out a benchmark, verification, etc. on a chain
// segment.
func (cs *ChainStore) ForceHeadSilent(ctx context.Context, ts *types.TipSet) error {
log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if err := cs.removeCheckpoint(ctx); err != nil {
return err
}
cs.heaviest = ts
err := cs.writeHead(ctx, ts)
if err != nil {
err = xerrors.Errorf("failed to write chain head: %s", err)
}
return err
}
type reorg struct {
old *types.TipSet
new *types.TipSet
}
const reorgChBuf = 32
func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
out := make(chan reorg, reorgChBuf)
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
cs.wg.Add(1)
go func() {
defer cs.wg.Done()
defer log.Warn("reorgWorker quit")
for {
select {
case n := <-cs.reorgNotifeeCh:
notifees = append(notifees, n)
case r := <-out:
revert, apply, err := cs.ReorgOps(ctx, r.old, r.new)
if err != nil {
log.Error("computing reorg ops failed: ", err)
continue
}
cs.journal.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
return HeadChangeEvt{
From: r.old.Key(),
FromHeight: r.old.Height(),
To: r.new.Key(),
ToHeight: r.new.Height(),
RevertCount: len(revert),
ApplyCount: len(apply),
}
})
// reverse the apply array
for i := len(apply)/2 - 1; i >= 0; i-- {
opp := len(apply) - 1 - i
apply[i], apply[opp] = apply[opp], apply[i]
}
var toremove map[int]struct{}
for i, hcf := range notifees {
err := hcf(revert, apply)
switch err {
case nil:
case ErrNotifeeDone:
if toremove == nil {
toremove = make(map[int]struct{})
}
toremove[i] = struct{}{}
default:
log.Error("head change func errored (BAD): ", err)
}
}
if len(toremove) > 0 {
newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove))
for i, hcf := range notifees {
_, remove := toremove[i]
if remove {
continue
}
newNotifees = append(newNotifees, hcf)
}
notifees = newNotifees
}
case <-ctx.Done():
return
}
}
}()
return out
}
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
// memory and in the ChainStore. It also sends a notification to deliver to
// ReorgNotifees.
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()
if cs.heaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: cs.heaviest,
new: ts,
}
} else {
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
}
span.AddAttributes(trace.BoolAttribute("newHead", true))
log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
cs.heaviest = ts
if err := cs.writeHead(ctx, ts); err != nil {
log.Errorf("failed to write chain head: %s", err)
return nil
}
return nil
}
// FlushValidationCache removes all results of block validation from the
// chain metadata store. Usually the first step after a new chain import.
func (cs *ChainStore) FlushValidationCache(ctx context.Context) error {
return FlushValidationCache(ctx, cs.metadataDs)
}
func FlushValidationCache(ctx context.Context, ds dstore.Batching) error {
log.Infof("clearing block validation cache...")
dsWalk, err := ds.Query(ctx, query.Query{
// Potential TODO: the validation cache is not a namespace on its own
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
// in turn does not work with the filter, which can match only on `foo/bar`
//
// If this is addressed (blockcache goes into its own sub-namespace) then
// strings.HasPrefix(...) below can be skipped
//
//Prefix: blockValidationCacheKeyPrefix.String()
KeysOnly: true,
})
if err != nil {
return xerrors.Errorf("failed to initialize key listing query: %w", err)
}
allKeys, err := dsWalk.Rest()
if err != nil {
return xerrors.Errorf("failed to run key listing query: %w", err)
}
batch, err := ds.Batch(ctx)
if err != nil {
return xerrors.Errorf("failed to open a DS batch: %w", err)
}
delCnt := 0
for _, k := range allKeys {
if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
delCnt++
batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck
}
}
if err := batch.Commit(ctx); err != nil {
return xerrors.Errorf("failed to commit the DS batch: %w", err)
}
log.Infof("%d block validation entries cleared.", delCnt)
return nil
}
// SetHead sets the chainstore's current 'best' head node.
// This should only be called if something is broken and needs fixing.
//
// This function will bypass and remove any checkpoints.
func (cs *ChainStore) SetHead(ctx context.Context, ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if err := cs.removeCheckpoint(ctx); err != nil {
return err
}
return cs.takeHeaviestTipSet(context.TODO(), ts)
}
// RemoveCheckpoint removes the current checkpoint.
func (cs *ChainStore) RemoveCheckpoint(ctx context.Context) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
return cs.removeCheckpoint(ctx)
}
func (cs *ChainStore) removeCheckpoint(ctx context.Context) error {
if err := cs.metadataDs.Delete(ctx, checkpointKey); err != nil {
return err
}
cs.checkpoint = nil
return nil
}
// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks.
//
// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past.
func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error {
tskBytes, err := json.Marshal(ts.Key())
if err != nil {
return err
}
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if ts.Height() > cs.heaviest.Height() {
return xerrors.Errorf("cannot set a checkpoint in the future")
}
// Otherwise, this operation could get _very_ expensive.
if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
}
if !ts.Equals(cs.heaviest) {
anc, err := cs.IsAncestorOf(ctx, ts, cs.heaviest)
if err != nil {
return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
}
if !anc {
return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
}
}
err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes)
if err != nil {
return err
}
cs.checkpoint = ts
return nil
}
func (cs *ChainStore) GetCheckpoint() *types.TipSet {
cs.heaviestLk.RLock()
chkpt := cs.checkpoint
cs.heaviestLk.RUnlock()
return chkpt
}
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ctx context.Context, ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() {
has, err := cs.chainBlockstore.Has(ctx, c)
if err != nil {
return false, err
}
if !has {
return false, nil
}
}
return true, nil
}
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) {
var blk *types.BlockHeader
err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) {
blk, err = types.DecodeBlock(b)
return err
})
return blk, err
}
func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
v, ok := cs.tsCache.Get(tsk)
if ok {
return v.(*types.TipSet), nil
}
// Fetch tipset block headers from blockstore in parallel
var eg errgroup.Group
cids := tsk.Cids()
blks := make([]*types.BlockHeader, len(cids))
for i, c := range cids {
i, c := i, c
eg.Go(func() error {
b, err := cs.GetBlock(ctx, c)
if err != nil {
return xerrors.Errorf("get block %s: %w", c, err)
}
blks[i] = b
return nil
})
}
err := eg.Wait()
if err != nil {
return nil, err
}
ts, err := types.NewTipSet(blks)
if err != nil {
return nil, err
}
cs.tsCache.Add(tsk, ts)
return ts, nil
}
// IsAncestorOf returns true if 'a' is an ancestor of 'b'
func (cs *ChainStore) IsAncestorOf(ctx context.Context, a, b *types.TipSet) (bool, error) {
if b.Height() <= a.Height() {
return false, nil
}
cur := b
for !a.Equals(cur) && cur.Height() > a.Height() {
next, err := cs.LoadTipSet(ctx, cur.Parents())
if err != nil {
return false, err
}
cur = next
}
return cur.Equals(a), nil
}
func (cs *ChainStore) NearestCommonAncestor(ctx context.Context, a, b *types.TipSet) (*types.TipSet, error) {
l, _, err := cs.ReorgOps(ctx, a, b)
if err != nil {
return nil, err
}
return cs.LoadTipSet(ctx, l[len(l)-1].Parents())
}
// ReorgOps takes two tipsets (which can be at different heights), and walks
// their corresponding chains backwards one step at a time until we find
// a common ancestor. It then returns the respective chain segments that fork
// from the identified ancestor, in reverse order, where the first element of
// each slice is the supplied tipset, and the last element is the common
// ancestor.
//
// If an error happens along the way, we return the error with nil slices.
func (cs *ChainStore) ReorgOps(ctx context.Context, a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
return ReorgOps(ctx, cs.LoadTipSet, a, b)
}
func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
left := a
right := b
var leftChain, rightChain []*types.TipSet
for !left.Equals(right) {
if left.Height() > right.Height() {
leftChain = append(leftChain, left)
par, err := lts(ctx, left.Parents())
if err != nil {
return nil, nil, err
}
left = par
} else {
rightChain = append(rightChain, right)
par, err := lts(ctx, right.Parents())
if err != nil {
log.Infof("failed to fetch right.Parents: %s", err)
return nil, nil, err
}
right = par
}
}
return leftChain, rightChain, nil
}
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
cs.heaviestLk.RLock()
ts = cs.heaviest
cs.heaviestLk.RUnlock()
return
}
func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHeader) error {
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
tss := cs.tipsets[b.Height]
for _, oc := range tss {
if oc == b.Cid() {
log.Debug("tried to add block to tipset tracker that was already there")
return nil
}
h, err := cs.GetBlock(ctx, oc)
if err == nil && h != nil {
if h.Miner == b.Miner {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
}
}
}
// This function is called 5 times per epoch on average
// It is also called with tipsets that are done with initial validation
// so they cannot be from the future.
// We are guaranteed not to use tipsets older than 900 epochs (fork limit)
// This means that we ideally want to keep only the most recent 900 epochs in here
// Golang's map iteration starts at a random point in a map.
// With 5 tries per epoch, and 900 entries to keep, on average we will have
// ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
// Seems good enough to me
for height := range cs.tipsets {
if height < b.Height-build.Finality {
delete(cs.tipsets, height)
}
break
}
cs.tipsets[b.Height] = append(tss, b.Cid())
return nil
}
func (cs *ChainStore) PersistBlockHeaders(ctx context.Context, b ...*types.BlockHeader) error {
sbs := make([]block.Block, len(b))
for i, header := range b {
var err error
sbs[i], err = header.ToStorageBlock()
if err != nil {
return err
}
}
batchSize := 256
calls := len(b) / batchSize
var err error
for i := 0; i <= calls; i++ {
start := batchSize * i
end := start + batchSize
if end > len(b) {
end = len(b)
}
err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(ctx, sbs[start:end]))
}
return err
}
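// Worked example of the batching above (descriptive, not part of the original file):
// with batchSize = 256 and len(b) = 600, calls = 600/256 = 2, so the loop runs for
// i = 0, 1, 2 and persists the slices sbs[0:256], sbs[256:512] and sbs[512:600];
// per-batch errors are accumulated with multierr.Append and returned together.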
func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
// Hold lock for the whole function for now, if it becomes a problem we can
// fix pretty easily
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
all := []*types.BlockHeader{b}
tsets, ok := cs.tipsets[b.Height]
if !ok {
return types.NewTipSet(all)
}
inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
for _, bhc := range tsets {
if bhc == b.Cid() {
continue
}
h, err := cs.GetBlock(ctx, bhc)
if err != nil {
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
if cid, found := inclMiners[h.Miner]; found {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
continue
}
if types.CidArrsEqual(h.Parents, b.Parents) {
all = append(all, h)
inclMiners[h.Miner] = bhc
}
}
// TODO: other validation...?
return types.NewTipSet(all)
}
func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error {
if err := cs.PersistBlockHeaders(ctx, b); err != nil {
return err
}
ts, err := cs.expandTipset(ctx, b)
if err != nil {
return err
}
if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err)
}
return nil
}
func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
data, err := cs.metadataDs.Get(ctx, dstore.NewKey("0"))
if err != nil {
return nil, err
}
c, err := cid.Cast(data)
if err != nil {
return nil, err
}
return cs.GetBlock(ctx, c)
}
// GetPath returns the sequence of atomic head change operations that
// need to be applied in order to switch the head of the chain from the `from`
// tipset to the `to` tipset.
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
fts, err := cs.LoadTipSet(ctx, from)
if err != nil {
return nil, xerrors.Errorf("loading from tipset %s: %w", from, err)
}
tts, err := cs.LoadTipSet(ctx, to)
if err != nil {
return nil, xerrors.Errorf("loading to tipset %s: %w", to, err)
}
revert, apply, err := cs.ReorgOps(ctx, fts, tts)
if err != nil {
return nil, xerrors.Errorf("error getting tipset branches: %w", err)
}
path := make([]*api.HeadChange, len(revert)+len(apply))
for i, r := range revert {
path[i] = &api.HeadChange{Type: HCRevert, Val: r}
}
for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 {
path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]}
}
return path, nil
}
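// Illustrative reading of GetPath's output (hypothetical chain shape, not part of the
// original file): given a chain A -> B -> C and a fork A -> D, GetPath(from: C, to: D)
// yields [revert C, revert B, apply D]; reverts walk back from `from` to the common
// ancestor, then applies replay the other branch in chain order towards `to`.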
// ChainBlockstore returns the chain blockstore. Currently the chain and state
// stores are both backed by the same physical store, albeit with different
// caching policies, but in the future they will segregate.
func (cs *ChainStore) ChainBlockstore() bstore.Blockstore {
return cs.chainBlockstore
}
// StateBlockstore returns the state blockstore. Currently the chain and state
// stores are both backed by the same physical store, albeit with different
// caching policies, but in the future they will segregate.
func (cs *ChainStore) StateBlockstore() bstore.Blockstore {
return cs.stateBlockstore
}
func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store {
return adt.WrapStore(ctx, cbor.NewCborStore(bs))
}
func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store {
return ActorStore(ctx, cs.stateBlockstore)
}
func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*FullTipSet, error) {
var out []*types.FullBlock
for _, b := range ts.Blocks() {
bmsgs, smsgs, crossmsg, err := cs.MessagesForBlock(ctx, b)
if err != nil {
// TODO: check for 'not found' errors, and only return nil if this
// is actually a 'not found' error
return nil, nil
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
CrossMessages: crossmsg,
}
out = append(out, fb)
}
return NewFullTipSet(out), nil
}
// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given
// height. In the case that the given height is a null round, the 'prev' flag
// selects the tipset before the null round if true, and the tipset following
// the null round if false.
func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
if h > ts.Height() {
return nil, xerrors.Errorf("looking for tipset with height greater than start point")
}
if h == ts.Height() {
return ts, nil
}
lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
if err != nil {
return nil, err
}
if lbts.Height() < h {
log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h)
lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ctx, ts, h)
if err != nil {
return nil, err
}
}
if lbts.Height() == h || !prev {
return lbts, nil
}
return cs.LoadTipSet(ctx, lbts.Parents())
}
func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove
return cs.weight(ctx, cs.StateBlockstore(), hts)
}
// true if ts1 wins according to the filecoin tie-break rule
func breakWeightTie(ts1, ts2 *types.TipSet) bool {
s := len(ts1.Blocks())
if s > len(ts2.Blocks()) {
s = len(ts2.Blocks())
}
// blocks are already sorted by ticket
for i := 0; i < s; i++ {
if ts1.Blocks()[i].Ticket.Less(ts2.Blocks()[i].Ticket) {
log.Infof("weight tie broken in favour of %s", ts1.Key())
return true
}
}
log.Infof("weight tie left unbroken, default to %s", ts2.Key())
return false
}
func (cs *ChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
if tsk.IsEmpty() {
return cs.GetHeaviestTipSet(), nil
}
return cs.LoadTipSet(ctx, tsk)
}
func (cs *ChainStore) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := cs.LoadTipSet(ctx, cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return &types.BeaconEntry{
Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
}, nil
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
|
[
"\"LOTUS_CHAIN_TIPSET_CACHE\"",
"\"LOTUS_CHAIN_MSGMETA_CACHE\"",
"\"LOTUS_IGNORE_DRAND\""
] |
[] |
[
"LOTUS_CHAIN_TIPSET_CACHE",
"LOTUS_IGNORE_DRAND",
"LOTUS_CHAIN_MSGMETA_CACHE"
] |
[]
|
["LOTUS_CHAIN_TIPSET_CACHE", "LOTUS_IGNORE_DRAND", "LOTUS_CHAIN_MSGMETA_CACHE"]
|
go
| 3 | 0 | |
toolset/run-ci.py
|
#!/usr/bin/env python
import subprocess
import os
import sys
import glob
import json
import traceback
import re
import logging
log = logging.getLogger('run-ci')
import time
import threading
from benchmark import framework_test
from benchmark.utils import gather_tests
from benchmark.utils import header
# Cross-platform colored text
from colorama import Fore, Back, Style
# Needed for various imports
sys.path.append('.')
sys.path.append('toolset/setup/linux')
sys.path.append('toolset/benchmark')
from setup.linux import setup_util
class CIRunner:
'''
Manages running TFB on the Travis Continuous Integration system.
Makes a best effort to avoid wasting time and resources by running
useless jobs.
Only verifies the first test in each directory
'''
SUPPORTED_DATABASES = "mysql postgres mongodb cassandra sqlite none".split()
def __init__(self, mode, testdir=None):
'''
mode = [cisetup|prereq|install|verify] for what we want to do
testdir = framework directory we are running
'''
self.directory = testdir
self.mode = mode
if mode == "cisetup":
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
try:
# NOTE: THIS IS VERY TRICKY TO GET RIGHT!
#
# Our goal: Look at the files changed and determine if we need to
# run a verification for this folder. For a pull request, we want to
# see the list of files changed by any commit in that PR. For a
# push to master, we want to see a list of files changed by the pushed
# commits. If this list of files contains the current directory, or
# contains the toolset/ directory, then we need to run a verification
#
# If modifying, please consider:
# - the commit range for a pull request is the first PR commit to
# the github auto-merge commit
# - the commits in the commit range may include merge commits
# other than the auto-merge commit. A git log with -m
# will know that *all* the files in the merge were changed,
# but that is not the changeset that we care about
# - git diff shows differences, but we care about git log, which
# shows information on what was changed during commits
# - master can (and will!) move during a build. This is one
# of the biggest problems with using git diff - master will
# be updated, and those updates will include changes to toolset,
# and suddenly every job in the build will start to run instead
# of fast-failing
# - commit_range is not set if there was only one commit pushed,
# so be sure to test for that on both master and PR
# - commit_range and commit are set very differently for pushes
# to an owned branch versus pushes to a pull request, test
# - For merge commits, the TRAVIS_COMMIT and TRAVIS_COMMIT_RANGE
# will become invalid if additional commits are pushed while a job is
# building. See https://github.com/travis-ci/travis-ci/issues/2666
# - If you're really insane, consider that the last commit in a
# pull request could have been a merge commit. This means that
# the github auto-merge commit could have more than two parents
# - Travis cannot really support rebasing onto an owned branch, the
# commit_range they provide will include commits that are non-existent
# in the repo cloned on the workers. See https://github.com/travis-ci/travis-ci/issues/2668
#
# - TEST ALL THESE OPTIONS:
# - On a branch you own (e.g. your fork's master)
# - single commit
# - multiple commits pushed at once
# - commit+push, then commit+push again before the first
# build has finished. Verify all jobs in the first build
# used the correct commit range
# - multiple commits, including a merge commit. Verify that
# the unrelated merge commit changes are not counted as
# changes the user made
# - On a pull request
# - repeat all above variations
#
#
# ==== CURRENT SOLUTION FOR PRs ====
#
# For pull requests, we will examine Github's automerge commit to see
# what files would be touched if we merged this into the current master.
# You can't trust the travis variables here, as the automerge commit can
# be different for jobs on the same build. See https://github.com/travis-ci/travis-ci/issues/2666
# We instead use the FETCH_HEAD, which will always point to the SHA of
# the latest merge commit. However, if we only used FETCH_HEAD then any
# new commits to a pull request would instantly start affecting currently
# running jobs and the list of changed files may become incorrect for
# those affected jobs. The solution is to walk backward from the FETCH_HEAD
# to the last commit in the pull request. Based on how github currently
# does the automerge, this is the second parent of FETCH_HEAD, and
# therefore we use FETCH_HEAD^2 below
#
# This may not work perfectly in situations where the user had advanced
# merging happening in their PR. We correctly handle them merging in
# from upstream, but if they do wild stuff then this will likely break
# on that. However, it will also likely break by seeing a change in
# toolset and triggering a full run when a partial run would be
# acceptable
#
# ==== CURRENT SOLUTION FOR OWNED BRANCHES (e.g. master) ====
#
# This one is fairly simple. Find the commit or commit range, and
# examine the log of files changes. If you encounter any merges,
# then fully explode the two parent commits that made the merge
# and look for the files changed there. This is an aggressive
# strategy to ensure that commits to master are always tested
# well
log.debug("TRAVIS_COMMIT_RANGE: %s", os.environ['TRAVIS_COMMIT_RANGE'])
log.debug("TRAVIS_COMMIT : %s", os.environ['TRAVIS_COMMIT'])
is_PR = (os.environ['TRAVIS_PULL_REQUEST'] != "false")
if is_PR:
log.debug('I am testing a pull request')
first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split('...')[0]
last_commit = subprocess.check_output("git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
log.debug("Guessing that first commit in PR is : %s", first_commit)
log.debug("Guessing that final commit in PR is : %s", last_commit)
if first_commit == "":
# Travis-CI is not yet passing a commit range for pull requests
# so we must use the automerge's changed file list. This has the
# negative effect that new pushes to the PR will immediately
# start affecting any new jobs, regardless of the build they are on
log.debug("No first commit, using Github's automerge commit")
self.commit_range = "--first-parent -1 -m FETCH_HEAD"
elif first_commit == last_commit:
# There is only one commit in the pull request so far,
# or Travis-CI is not yet passing the commit range properly
# for pull requests. We examine just the one commit using -1
#
# On the oddball chance that it's a merge commit, we pray
# it's a merge from upstream and also pass --first-parent
log.debug("Only one commit in range, examining %s", last_commit)
self.commit_range = "-m --first-parent -1 %s" % last_commit
else:
# In case they merged in upstream, we only care about the first
# parent. For crazier merges, we hope
self.commit_range = "--first-parent %s...%s" % (first_commit, last_commit)
if not is_PR:
log.debug('I am not testing a pull request')
# Three main scenarios to consider
# - 1 One non-merge commit pushed to master
# - 2 One merge commit pushed to master (e.g. a PR was merged).
# This is an example of merging a topic branch
# - 3 Multiple commits pushed to master
#
# 1 and 2 are actually handled the same way, by showing the
# changes being brought into to master when that one commit
# was merged. Fairly simple, `git log -1 COMMIT`. To handle
# the potential merge of a topic branch you also include
# `--first-parent -m`.
#
# 3 needs to be handled by comparing all merge children for
# the entire commit range. The best solution here would *not*
# use --first-parent because there is no guarantee that it
# reflects changes brought into master. Unfortunately we have
# no good method inside Travis-CI to easily differentiate
# scenario 1/2 from scenario 3, so I cannot handle them all
# separately. 1/2 are the most common cases, 3 with a range
# of non-merge commits is the next most common, and 3 with
# a range including merge commits is the least common, so I
# am choosing to make our Travis-CI setup potentially not work
# properly on the least common case by always using
# --first-parent
# Handle 3
# Note: Also handles 2 because Travis-CI sets COMMIT_RANGE for
# merged PR commits
self.commit_range = "--first-parent -m %s" % os.environ['TRAVIS_COMMIT_RANGE']
# Handle 1
if self.commit_range == "":
self.commit_range = "--first-parent -m -1 %s" % os.environ['TRAVIS_COMMIT']
except KeyError:
log.warning("I should only be used for automated integration tests e.g. Travis-CI")
log.warning("Were you looking for run-tests.py?")
self.commit_range = "-m HEAD^...HEAD"
#
# Find the one test from benchmark_config that we are going to run
#
tests = gather_tests()
self.fwroot = setup_util.get_fwroot()
target_dir = self.fwroot + '/frameworks/' + testdir
log.debug("Target directory is %s", target_dir)
dirtests = [t for t in tests if t.directory == target_dir]
# Travis-CI is linux only
osvalidtests = [t for t in dirtests if t.os.lower() == "linux"
and (t.database_os.lower() == "linux" or t.database_os.lower() == "none")]
# Our Travis-CI only has some databases supported
validtests = [t for t in osvalidtests if t.database.lower() in self.SUPPORTED_DATABASES]
supported_databases = ','.join(self.SUPPORTED_DATABASES)
log.info("Found %s usable tests (%s valid for linux, %s valid for linux and {%s}) in directory '%s'",
len(dirtests), len(osvalidtests), len(validtests), supported_databases, '$FWROOT/frameworks/' + testdir)
if len(validtests) == 0:
log.critical("Found no test that is possible to run in Travis-CI! Aborting!")
if len(osvalidtests) != 0:
log.critical("Note: Found these tests that could run in Travis-CI if more databases were supported")
log.critical("Note: %s", osvalidtests)
databases_needed = [t.database for t in osvalidtests]
databases_needed = list(set(databases_needed))
log.critical("Note: Here are the needed databases:")
log.critical("Note: %s", databases_needed)
sys.exit(1)
self.names = [t.name for t in validtests]
log.info("Using tests %s to verify directory %s", self.names, '$FWROOT/frameworks/' + testdir)
def _should_run(self):
'''
Decides if the current framework test should be tested.
Examines git commits included in the latest push to see if any files relevant to
this framework were changed.
If you do rewrite history (e.g. rebase) then it's up to you to ensure that both
old and new (e.g. old...new) are available in the public repository. For simple
rebase onto the public master this is not a problem, only more complex rebases
may have issues
'''
# Don't use git diff multiple times, it's mega slow sometimes
# Put flag on filesystem so that future calls to run-ci see it too
if os.path.isfile('.run-ci.should_run'):
return True
if os.path.isfile('.run-ci.should_not_run'):
return False
def touch(fname):
open(fname, 'a').close()
log.debug("Using commit range `%s`", self.commit_range)
log.debug("Running `git log --name-only --pretty=\"format:\" %s`" % self.commit_range)
changes = ""
try:
changes = subprocess.check_output("git log --name-only --pretty=\"format:\" %s" % self.commit_range, shell=True)
except subprocess.CalledProcessError, e:
log.error("Got errors when using git to detect your changes, assuming that we must run this verification!")
log.error("Error was: %s", e.output)
log.error("Did you rebase a branch? If so, you can safely disregard this error, it's a Travis limitation")
return True
changes = os.linesep.join([s for s in changes.splitlines() if s]) # drop empty lines
if len(changes.splitlines()) > 1000:
log.debug("Change list is >1000 lines, uploading to sprunge.us instead of printing to console")
url = subprocess.check_output("git log --name-only %s | curl -F 'sprunge=<-' http://sprunge.us" % self.commit_range, shell=True)
log.debug("Uploaded to %s", url)
else:
log.debug("Result:\n%s", changes)
# Look for changes to core TFB framework code
if re.search(r'^toolset/', changes, re.M) is not None:
log.info("Found changes to core framework code")
touch('.run-ci.should_run')
return True
# Look for changes relevant to this test
if re.search("^frameworks/%s/" % re.escape(self.directory), changes, re.M) is None:
log.info("No changes found for directory %s", self.directory)
touch('.run-ci.should_not_run')
return False
log.info("Changes found for directory %s", self.directory)
touch('.run-ci.should_run')
return True
def run(self):
''' Do the requested command using TFB '''
if not self._should_run():
log.info("I found no changes to `%s` or `toolset/`, aborting verification", self.directory)
return 0
if self.mode == 'cisetup':
self.run_travis_setup()
return 0
names = ' '.join(self.names)
command = 'toolset/run-tests.py '
if self.mode == 'prereq':
command = command + "--install server --install-only --test '' --verbose"
elif self.mode == 'install':
command = command + "--install server --install-only --test %s" % names
elif self.mode == 'verify':
command = command + "--mode verify --test %s" % names
else:
log.critical('Unknown mode passed')
return 1
# Run the command
log.info("Running mode %s with commmand %s", self.mode, command)
try:
p = subprocess.Popen(command, shell=True)
p.wait()
return p.returncode
except subprocess.CalledProcessError:
log.critical("Subprocess Error")
print traceback.format_exc()
return 1
except Exception as err:
log.critical("Exception from running+wait on subprocess")
log.error(err.child_traceback)
return 1
def run_travis_setup(self):
log.info("Setting up Travis-CI")
script = '''
export DEBIAN_FRONTEND=noninteractive
# Turn on command tracing
set -x
# Setup Apt For MongoDB
# Due to TechEmpower/FrameworkBenchmarks#989 and travis-ci/travis-ci#2655,
# we put this into a loop
until timeout 15s sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10; do echo 'Waiting for apt-key' ; done
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
# Setup apt for Apache Cassandra
until timeout 15s sudo apt-key adv --keyserver pgp.mit.edu --recv 4BD736A82B5C1B00; do echo 'Waiting for apt-key' ; done
sudo apt-add-repository 'deb http://www.apache.org/dist/cassandra/debian 20x main'
# Run installation
# DO NOT COPY --force-yes TO ANY NON-TRAVIS-CI SCRIPTS! Seriously, it can cause some
# major damage and should only be used inside a VM or Linux Container
sudo apt-get -q update
sudo apt-get -q -y --force-yes install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
mongodb-org \
cassandra \
openssh-server
# Run as travis user (who already has passwordless sudo)
ssh-keygen -f /home/travis/.ssh/id_rsa -N '' -t rsa
cat /home/travis/.ssh/id_rsa.pub > /home/travis/.ssh/authorized_keys
chmod 600 /home/travis/.ssh/authorized_keys
# Set up the benchmark.cfg for travis user
# NOTE: Please don't just copy the example config - it causes unexpected
# issues when those example variables change
echo "[Defaults]" > benchmark.cfg
echo "client_identity_file=/home/travis/.ssh/id_rsa" >> benchmark.cfg
echo "database_identity_file=/home/travis/.ssh/id_rsa" >> benchmark.cfg
echo "client_host=127.0.0.1" >> benchmark.cfg
echo "database_host=127.0.0.1" >> benchmark.cfg
echo "server_host=127.0.0.1" >> benchmark.cfg
echo "client_user=travis" >> benchmark.cfg
echo "database_user=travis" >> benchmark.cfg
echo "runner_user=testrunner" >> benchmark.cfg
# Create the new testrunner user
sudo useradd testrunner
# Give him a home dir
sudo mkdir /home/testrunner
# Make testrunner the owner of his home dir
sudo chown testrunner:testrunner /home/testrunner
# Add the testrunner user to every group that the travis user is in
sudo sed -i 's|:travis|:travis,testrunner|g' /etc/group
# Add the testrunner user to the travis group specifically
sudo sed -i 's|travis:x:\(.*\):|travis:x:\\1:testrunner|g' /etc/group
# Maybe unneeded - add the travis user to the testrunner group
sudo sed -i 's|testrunner:x:\(.*\):|testrunner:x:\\1:travis|g' /etc/group
# Need to add testrunner to the sudoers group AND default him to a sudoers
# because the travis user isn't in the sudo group - he's a sudoer.
echo "testrunner ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers
# Set the default shell for testrunner to /bin/bash
sudo sed -i 's|/home/testrunner:/bin/sh|/home/testrunner:/bin/bash|g' /etc/passwd
# =============Setup Databases===========================
# NOTE: Do not run `--install database` in travis-ci!
# It changes DB configuration files and will break everything
# =======================================================
# Setup MySQL
echo "Populating MySQL database"
mysql -uroot < config/create.sql
# Setup Postgres
echo "Removing Postgres 9.1 from Travis-CI"
sudo apt-get remove -qy postgresql postgresql-9.1 postgresql-client-9.1
sudo apt-get install -qy postgresql-9.3 postgresql-client-9.3
echo "Populating Postgres database"
psql --version
sudo useradd benchmarkdbuser -p benchmarkdbpass
sudo -u postgres psql template1 < config/create-postgres-database.sql
sudo -u benchmarkdbuser psql hello_world < config/create-postgres.sql
# Setup Apache Cassandra
echo "Populating Apache Cassandra database"
until nc -z localhost 9160 ; do echo Waiting for Cassandra; sleep 1; done
cat config/cassandra/cleanup-keyspace.cql | sudo cqlsh
python config/cassandra/db-data-gen.py > config/cassandra/tfb-data.cql
sudo cqlsh -f config/cassandra/create-keyspace.cql
sudo cqlsh -f config/cassandra/tfb-data.cql
# Setup MongoDB
echo "Populating MongoDB database"
until nc -z localhost 27017 ; do echo Waiting for MongoDB; sleep 1; done
mongod --version
mongo < config/create.js
# =============Modify Configurations===========================
# It can be useful to enable debug features for verification
# inside Travis-CI
# =======================================================
sed -i 's|display_errors\] = off|display_errors\] = on|' config/php-fpm.conf
'''
def sh(command):
log.info("Running `%s`", command)
subprocess.check_call(command, shell=True)
for command in script.split('\n'):
command = command.lstrip()
if command != "" and command[0] != '#':
sh(command.lstrip())
if __name__ == "__main__":
args = sys.argv[1:]
usage = '''Usage: toolset/run-ci.py [cisetup|prereq|install|verify] <framework-directory>
run-ci.py selects one test from <framework-directory>/benchmark_config, and
automates a number of calls into run-tests.py specific to the selected test.
It is guaranteed to always select the same test from the benchmark_config, so
multiple runs with the same <framework-directory> reference the same test.
The name of the selected test will be printed to standard output.
cisetup - configure the Travis-CI environment for our test suite
prereq - trigger standard prerequisite installation
install - trigger server installation for the selected test_directory
verify - run a verification on the selected test using `--mode verify`
run-ci.py expects to be run inside the Travis-CI build environment, and
will expect environment variables such as $TRAVIS_BUILD'''
if len(args) != 2:
print usage
sys.exit(1)
mode = args[0]
testdir = args[1]
if len(args) == 2 and (mode == "install"
or mode == "verify"
or mode == 'prereq'
or mode == 'cisetup'):
runner = CIRunner(mode, testdir)
else:
print usage
sys.exit(1)
retcode = 0
try:
retcode = runner.run()
except KeyError as ke:
log.warning("Environment key missing, are you running inside Travis-CI?")
print traceback.format_exc()
retcode = 1
except Exception:
log.critical("Unknown error")
print traceback.format_exc()
retcode = 1
finally: # Ensure that logs are printed
# Only print logs if we ran a verify
if mode != 'verify':
sys.exit(retcode)
# Only print logs if we actually did something
if os.path.isfile('.run-ci.should_not_run'):
sys.exit(retcode)
log.error("Running inside Travis-CI, so I will print err and out to console...")
for name in runner.names:
log.error("Test %s", name)
try:
log.error("Here is ERR:")
with open("results/ec2/latest/logs/%s/err.txt" % name, 'r') as err:
for line in err:
log.info(line.rstrip('\n'))
except IOError:
log.error("No ERR file found")
try:
log.error("Here is OUT:")
with open("results/ec2/latest/logs/%s/out.txt" % name, 'r') as out:
for line in out:
log.info(line.rstrip('\n'))
except IOError:
log.error("No OUT file found")
log.error("Running inside Travis-CI, so I will print a copy of the verification summary")
results = None
try:
with open('results/ec2/latest/results.json', 'r') as f:
results = json.load(f)
except IOError:
log.critical("No results.json found, unable to print verification summary")
sys.exit(retcode)
target_dir = setup_util.get_fwroot() + '/frameworks/' + testdir
dirtests = [t for t in gather_tests() if t.directory == target_dir]
# Normally you don't have to use Fore.* before each line, but
# Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
# or stream flush, so we have to ensure that the color code is printed repeatedly
prefix = Fore.CYAN
for line in header("Verification Summary", top='=', bottom='').split('\n'):
print prefix + line
for test in dirtests:
print prefix + "| Test: %s" % test.name
if test.name not in runner.names:
print prefix + "| " + Fore.YELLOW + "Unable to verify in Travis-CI"
elif test.name in results['verify'].keys():
for test_type, result in results['verify'][test.name].iteritems():
if result.upper() == "PASS":
color = Fore.GREEN
elif result.upper() == "WARN":
color = Fore.YELLOW
else:
color = Fore.RED
print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
else:
print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
print prefix + header('', top='', bottom='=') + Style.RESET_ALL
sys.exit(retcode)
# vim: set sw=2 ts=2 expandtab
|
[] |
[] |
[
"TRAVIS_COMMIT_RANGE",
"TRAVIS_PULL_REQUEST",
"TRAVIS_COMMIT"
] |
[]
|
["TRAVIS_COMMIT_RANGE", "TRAVIS_PULL_REQUEST", "TRAVIS_COMMIT"]
|
python
| 3 | 0 | |
gw_bot/api_in_lambda/Git_Lambda.py
|
import os
import git
from osbot_aws.apis.Secrets import Secrets
from osbot_utils.utils.Files import Files
class Git_Lambda:
def __init__(self,repo_name):
self.repo_name = repo_name
self.aws_secret = 'git-{0}'.format(self.repo_name)
self.git_org = None
self.git_repo = None
self.path_temp = '/tmp'
self.path_repo = '{0}/{1}'.format(self.path_temp,repo_name)
self.remote = 'origin'
self.branch = 'master'
self.author_name = 'oss-bot-dinis'
self.author_email = '[email protected]'
self.commit_message = 'Lambda auto-commit:'
self.exec_stdout = None
self.exec_stderr = None
self.set_up_commit_user()
def git_exec(self, *params,cwd=None):
if cwd is None:
cwd = self.path_repo
stdout, stderr = git.exec_command(*params,cwd=cwd)
self.exec_stderr = stderr.decode()
self.exec_stdout = stdout.decode()
return self.exec_stdout
def repo_url(self):
data = Secrets(self.aws_secret).value_from_json_string()
return 'https://{0}:{1}@github.com/{2}/{3}.git'.format(data.get('username'),
data.get('password'),
data.get('git_org' ),
self.repo_name)
# os.environ['GIT_USERNAME'] = data.get('username') # not working
# os.environ['GIT_PASSWORD'] = data.get('password')
#return 'https://github.com/{2}/{3}.git'.format(data.get('username'),data.get('password'),self.git_org, self.git_repo)
def repo_files(self):
return Files.files(Files.path_combine(self.path_repo,'**'))
def set_up_commit_user(self):
os.environ['GIT_AUTHOR_NAME' ] = self.author_name
os.environ['GIT_AUTHOR_EMAIL' ] = self.author_email
os.environ['GIT_COMMITTER_NAME' ] = self.author_name
os.environ['GIT_COMMITTER_EMAIL'] = self.author_email
def exists(self):
return Files.exists(self.path_repo)
# git commands
def commit(self, message=None):
if message is None:
message = "{0}: {1}".format(self.commit_message, self.status())
self.git_exec('commit', '-a', '-m', message)
return self
def clone (self):
if self.exists() is False:
self.git_exec('clone' , self.repo_url() , cwd=self.path_temp)
return self
def diff (self):
return self.git_exec('diff')
def log_pretty(self):
return self.git_exec('log', '--pretty=oneline', '-n10')
def pull(self):
self.git_exec('pull', '--no-edit', self.remote, self.branch)
return self
def push(self):
self.git_exec('push', self.remote, self.branch)
return self
def status (self):
return self.git_exec('status')
def reset_hard (self):
return self.git_exec('reset','--hard','HEAD')
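# Illustrative usage sketch (hypothetical repo name, not part of the original file):
# clone(), pull(), commit() and push() all return self, so calls can be chained:
#   Git_Lambda('my-repo').clone().pull().commit('update generated files').push()
# Credentials are resolved by repo_url() from the AWS secret named 'git-<repo_name>'.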
|
[] |
[] |
[
"GIT_AUTHOR_EMAIL' ",
"GIT_AUTHOR_NAME' ",
"GIT_COMMITTER_EMAIL",
"GIT_PASSWORD",
"GIT_COMMITTER_NAME'",
"GIT_USERNAME"
] |
[]
|
["GIT_AUTHOR_EMAIL' ", "GIT_AUTHOR_NAME' ", "GIT_COMMITTER_EMAIL", "GIT_PASSWORD", "GIT_COMMITTER_NAME'", "GIT_USERNAME"]
|
python
| 6 | 0 | |
video_face/recognize.py
|
import os
from tqdm import tqdm
import pickle
import numpy as np
import pandas as pd
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from deepface.commons import functions, realtime, distance as dst
def face_recognize(faces, db_embeddings, model, model_name, distance_metric):
# threshold = dst.findThreshold(model_name, distance_metric)
embeddings = model.predict(faces)
if distance_metric == 'cosine':
distance = np.matmul(embeddings, np.transpose(db_embeddings))
elif distance_metric == 'euclidean':
distance = dst.findEuclideanDistance(embeddings, db_embeddings)
elif distance_metric == 'euclidean_l2':
distance = dst.findEuclideanDistance(dst.l2_normalize(embeddings), dst.l2_normalize(db_embeddings))
shortest_distance = np.max(distance, axis=1)
pred = np.argmax(distance, axis=1)
return shortest_distance, pred
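# Note on the metrics above (descriptive, not part of the original file): the 'cosine'
# branch computes dot products against the database embeddings, so larger values mean a
# closer match and np.max / np.argmax pick the best candidate per face; despite its
# name, 'shortest_distance' holds a similarity score in that branch.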
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
pkg/adapter/awssnssource/adapter.go
|
/*
Copyright (c) 2020 TriggerMesh Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package awssnssource
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"time"
"go.uber.org/zap"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/aws/aws-sdk-go/service/sns/snsiface"
cloudevents "github.com/cloudevents/sdk-go/v2"
pkgadapter "knative.dev/eventing/pkg/adapter/v2"
"knative.dev/pkg/logging"
"github.com/triggermesh/aws-event-sources/pkg/apis/sources/v1alpha1"
)
// envConfig is a set of parameters sourced from the environment for the source's
// adapter.
type envConfig struct {
pkgadapter.EnvConfig
Topic string `envconfig:"TOPIC" required:"true"`
AWSRegion string `envconfig:"AWS_REGION" required:"true"`
}
// adapter implements the source's adapter.
type adapter struct {
logger *zap.SugaredLogger
snsClient snsiface.SNSAPI
ceClient cloudevents.Client
topic string
awsRegion string
}
func NewEnvConfig() pkgadapter.EnvConfigAccessor {
return &envConfig{}
}
func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor,
ceClient cloudevents.Client) pkgadapter.Adapter {
logger := logging.FromContext(ctx)
env := envAcc.(*envConfig)
// create SNS client
sess := session.Must(session.NewSession(aws.NewConfig().WithMaxRetries(5)))
return &adapter{
logger: logger,
snsClient: sns.New(sess),
ceClient: ceClient,
topic: env.Topic,
awsRegion: env.AWSRegion,
}
}
const (
port = 8081
defaultSubscriptionPeriod = 10 * time.Second
)
// Start implements adapter.Adapter.
func (a *adapter) Start(stopCh <-chan struct{}) error {
// Set up the subscription in the background. This avoids a chicken-and-egg problem between
// the server being ready to respond and having the info we need for the subscription request
go func() {
for {
if err := a.attemptSubscription(defaultSubscriptionPeriod); err != nil {
a.logger.Error(err)
}
}
}()
// Start server
http.HandleFunc("/", a.handleNotification)
http.HandleFunc("/health", healthCheckHandler)
a.logger.Infof("Serving on port %d", port)
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}
func (a *adapter) attemptSubscription(period time.Duration) error {
time.Sleep(period)
topic, err := a.snsClient.CreateTopic(&sns.CreateTopicInput{Name: &a.topic})
if err != nil {
return err
}
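// K_SINK is the sink URI injected by Knative; the SNS topic gets subscribed to it,
// reusing the URL scheme (http/https) as the SNS subscription protocol.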
sink := os.Getenv("K_SINK")
sinkUrl, err := url.Parse(sink)
if err != nil {
return err
}
_, err = a.snsClient.Subscribe(&sns.SubscribeInput{
Endpoint: &sink,
Protocol: &sinkUrl.Scheme,
TopicArn: topic.TopicArn,
})
if err != nil {
return err
}
a.logger.Debug("Finished subscription flow")
return nil
}
// handleNotification implements the receive interface for SNS.
func (a *adapter) handleNotification(_ http.ResponseWriter, r *http.Request) {
// Fish out notification body
var notification interface{}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
a.logger.Error("Failed to parse notification: ", err)
}
err = json.Unmarshal(body, &notification)
if err != nil {
a.logger.Error("Failed to parse notification: ", err)
}
a.logger.Info(string(body))
data := notification.(map[string]interface{})
// If the message is about our subscription, curl the confirmation endpoint.
if data["Type"].(string) == "SubscriptionConfirmation" {
subscribeURL := data["SubscribeURL"].(string)
_, err := http.Get(subscribeURL)
if err != nil {
a.logger.Fatalw("Unable to confirm SNS subscription", "error", err)
}
a.logger.Info("Successfully confirmed SNS subscription")
// If it's a legit notification, push the event
} else if data["Type"].(string) == "Notification" {
eventTime, _ := time.Parse(time.RFC3339, data["Timestamp"].(string))
record := &SNSEventRecord{
EventVersion: "1.0",
EventSubscriptionArn: "",
EventSource: "aws:sns",
SNS: SNSEntity{
Signature: data["Signature"].(string),
MessageID: data["MessageId"].(string),
Type: data["Type"].(string),
TopicArn: data["TopicArn"].(string),
MessageAttributes: data["MessageAttributes"].(map[string]interface{}),
SignatureVersion: data["SignatureVersion"].(string),
Timestamp: eventTime,
SigningCertURL: data["SigningCertURL"].(string),
Message: data["Message"].(string),
UnsubscribeURL: data["UnsubscribeURL"].(string),
Subject: data["Subject"].(string),
},
}
event := cloudevents.NewEvent(cloudevents.VersionV1)
event.SetType(v1alpha1.AWSSNSEventType(v1alpha1.AWSSNSGenericEventType))
event.SetSubject(data["Subject"].(string))
event.SetSource(v1alpha1.AWSSNSEventSource(a.awsRegion, a.topic))
event.SetID(data["MessageId"].(string))
event.SetData(cloudevents.ApplicationJSON, record)
if result := a.ceClient.Send(context.Background(), event); !cloudevents.IsACK(result) {
a.logger.Errorw("Failed to send CloudEvent", "error", result)
}
}
}
func healthCheckHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
}
|
[
"\"K_SINK\""
] |
[] |
[
"K_SINK"
] |
[]
|
["K_SINK"]
|
go
| 1 | 0 | |
playwright/src/test/java/com/microsoft/playwright/TestBase.java
|
/*
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.microsoft.playwright;
import com.microsoft.playwright.options.BrowserChannel;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import java.io.IOException;
import static com.microsoft.playwright.Utils.getBrowserNameFromEnv;
public class TestBase {
static Server server;
static Server httpsServer;
static BrowserType browserType;
static Playwright playwright;
static Browser browser;
static boolean isMac = Utils.getOS() == Utils.OS.MAC;
static boolean isWindows = Utils.getOS() == Utils.OS.WINDOWS;
static boolean headful;
Page page;
BrowserContext context;
static boolean isHeadful() {
return headful;
}
static boolean isChromium() {
return "chromium".equals(getBrowserNameFromEnv());
}
static boolean isWebKit() {
return "webkit".equals(getBrowserNameFromEnv());
}
static boolean isFirefox() {
return "firefox".equals(getBrowserNameFromEnv());
}
static BrowserChannel getBrowserChannelFromEnv() {
String channel = System.getenv("BROWSER_CHANNEL");
if (channel == null) {
return null;
}
switch (channel) {
case "chrome": return BrowserChannel.CHROME;
case "chrome-beta": return BrowserChannel.CHROME_BETA;
case "chrome-dev": return BrowserChannel.CHROME_DEV;
case "chrome-canary": return BrowserChannel.CHROME_CANARY;
case "msedge": return BrowserChannel.MSEDGE;
case "msedge-beta": return BrowserChannel.MSEDGE_BETA;
case "msedge-dev": return BrowserChannel.MSEDGE_DEV;
case "msedge-canary": return BrowserChannel.MSEDGE_CANARY;
default: throw new IllegalArgumentException("Unknown BROWSER_CHANNEL " + channel);
}
}
static BrowserType.LaunchOptions createLaunchOptions() {
String headfulEnv = System.getenv("HEADFUL");
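// Any value other than an unset variable, "0" or "false" runs the browsers headful;
// headless remains the default.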
headful = headfulEnv != null && !"0".equals(headfulEnv) && !"false".equals(headfulEnv);
BrowserType.LaunchOptions options;
options = new BrowserType.LaunchOptions();
options.headless = !headful;
options.channel = getBrowserChannelFromEnv();
return options;
}
static void initBrowserType() {
playwright = Playwright.create();
browserType = Utils.getBrowserTypeFromEnv(playwright);
}
static void launchBrowser(BrowserType.LaunchOptions launchOptions) {
initBrowserType();
browser = browserType.launch(launchOptions);
}
@BeforeAll
static void launchBrowser() {
launchBrowser(createLaunchOptions());
}
@AfterAll
static void closeBrowser() {
if (browser != null) {
browser.close();
browser = null;
}
}
@BeforeAll
static void startServer() throws IOException {
server = Server.createHttp(8907);
httpsServer = Server.createHttps(8908);
}
@AfterAll
static void stopServer() throws IOException {
if (server != null) {
server.stop();
server = null;
}
if (httpsServer != null) {
httpsServer.stop();
httpsServer = null;
}
}
@AfterAll
static void closePlaywright() throws Exception {
if (playwright != null) {
playwright.close();
playwright = null;
}
}
BrowserContext createContext() {
return browser.newContext();
}
@BeforeEach
void createContextAndPage() {
server.reset();
httpsServer.reset();
context = createContext();
page = context.newPage();
}
@AfterEach
void closeContext() {
if (context != null) {
context.close();
context = null;
page = null;
}
}
}
|
[
"\"BROWSER_CHANNEL\"",
"\"HEADFUL\""
] |
[] |
[
"HEADFUL",
"BROWSER_CHANNEL"
] |
[]
|
["HEADFUL", "BROWSER_CHANNEL"]
|
java
| 2 | 0 | |
fairseq/fairseq/distributed_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
import os
import pickle
import random
import socket
import struct
import subprocess
import warnings
from argparse import Namespace
from collections import OrderedDict
from typing import Any, Dict, List, Mapping, Optional
import torch
import torch.distributed as dist
from fairseq import utils
from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig
from omegaconf import open_dict
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
# Flag to indicate if we're using Megatron
# NOTE: this is a temporary hack until we move away from Megatron's model parallel init
_USE_MEGATRON = False
# Whether to use XLA ops (e.g., on TPUs) instead of CUDA ops.
_USE_XLA = False
logger = logging.getLogger(__name__)
def is_master(cfg: DistributedTrainingConfig):
return cfg.distributed_rank == 0
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None or cfg.tpu:
return
if cfg.pipeline_model_parallel:
balance_exists = (
cfg.pipeline_balance is not None
or cfg.pipeline_encoder_balance is not None
or cfg.pipeline_decoder_balance is not None
)
devices_exist = (
cfg.pipeline_devices is not None
or cfg.pipeline_encoder_devices is not None
or cfg.pipeline_decoder_devices is not None
)
if not balance_exists:
raise ValueError(
"--pipeline-balance is currently required for pipeline model parallelism"
)
if not devices_exist:
raise ValueError(
"--pipeline-devices is currently required for pipeline model parallelism"
)
cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int)
if cfg.pipeline_devices is not None:
cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int)
num_pipeline_devices = len(set(cfg.pipeline_devices))
else:
cfg.pipeline_encoder_devices = utils.eval_str_list(
cfg.pipeline_encoder_devices, type=int
)
cfg.pipeline_decoder_devices = utils.eval_str_list(
cfg.pipeline_decoder_devices, type=int
)
num_pipeline_devices = len(
set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices)
)
gpus_per_node = torch.cuda.device_count()
assert (
gpus_per_node >= num_pipeline_devices
and gpus_per_node % num_pipeline_devices == 0
), (
"the number of unique device IDs in --pipeline-devices must evenly divide "
"the number of GPUs per node (multi-node pipelining is not yet supported)"
)
num_pipelines_per_node = gpus_per_node // num_pipeline_devices
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
# we can determine the init method automatically for Slurm
elif cfg.distributed_port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
cfg.distributed_init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=cfg.distributed_port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
cfg.distributed_rank = node_id * gpus_per_node
cfg.distributed_world_size = nnodes * gpus_per_node
elif cfg.pipeline_model_parallel:
assert ntasks_per_node == num_pipelines_per_node, (
"SLURM --ntasks-per-node must match number of pipelines per "
"node (={})".format(num_pipelines_per_node)
)
cfg.distributed_no_spawn = True
# For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on
# the first node, [2, 3] on the second node, etc. This
# matches torch.distributed.launch.
node_id = int(os.environ.get("SLURM_NODEID"))
local_id = int(os.environ.get("SLURM_LOCALID"))
cfg.distributed_rank = node_id * num_pipelines_per_node + local_id
# In the above example, device_id will always be in [0, 1],
# which also matches torch.distributed.launch.
cfg.device_id = local_id
# We also want to set distributed_world_size to be the total
# number of pipelines across all nodes.
cfg.distributed_world_size = nnodes * num_pipelines_per_node
else:
assert ntasks_per_node == cfg.distributed_world_size // nnodes
cfg.distributed_no_spawn = True
cfg.distributed_rank = int(os.environ.get("SLURM_PROCID"))
cfg.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
assert (
cfg.distributed_world_size <= torch.cuda.device_count()
), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices"
port = random.randint(10000, 20000)
cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port)
if cfg.pipeline_model_parallel:
if not cfg.distributed_no_spawn:
# When distributed_no_spawn is False, we expect distributed_rank and
# distributed_world_size to be based on the total number of GPUs, so
# we need to correct them to be based on the number of pipelines.
assert cfg.distributed_world_size % num_pipeline_devices == 0
cfg.distributed_world_size = (
cfg.distributed_world_size // num_pipeline_devices
)
# In the case of 4-way MP on nodes with 8 GPUs, we want
# distributed_rank to be the starting GPU index for each pipeline
# i.e., 0, 2, ...
assert cfg.distributed_rank % gpus_per_node == 0
assert cfg.distributed_rank % num_pipeline_devices == 0
with open_dict(cfg):
cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices
# launch one process per pipeline
cfg.distributed_num_procs = num_pipelines_per_node
# if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0
# and 4, indicating the starting device IDs for each pipeline
cfg.device_id *= num_pipeline_devices
if cfg.device_id > 0:
# if there's multiple pipelines on a node (e.g., 4-way MP on an 8
# GPU node), we need to adjust pipeline_devices accordingly
logger.debug(
"setting CUDA device={} on rank {}".format(
cfg.device_id, cfg.distributed_rank
)
)
torch.cuda.set_device(cfg.device_id)
with open_dict(cfg):
cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices]
logger.info(
"setting pipeline_devices={} on rank {}".format(
cfg.pipeline_devices, cfg.distributed_rank
)
)
elif not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
def distributed_init(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
cfg = convert_namespace_to_omegaconf(cfg)
if not cfg.common.tpu:
if torch.distributed.is_available() and torch.distributed.is_initialized():
warnings.warn(
"Distributed is already initialized, cannot initialize twice!"
)
else:
logger.info(
"distributed init (rank {}): {}".format(
cfg.distributed_training.distributed_rank,
cfg.distributed_training.distributed_init_method,
)
)
dist.init_process_group(
backend=cfg.distributed_training.distributed_backend,
init_method=cfg.distributed_training.distributed_init_method,
world_size=cfg.distributed_training.distributed_world_size,
rank=cfg.distributed_training.distributed_rank,
)
logger.info(
"initialized host {} as rank {}".format(
socket.gethostname(),
cfg.distributed_training.distributed_rank,
)
)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
cfg.distributed_training.distributed_rank = torch.distributed.get_rank()
else:
assert xm.xrt_world_size() == cfg.distributed_training.distributed_world_size
global _USE_XLA
_USE_XLA = True
cfg.distributed_training.device_id = xm.get_local_ordinal()
cfg.distributed_training.distributed_rank = xm.get_ordinal()
xm.rendezvous("distributed_init") # wait for all workers
xm.mark_step()
if is_master(cfg.distributed_training):
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
if cfg.common.model_parallel_size > 1:
try:
from fairseq.model_parallel.megatron.mpu import (
initialize_model_parallel,
model_parallel_cuda_manual_seed,
)
except ImportError:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
global _USE_MEGATRON
_USE_MEGATRON = True
initialize_model_parallel(cfg.common.model_parallel_size)
model_parallel_cuda_manual_seed(cfg.common.seed)
model_part_number = get_model_parallel_rank()
cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number)
return cfg.distributed_training.distributed_rank
def distributed_main(i, main, cfg: FairseqConfig, kwargs):
cfg.distributed_training.device_id = i
if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn
cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i
cfg.distributed_training.distributed_rank = distributed_init(cfg)
after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None)
if after_distributed_init_fn:
cfg = after_distributed_init_fn(cfg)
main(cfg, **kwargs)
def call_main(cfg: FairseqConfig, main, **kwargs):
if cfg.distributed_training.distributed_init_method is None:
infer_init_method(cfg.distributed_training)
if cfg.distributed_training.distributed_init_method is not None:
# distributed training
if not cfg.distributed_training.distributed_no_spawn:
start_rank = cfg.distributed_training.distributed_rank
cfg.distributed_training.distributed_rank = None # assign automatically
kwargs["start_rank"] = start_rank
torch.multiprocessing.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=min(
torch.cuda.device_count(),
cfg.distributed_training.distributed_world_size,
),
)
else:
distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs)
elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1:
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy("file_system")
xmp.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=8, # use all 8 TPU cores
)
else:
# single GPU main
main(cfg, **kwargs)
def use_xla():
global _USE_XLA
return _USE_XLA
def new_groups(grouped_ranks: List[List[int]]):
if use_xla():
return ("tpu", grouped_ranks)
else:
groups = [dist.new_group(g) for g in grouped_ranks]
my_group_idx = _find_my_group_index(grouped_ranks)
return groups[my_group_idx]
def _find_my_group_index(grouped_ranks):
my_rank = get_global_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def _find_my_group(grouped_ranks):
index = _find_my_group_index(grouped_ranks)
return grouped_ranks[index]
def get_rank(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return my_group.index(get_global_rank())
else:
return dist.get_rank(group=group)
def get_world_size(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return len(my_group)
elif torch.distributed.is_initialized():
return dist.get_world_size(group=group)
else:
return 1
def get_global_group():
if use_xla():
return new_groups([list(range(get_global_world_size()))])
elif torch.distributed.is_initialized():
if not hasattr(get_global_group, "_global_group"):
# ideally we could use torch.distributed.group.WORLD, but it seems
# to cause random NCCL hangs in some cases
get_global_group._global_group = dist.new_group()
return get_global_group._global_group
else:
return None
def get_global_rank():
if use_xla():
return xm.get_ordinal()
elif torch.distributed.is_initialized():
return torch.distributed.get_rank()
else:
return 0
def get_global_world_size():
if use_xla():
return xm.xrt_world_size()
elif torch.distributed.is_initialized():
return torch.distributed.get_world_size()
else:
return 1
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
return mpu.get_data_parallel_group()
else:
return get_global_group()
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return get_rank(get_data_parallel_group())
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return get_world_size(get_data_parallel_group())
def get_model_parallel_group():
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
return mpu.get_model_parallel_group()
else:
return None
def get_model_parallel_rank():
"""Return my rank for the model parallel group."""
return get_rank(get_model_parallel_group())
def get_model_parallel_world_size():
"""Return world size for the model parallel group."""
return get_world_size(get_model_parallel_group())
def all_reduce(tensor, group, op="sum"):
if use_xla():
assert isinstance(group, tuple) and group[0] == "tpu"
tensor = [tensor] # wrap in a list to make xm.all_reduce in-place
return xm.all_reduce(op, tensor, groups=group[1])[0]
else:
if op == "sum":
op = dist.ReduceOp.SUM
elif op == "max":
op = dist.ReduceOp.MAX
else:
raise NotImplementedError
dist.all_reduce(tensor, op=op, group=group)
return tensor
def broadcast(tensor, src, group):
if use_xla():
# XLA doesn't support broadcast, hack it with all_reduce
if get_rank(group) != src:
tensor.zero_()
all_reduce(tensor, group)
else:
dist.broadcast(tensor, src=src, group=group)
def all_to_all(tensor, group):
"""Perform an all-to-all operation on a 1D Tensor."""
assert tensor.dim() == 1
split_count = get_world_size(group=group)
assert tensor.numel() % split_count == 0
if use_xla():
assert isinstance(group, tuple) and group[0] == "tpu"
return xm.all_to_all(
tensor,
split_dimension=0,
concat_dimension=0,
split_count=split_count,
groups=group[1],
)
else:
output = torch.zeros_like(tensor)
dist.all_to_all_single(output, tensor, group=group)
return output
def all_gather(tensor, group, return_tensor=False):
"""Perform an all-gather operation."""
if use_xla():
result = xm.all_gather(tensor, groups=group[1])
world_size = get_world_size(group=group)
result = result.view(world_size, *tensor.size())
if return_tensor:
return result
else:
return [result[i] for i in range(world_size)]
else:
world_size = get_world_size(group=group)
rank = get_rank(group=group)
tensor_list = [
tensor if i == rank else torch.empty_like(tensor) for i in range(world_size)
]
dist.all_gather(tensor_list, tensor, group=group)
if return_tensor:
return torch.stack(tensor_list, dim=0)
else:
return tensor_list
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group: group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
if group is None:
group = get_global_group()
rank = get_rank(group=group)
world_size = get_world_size(group=group)
buffer_size = max_size * world_size
if (
not hasattr(all_gather_list, "_buffer")
or all_gather_list._buffer.numel() < buffer_size
):
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
data = utils.move_to_cpu(data)
enc = pickle.dumps(data)
enc_size = len(enc)
header_size = 4 # size of header that contains the length of the encoded data
size = header_size + enc_size
if size > max_size:
raise ValueError(
"encoded data size ({}) exceeds max_size ({})".format(size, max_size)
)
header = struct.pack(">I", enc_size)
cpu_buffer[:size] = torch.ByteTensor(list(header + enc))
start = rank * max_size
buffer[start : start + size].copy_(cpu_buffer[:size])
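# Each rank wrote only into its own max_size slot and the rest of the buffer is
# zeros, so a sum all-reduce effectively concatenates every worker's payload.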
all_reduce(buffer, group=group)
buffer = buffer.cpu()
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size : (i + 1) * max_size]
(enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist()))
if enc_size > 0:
result.append(
pickle.loads(
bytes(out_buffer[header_size : header_size + enc_size].tolist())
)
)
return result
except pickle.UnpicklingError:
raise Exception(
"Unable to unpickle data from other workers. all_gather_list requires all "
"workers to enter the function together, so this error usually indicates "
"that the workers have fallen out of sync somehow. Workers can fall out of "
"sync if one of them runs out of memory, or if there are other conditions "
"in your training script that can cause one worker to finish an epoch "
"while other workers are still iterating over their portions of the data. "
"Try rerunning with --ddp-backend=no_c10d and see if that helps."
)
def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]:
"""
AllReduce a dictionary of values across workers. We separately
reduce items that are already on the device and items on CPU for
better performance.
Args:
data (Mapping[str, Any]): dictionary of data to all-reduce, but
cannot be a nested dictionary
device (torch.device): device for the reduction
group: group of the collective
"""
data_keys = list(data.keys())
# We want to separately reduce items that are already on the
# device and items on CPU for performance reasons.
cpu_data = OrderedDict()
device_data = OrderedDict()
for k in data_keys:
t = data[k]
if not torch.is_tensor(t):
cpu_data[k] = torch.tensor(t, dtype=torch.double)
elif t.device.type != device.type:
cpu_data[k] = t.to(dtype=torch.double)
else:
device_data[k] = t.to(dtype=torch.double)
def _all_reduce_dict(data: OrderedDict):
if len(data) == 0:
return data
buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device)
all_reduce(buf, group=group)
split_buf = torch.split(buf, [t.numel() for t in data.values()])
reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())]
return OrderedDict(zip(data.keys(), reduced_data))
cpu_data = _all_reduce_dict(cpu_data)
device_data = _all_reduce_dict(device_data)
def get_from_stack(key):
if key in cpu_data:
return cpu_data[key]
elif key in device_data:
return device_data[key]
raise KeyError
return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
# From fairscale/optim/utils.py
def broadcast_object(
obj: Any,
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
dist_length_dtype: Optional[torch.dtype] = torch.long,
dist_dtype: Optional[torch.dtype] = torch.uint8,
) -> Any:
"""
Either broadcast from master to the fleet (default),
or use the src setting as the original rank.
"""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
if get_rank(group) == src_rank:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
data = bytearray(buffer.getbuffer())
length_tensor = torch.tensor(
[len(data)], dtype=dist_length_dtype, device=dist_device
)
broadcast(length_tensor, src=src_rank, group=group)
data_send_tensor = torch.tensor(data, dtype=dist_dtype, device=dist_device)
broadcast(data_send_tensor, src=src_rank, group=group)
else:
# Fetch from the source
length_tensor = torch.tensor([0], dtype=dist_length_dtype, device=dist_device)
broadcast(length_tensor, src=src_rank, group=group)
data_recv_tensor = torch.zeros(
[int(length_tensor.item())], dtype=dist_dtype, device=dist_device
)
broadcast(data_recv_tensor, src=src_rank, group=group)
buffer = io.BytesIO(data_recv_tensor.cpu().numpy())
obj = torch.load(buffer, map_location="cpu")
return obj
|
[] |
[] |
[
"SLURM_LOCALID",
"SLURM_NTASKS",
"SLURM_PROCID",
"SLURM_NNODES",
"SLURM_JOB_NODELIST",
"RANK",
"SLURM_NODEID",
"SLURM_STEP_NODELIST",
"SLURM_NTASKS_PER_NODE",
"WORLD_SIZE"
] |
[]
|
["SLURM_LOCALID", "SLURM_NTASKS", "SLURM_PROCID", "SLURM_NNODES", "SLURM_JOB_NODELIST", "RANK", "SLURM_NODEID", "SLURM_STEP_NODELIST", "SLURM_NTASKS_PER_NODE", "WORLD_SIZE"]
|
python
| 10 | 0 | |
internal/colly_test.go
|
package internal
import (
"fmt"
"net/http"
"os"
"testing"
)
func TestGetItemsOnPage(t *testing.T) {
baldorCookieName := os.Getenv("BALDORFOOD_COOKIE_NAME")
baldorCookieValue := os.Getenv("BALDORFOOD_COOKIE_VALUE")
shopper, err := NewShopperWithCookies([]*http.Cookie{{Name: baldorCookieName, Value: baldorCookieValue}})
if err != nil {
t.Fatal(err)
}
cookie, err := BaldorCookie(shopper.Jar)
if err != nil {
t.Fatal(err)
}
c := NewColly(BaldorHost, []*http.Cookie{cookie})
if !c.IsLoggedIn() {
t.Fatal("Not logged in.")
}
u := BaldorHost + "/products/vegetables/fresh-herbs"
items := c.GetItemsOnPage(u)
want := 30
if len(items) != want {
t.Fatalf("Unxpected number of items on %q: got = %d; want = %d", u, len(items), want)
}
for _, item := range items {
fmt.Printf("%+v\n", item)
}
}
|
[
"\"BALDORFOOD_COOKIE_NAME\"",
"\"BALDORFOOD_COOKIE_VALUE\""
] |
[] |
[
"BALDORFOOD_COOKIE_NAME",
"BALDORFOOD_COOKIE_VALUE"
] |
[]
|
["BALDORFOOD_COOKIE_NAME", "BALDORFOOD_COOKIE_VALUE"]
|
go
| 2 | 0 | |
2021/go-rest-servers/swagger/oapi-server/main.go
|
// Sample task server using oapi-codegen boilerplate.
//
// Eli Bendersky [https://eli.thegreenplace.net]
// This code is in the public domain.
package main
import (
"os"
"example.com/internal/task"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
func main() {
// Set up echo server/router and middleware.
// The paths in our OpenAPI spec are defined w/o trailing slashes, but we want
// to accept requests *with* trailing slashes too - so use the
// RemoveTrailingSlash middleware.
e := echo.New()
e.Pre(middleware.RemoveTrailingSlash())
e.Use(middleware.Logger())
taskserver := task.NewTaskServer()
task.RegisterHandlers(e, taskserver)
e.Logger.Fatal(e.Start("localhost:" + os.Getenv("SERVERPORT")))
}
|
[
"\"SERVERPORT\""
] |
[] |
[
"SERVERPORT"
] |
[]
|
["SERVERPORT"]
|
go
| 1 | 0 | |
snypy/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snypy.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
commands/inspect_test.go
|
package commands
import (
"bytes"
"encoding/json"
"flag"
"io"
"os"
"strings"
"testing"
"github.com/codegangsta/cli"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/auth"
"github.com/docker/machine/libmachine/engine"
"github.com/docker/machine/libmachine/swarm"
"github.com/stretchr/testify/assert"
)
func TestCmdInspectFormat(t *testing.T) {
actual, host := runInspectCommand(t, []string{})
expected, _ := json.MarshalIndent(host, "", " ")
assert.Equal(t, string(expected), actual)
actual, _ = runInspectCommand(t, []string{"--format", "{{.DriverName}}"})
assert.Equal(t, "none", actual)
actual, _ = runInspectCommand(t, []string{"--format", "{{json .DriverName}}"})
assert.Equal(t, "\"none\"", actual)
actual, _ = runInspectCommand(t, []string{"--format", "{{prettyjson .Driver}}"})
assert.Equal(t, "{\n \"URL\": \"unix:///var/run/docker.sock\"\n}", actual)
}
func runInspectCommand(t *testing.T, args []string) (string, *libmachine.Host) {
stdout := os.Stdout
stderr := os.Stderr
shell := os.Getenv("SHELL")
r, w, _ := os.Pipe()
os.Stdout = w
os.Stderr = w
os.Setenv("MACHINE_STORAGE_PATH", TestStoreDir)
os.Setenv("SHELL", "/bin/bash")
defer func() {
os.Setenv("MACHINE_STORAGE_PATH", "")
os.Setenv("SHELL", shell)
os.Stdout = stdout
os.Stderr = stderr
}()
if err := clearHosts(); err != nil {
t.Fatal(err)
}
store, sErr := getTestStore()
if sErr != nil {
t.Fatal(sErr)
}
mcn, err := libmachine.New(store)
if err != nil {
t.Fatal(err)
}
hostOptions := &libmachine.HostOptions{
EngineOptions: &engine.EngineOptions{},
SwarmOptions: &swarm.SwarmOptions{
Master: false,
Discovery: "",
Address: "",
Host: "",
},
AuthOptions: &auth.AuthOptions{},
}
flags := getTestDriverFlags()
_, err = mcn.Create("test-a", "none", hostOptions, flags)
if err != nil {
t.Fatal(err)
}
outStr := make(chan string)
go func() {
var testOutput bytes.Buffer
io.Copy(&testOutput, r)
outStr <- testOutput.String()
}()
set := flag.NewFlagSet("inspect", 0)
set.String("format", "", "")
set.Parse(args)
c := cli.NewContext(nil, set, set)
cmdInspect(c)
w.Close()
out := <-outStr
return strings.TrimSpace(out), getHost(c)
}
|
[
"\"SHELL\""
] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
go
| 1 | 0 | |
web/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
import dotenv
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_k8s.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
dotenv.read_dotenv()
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bci_framework/framework/editor/highlighters/css_syntax.py
|
"""
===============
CSS Highlighter
===============
QSyntaxHighlighter for CSS syntax.
"""
import os
from PySide6.QtCore import QRegularExpression
from PySide6.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter
########################################################################
class CSSHighlighter(QSyntaxHighlighter):
"""Syntax highlighter for CSS style sheets."""
keywords = ['important', ]
# Python operators
operators = ['=',
# Comparison
'==', '!=', '<', '<=', '>', '>=',
# Arithmetic
'\+', '-', '\*', '/', '//', '\%', '\*\*',
# In-place
'\+=', '-=', '\*=', '/=', '\%=',
# Bitwise
'\^', '\|', '\&', '\~', '>>', '<<',
]
# Python braces
braces = ['\{', '\}', '\(', '\)', '\[', '\]', ]
# ----------------------------------------------------------------------
def __init__(self, document):
""""""
QSyntaxHighlighter.__init__(self, document)
rules = []
# Keyword, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, self.styles['keyword'])
for w in CSSHighlighter.keywords]
rules += [(r'( )', 0, self.get_format('#4f5b62'))]
# All other rules
rules += [(r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles['value']),
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, self.styles['value']),
(r'^([\w]+)[#\.\w\[\]=]*\s*\{',
1, self.styles['selector']),
(r'^\s*([\w-]+)\s*:\s*([\w\'"#]+)', 1, self.styles['key']),
(r'^\s*([\w-]+)\s*:\s*([\w\'"#]+)',
2, self.styles['value']),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, self.styles['numbers']),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b',
0, self.styles['numbers']),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b',
0, self.styles['numbers']),
]
# Build a QRegularExpression for each pattern
self.rules = [(QRegularExpression(pat), index, fmt)
for (pat, index, fmt) in rules]
# ----------------------------------------------------------------------
def highlightBlock(self, text: str) -> None:
"""Apply syntax highlighting to the given block of text."""
# Do other syntax formatting
for expression, nth, format_ in self.rules:
index = expression.match(text, 0).capturedStart(nth)
start = 0
while index >= 0:
# We actually want the index of the nth match
index = expression.match(text, start).capturedStart(nth)
length = expression.match(text, start).capturedLength(nth)
end = expression.match(text, start).capturedEnd(nth)
self.setFormat(index, length, format_)
start = end
self.setCurrentBlockState(0)
# ----------------------------------------------------------------------
@classmethod
def get_format(cls, color: str, style='', fontsize=None) -> QTextCharFormat:
"""Return a QTextCharFormat with the given attributes."""
_color = QColor()
_color.setNamedColor(color)
_format = QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
if fontsize:
_format.setFontPointSize(fontsize)
return _format
# ----------------------------------------------------------------------
@property
def styles(self):
"""The styles depend on the theme."""
if 'light' in os.environ['QTMATERIAL_THEME']:
# Syntax self.styles that can be shared by all languages
return {
'selector': self.get_format('#00007f', 'bold'),
'keyword': self.get_format('#ff7c00', 'bold'),
'numbers': self.get_format('#007f7f'),
'key': self.get_format('#0040e0'), # .
'value': self.get_format('#7f007f'), # .
}
else:
return {
'selector': self.get_format('#8080ff', 'bold'),
'key': self.get_format('#63a3ff'),
'value': self.get_format('#ff7ed8'),
'keyword': self.get_format('#ff7c00', 'bold'),
'numbers': self.get_format('#72e4e4'),
}
|
[] |
[] |
[
"QTMATERIAL_THEME"
] |
[]
|
["QTMATERIAL_THEME"]
|
python
| 1 | 0 | |
tests/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main() -> None:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/serve.go
|
package cmd
import (
"context"
"fmt"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/odpf/shield/internal/permission"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/odpf/shield/internal/bootstrap"
"github.com/odpf/shield/internal/group"
"github.com/odpf/shield/internal/relation"
"github.com/odpf/shield/internal/resource"
"github.com/odpf/shield/api/handler"
v1 "github.com/odpf/shield/api/handler/v1beta1"
"github.com/odpf/shield/config"
"github.com/odpf/shield/hook"
authz_hook "github.com/odpf/shield/hook/authz"
"github.com/odpf/shield/internal/authz"
"github.com/odpf/shield/internal/org"
"github.com/odpf/shield/internal/project"
"github.com/odpf/shield/internal/roles"
"github.com/odpf/shield/internal/schema"
"github.com/odpf/shield/internal/user"
"github.com/odpf/shield/pkg/sql"
"github.com/odpf/shield/proxy"
blobstore "github.com/odpf/shield/store/blob"
"github.com/odpf/shield/store/postgres"
"github.com/odpf/salt/log"
"github.com/odpf/salt/server"
"github.com/pkg/errors"
"github.com/pkg/profile"
cli "github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
var (
proxyTermChan = make(chan os.Signal, 1)
ruleCacheRefreshDelay = time.Minute * 2
)
func serveCommand(logger log.Logger, appConfig *config.Shield) *cli.Command {
c := &cli.Command{
Use: "serve",
Short: "Start server and proxy default on port 8080",
Example: "shield serve",
RunE: func(cmd *cli.Command, args []string) error {
return serve(logger, appConfig)
},
}
return c
}
func serve(logger log.Logger, appConfig *config.Shield) error {
if profiling := os.Getenv("SHIELD_PROFILE"); profiling == "true" || profiling == "1" {
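// With profile.CPUProfile and ProfilePath("."), pkg/profile typically writes
// cpu.pprof into the working directory when the deferred Stop runs.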
defer profile.Start(profile.CPUProfile, profile.ProfilePath("."), profile.NoShutdownHook).Stop()
}
// @TODO: need to inject custom logger wrapper over zap into ctx to use it internally
ctx, cancelFunc := context.WithCancel(server.HandleSignals(context.Background()))
defer cancelFunc()
db, dbShutdown := setupDB(appConfig.DB, logger)
defer dbShutdown()
var cleanUpFunc []func() error
var cleanUpProxies []func(ctx context.Context) error
resourceConfig, err := loadResourceConfig(ctx, logger, appConfig)
if err != nil {
return err
}
serviceStore := postgres.NewStore(db)
authzService := authz.New(appConfig, logger)
deps, err := apiDependencies(ctx, db, appConfig, resourceConfig, logger, serviceStore, authzService)
if err != nil {
return err
}
AuthzCheckService := permission.NewCheckService(permission.Service{
Authz: authzService,
Store: serviceStore,
IdentityProxyHeader: appConfig.App.IdentityProxyHeader,
ResourcesRepository: resourceConfig,
})
cleanUpFunc, cleanUpProxies, err = startProxy(logger, appConfig, ctx, deps, cleanUpFunc, cleanUpProxies, AuthzCheckService)
if err != nil {
return err
}
muxServer := startServer(logger, appConfig, err, ctx, deps)
waitForTermSignal(ctx)
cleanup(logger, ctx, cleanUpFunc, cleanUpProxies, muxServer)
return nil
}
func cleanup(logger log.Logger, ctx context.Context, cleanUpFunc []func() error, cleanUpProxies []func(ctx context.Context) error, s *server.MuxServer) {
for _, f := range cleanUpFunc {
if err := f(); err != nil {
logger.Warn("error occurred during shutdown", "err", err)
}
}
for _, f := range cleanUpProxies {
shutdownCtx, shutdownCancel := context.WithTimeout(ctx, time.Second*20)
if err := f(shutdownCtx); err != nil {
shutdownCancel()
logger.Warn("error occurred during shutdown", "err", err)
continue
}
shutdownCancel()
}
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), time.Second*10)
defer shutdownCancel()
s.Shutdown(shutdownCtx)
}
func startServer(logger log.Logger, appConfig *config.Shield, err error, ctx context.Context, deps handler.Deps) *server.MuxServer {
s, err := server.NewMux(server.Config{
Port: appConfig.App.Port,
}, server.WithMuxGRPCServerOptions(getGRPCMiddleware(appConfig, logger)))
if err != nil {
panic(err)
}
gw, err := server.NewGateway("", appConfig.App.Port, server.WithGatewayMuxOptions(
runtime.WithIncomingHeaderMatcher(customHeaderMatcherFunc(map[string]bool{appConfig.App.IdentityProxyHeader: true}))),
)
if err != nil {
panic(err)
}
handler.Register(ctx, s, gw, deps)
go s.Serve()
logger.Info("[shield] api is up", "port", appConfig.App.Port)
// we'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
signal.Notify(proxyTermChan, os.Interrupt, os.Kill, syscall.SIGTERM)
return s
}
func customHeaderMatcherFunc(headerKeys map[string]bool) func(key string) (string, bool) {
return func(key string) (string, bool) {
if _, ok := headerKeys[key]; ok {
return key, true
}
return runtime.DefaultHeaderMatcher(key)
}
}
func loadResourceConfig(ctx context.Context, logger log.Logger, appConfig *config.Shield) (*blobstore.ResourcesRepository, error) {
// load resource config
if appConfig.App.ResourcesConfigPath == "" {
return nil, errors.New("resource config path cannot be left empty")
}
resourceBlobFS, err := (&blobFactory{}).New(ctx, appConfig.App.ResourcesConfigPath, appConfig.App.ResourcesConfigPathSecret)
if err != nil {
return nil, err
}
resourceRepo := blobstore.NewResourcesRepository(logger, resourceBlobFS)
if err := resourceRepo.InitCache(ctx, ruleCacheRefreshDelay); err != nil {
return nil, err
}
return resourceRepo, nil
}
func startProxy(logger log.Logger, appConfig *config.Shield, ctx context.Context, deps handler.Deps, cleanUpFunc []func() error, cleanUpProxies []func(ctx context.Context) error, authzCheckService permission.CheckService) ([]func() error, []func(ctx context.Context) error, error) {
for _, service := range appConfig.Proxy.Services {
h2cProxy := proxy.NewH2c(proxy.NewH2cRoundTripper(logger, buildHookPipeline(logger, deps)), proxy.NewDirector())
// load rules sets
if service.RulesPath == "" {
return nil, nil, errors.New("ruleset field cannot be left empty")
}
blobFS, err := (&blobFactory{}).New(ctx, service.RulesPath, service.RulesPathSecret)
if err != nil {
return nil, nil, err
}
ruleRepo := blobstore.NewRuleRepository(logger, blobFS)
if err := ruleRepo.InitCache(ctx, ruleCacheRefreshDelay); err != nil {
return nil, nil, err
}
cleanUpFunc = append(cleanUpFunc, ruleRepo.Close)
middlewarePipeline := buildMiddlewarePipeline(logger, h2cProxy, ruleRepo, appConfig.App.IdentityProxyHeader, deps, authzCheckService)
go func(thisService config.Service, handler http.Handler) {
proxyURL := fmt.Sprintf("%s:%d", thisService.Host, thisService.Port)
logger.Info("starting h2c proxy", "url", proxyURL)
mux := http.NewServeMux()
mux.Handle("/ping", healthCheck())
mux.Handle("/", handler)
//create a tcp listener
proxyListener, err := net.Listen("tcp", proxyURL)
if err != nil {
logger.Fatal("failed to listen", "err", err)
}
proxySrv := http.Server{
Addr: proxyURL,
Handler: h2c.NewHandler(mux, &http2.Server{}),
}
if err := proxySrv.Serve(proxyListener); err != nil && err != http.ErrServerClosed {
logger.Fatal("failed to serve", "err", err)
}
cleanUpProxies = append(cleanUpProxies, proxySrv.Shutdown)
}(service, middlewarePipeline)
}
time.Sleep(100 * time.Millisecond)
logger.Info("[shield] proxy is up")
return cleanUpFunc, cleanUpProxies, nil
}
func buildHookPipeline(log log.Logger, deps handler.Deps) hook.Service {
rootHook := hook.New()
return authz_hook.New(log, rootHook, rootHook, deps)
}
func waitForTermSignal(ctx context.Context) {
for {
select {
case <-ctx.Done():
fmt.Printf("process: ctx done bye\n")
return
case <-proxyTermChan:
fmt.Printf("process: kill signal received. bye \n")
return
}
}
}
func healthCheck() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "pong")
}
}
func apiDependencies(ctx context.Context, db *sql.SQL, appConfig *config.Shield, resourceConfig *blobstore.ResourcesRepository, logger log.Logger, serviceStore postgres.Store, authzService *authz.Authz) (handler.Deps, error) {
permissions := permission.Service{
Authz: authzService,
IdentityProxyHeader: appConfig.App.IdentityProxyHeader,
Store: serviceStore,
ResourcesRepository: resourceConfig,
}
schemaService := schema.Service{
Store: serviceStore,
Authz: authzService,
}
roleService := roles.Service{
Store: serviceStore,
}
bootstrapService := bootstrap.Service{
SchemaService: schemaService,
RoleService: roleService,
Logger: logger,
}
bootstrapService.BootstrapDefaultDefinitions(ctx)
err := bootstrapService.BootstrapResources(ctx, resourceConfig)
if err != nil {
return handler.Deps{}, err
}
dependencies := handler.Deps{
V1beta1: v1.Dep{
OrgService: org.Service{
Store: serviceStore,
Permissions: permissions,
},
UserService: user.Service{
Store: serviceStore,
},
ProjectService: project.Service{
Store: serviceStore,
Permissions: permissions,
},
GroupService: group.Service{
Store: serviceStore,
Permissions: permissions,
},
RelationService: relation.Service{
Store: serviceStore,
Authz: authzService,
},
ResourceService: resource.Service{
Store: serviceStore,
Permissions: permissions,
},
RoleService: roleService,
PolicyService: schemaService,
ActionService: schemaService,
NamespaceService: schemaService,
IdentityProxyHeader: appConfig.App.IdentityProxyHeader,
PermissionCheckService: permission.NewCheckService(permissions),
},
}
return dependencies, nil
}
|
[
"\"SHIELD_PROFILE\""
] |
[] |
[
"SHIELD_PROFILE"
] |
[]
|
["SHIELD_PROFILE"]
|
go
| 1 | 0 | |
config.py
|
#!/usr/bin/env python
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you should change this'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
# noinspection PyMethodOverriding
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# Map class to environment
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
|
[] |
[] |
[
"DEV_DATABASE_URL",
"SECRET_KEY",
"DATABASE_URL",
"TEST_DATABASE_URL"
] |
[]
|
["DEV_DATABASE_URL", "SECRET_KEY", "DATABASE_URL", "TEST_DATABASE_URL"]
|
python
| 4 | 0 | |
monitoring/tests/system/test_vpcsc.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT MODIFY! AUTO-GENERATED!
# This file is auto-generated on 2019-05-03.
# flake8: noqa
import os
import pytest
from google.api_core import exceptions
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import enums
PROJECT_INSIDE = os.environ.get("PROJECT_ID", None)
if not PROJECT_INSIDE:
PROJECT_INSIDE = None
PROJECT_OUTSIDE = os.environ.get(
"GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT", None
)
IS_INSIDE_VPCSC = os.environ.get("GOOGLE_CLOUD_TESTS_IN_VPCSC", "false")
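# GOOGLE_CLOUD_TESTS_IN_VPCSC flips which project _do_test expects to be rejected
# by the service perimeter; see the assertions below.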
class TestVPCServiceControlV3(object):
@staticmethod
def _is_rejected(call):
try:
responses = call()
# If we reach this line, then call() did not raise. The return
# result must be either a google.api_core.page_iterator.Iterator
# instance, or None.
list(responses)
except exceptions.PermissionDenied as e:
return e.message == "Request is prohibited by organization's policy"
except:
pass
return False
@staticmethod
def _do_test(delayed_inside, delayed_outside):
if IS_INSIDE_VPCSC.lower() == "true":
assert TestVPCServiceControlV3._is_rejected(delayed_outside)
assert not (TestVPCServiceControlV3._is_rejected(delayed_inside))
else:
assert not (TestVPCServiceControlV3._is_rejected(delayed_outside))
assert TestVPCServiceControlV3._is_rejected(delayed_inside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_alert_policy(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_alert_policy(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.alert_policy_path(PROJECT_INSIDE, "mock_alert_policy")
delayed_inside = lambda: client.delete_alert_policy(name_inside)
name_outside = client.alert_policy_path(PROJECT_OUTSIDE, "mock_alert_policy")
delayed_outside = lambda: client.delete_alert_policy(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.alert_policy_path(PROJECT_INSIDE, "mock_alert_policy")
delayed_inside = lambda: client.get_alert_policy(name_inside)
name_outside = client.alert_policy_path(PROJECT_OUTSIDE, "mock_alert_policy")
delayed_outside = lambda: client.get_alert_policy(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_alert_policies(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_alert_policies(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_alert_policies(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_alert_policy(self):
client = monitoring_v3.AlertPolicyServiceClient()
name_inside = client.alert_policy_path(PROJECT_INSIDE, "mock_alert_policy")
delayed_inside = lambda: client.update_alert_policy({"name": name_inside})
name_outside = client.alert_policy_path(PROJECT_OUTSIDE, "mock_alert_policy")
delayed_outside = lambda: client.update_alert_policy({"name": name_outside})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_group(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_group(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.group_path(PROJECT_INSIDE, "mock_group")
delayed_inside = lambda: client.delete_group(name_inside)
name_outside = client.group_path(PROJECT_OUTSIDE, "mock_group")
delayed_outside = lambda: client.delete_group(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.group_path(PROJECT_INSIDE, "mock_group")
delayed_inside = lambda: client.get_group(name_inside)
name_outside = client.group_path(PROJECT_OUTSIDE, "mock_group")
delayed_outside = lambda: client.get_group(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_group_members(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_group_members(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_group_members(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_groups(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_groups(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_groups(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_group(self):
client = monitoring_v3.GroupServiceClient()
name_inside = client.group_path(PROJECT_INSIDE, "mock_group")
delayed_inside = lambda: client.update_group({"name": name_inside})
name_outside = client.group_path(PROJECT_OUTSIDE, "mock_group")
delayed_outside = lambda: client.update_group({"name": name_outside})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_metric_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_metric_descriptor(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_metric_descriptor(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_time_series(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_time_series(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_time_series(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_metric_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.metric_descriptor_path(
PROJECT_INSIDE, "mock_metric_descriptor"
)
delayed_inside = lambda: client.delete_metric_descriptor(name_inside)
name_outside = client.metric_descriptor_path(
PROJECT_OUTSIDE, "mock_metric_descriptor"
)
delayed_outside = lambda: client.delete_metric_descriptor(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_metric_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.metric_descriptor_path(
PROJECT_INSIDE, "mock_metric_descriptor"
)
delayed_inside = lambda: client.get_metric_descriptor(name_inside)
name_outside = client.metric_descriptor_path(
PROJECT_OUTSIDE, "mock_metric_descriptor"
)
delayed_outside = lambda: client.get_metric_descriptor(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_monitored_resource_descriptor(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.monitored_resource_descriptor_path(
PROJECT_INSIDE, "mock_monitored_resource_descriptor"
)
delayed_inside = lambda: client.get_monitored_resource_descriptor(name_inside)
name_outside = client.monitored_resource_descriptor_path(
PROJECT_OUTSIDE, "mock_monitored_resource_descriptor"
)
delayed_outside = lambda: client.get_monitored_resource_descriptor(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_metric_descriptors(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_metric_descriptors(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_metric_descriptors(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_monitored_resource_descriptors(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_monitored_resource_descriptors(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_monitored_resource_descriptors(
name_outside
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_time_series(self):
client = monitoring_v3.MetricServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_time_series(
name_inside, "", {}, enums.ListTimeSeriesRequest.TimeSeriesView.FULL
)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_time_series(
name_outside, "", {}, enums.ListTimeSeriesRequest.TimeSeriesView.FULL
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_notification_channel(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_notification_channel(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_path(
PROJECT_INSIDE, "mock_notification_channel"
)
delayed_inside = lambda: client.delete_notification_channel(name_inside)
name_outside = client.notification_channel_path(
PROJECT_OUTSIDE, "mock_notification_channel"
)
delayed_outside = lambda: client.delete_notification_channel(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_path(
PROJECT_INSIDE, "mock_notification_channel"
)
delayed_inside = lambda: client.get_notification_channel(name_inside)
name_outside = client.notification_channel_path(
PROJECT_OUTSIDE, "mock_notification_channel"
)
delayed_outside = lambda: client.get_notification_channel(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_notification_channel_descriptor(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_descriptor_path(
PROJECT_INSIDE, "mock_notification_channel_descriptor"
)
delayed_inside = lambda: client.get_notification_channel_descriptor(name_inside)
name_outside = client.notification_channel_descriptor_path(
PROJECT_OUTSIDE, "mock_notification_channel_descriptor"
)
delayed_outside = lambda: client.get_notification_channel_descriptor(
name_outside
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_notification_channel_descriptors(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_notification_channel_descriptors(
name_inside
)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_notification_channel_descriptors(
name_outside
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_notification_channels(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_notification_channels(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_notification_channels(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_notification_channel(self):
client = monitoring_v3.NotificationChannelServiceClient()
name_inside = client.notification_channel_path(
PROJECT_INSIDE, "mock_notification_channel"
)
delayed_inside = lambda: client.update_notification_channel(
{"name": name_inside}
)
name_outside = client.notification_channel_path(
PROJECT_OUTSIDE, "mock_notification_channel"
)
delayed_outside = lambda: client.update_notification_channel(
{"name": name_outside}
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_create_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.create_uptime_check_config(name_inside, {})
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.create_uptime_check_config(name_outside, {})
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_delete_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.uptime_check_config_path(
PROJECT_INSIDE, "mock_uptime_check_config"
)
delayed_inside = lambda: client.delete_uptime_check_config(name_inside)
name_outside = client.uptime_check_config_path(
PROJECT_OUTSIDE, "mock_uptime_check_config"
)
delayed_outside = lambda: client.delete_uptime_check_config(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_get_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.uptime_check_config_path(
PROJECT_INSIDE, "mock_uptime_check_config"
)
delayed_inside = lambda: client.get_uptime_check_config(name_inside)
name_outside = client.uptime_check_config_path(
PROJECT_OUTSIDE, "mock_uptime_check_config"
)
delayed_outside = lambda: client.get_uptime_check_config(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_list_uptime_check_configs(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.project_path(PROJECT_INSIDE)
delayed_inside = lambda: client.list_uptime_check_configs(name_inside)
name_outside = client.project_path(PROJECT_OUTSIDE)
delayed_outside = lambda: client.list_uptime_check_configs(name_outside)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
@pytest.mark.skipif(
PROJECT_INSIDE is None, reason="Missing environment variable: PROJECT_ID"
)
@pytest.mark.skipif(
PROJECT_OUTSIDE is None,
reason="Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
)
def test_update_uptime_check_config(self):
client = monitoring_v3.UptimeCheckServiceClient()
name_inside = client.uptime_check_config_path(
PROJECT_INSIDE, "mock_uptime_check_config"
)
delayed_inside = lambda: client.update_uptime_check_config(
{"name": name_inside}
)
name_outside = client.uptime_check_config_path(
PROJECT_OUTSIDE, "mock_uptime_check_config"
)
delayed_outside = lambda: client.update_uptime_check_config(
{"name": name_outside}
)
TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
|
[] |
[] |
[
"GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT",
"PROJECT_ID",
"GOOGLE_CLOUD_TESTS_IN_VPCSC"
] |
[]
|
["GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT", "PROJECT_ID", "GOOGLE_CLOUD_TESTS_IN_VPCSC"]
|
python
| 3 | 0 | |
predict.py
|
from keras.layers import Layer, Input, Dropout, Conv2D, Activation, add, UpSampling2D, Conv2DTranspose, Flatten, Reshape
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization, InputSpec
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
import os
import keras.backend as K
import tensorflow as tf
from skimage.transform import resize
from skimage import color
from helper_funcs import *
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# ### Model parameters
#
# This CycleGAN implementation allows a lot of freedom on both the training parameters and the network architecture.
opt = {}
# Data
opt['channels'] = 1
opt['img_shape'] = (200,200,1)
# CycleGAN can be used on both paired and unpaired data. The `paired_data` setting affects the presentation of output images as explained above.
opt['paired_data'] = False
# #### Training parameters
# - `lambda_ABA` and `lambda_BAB` set the importance of the cycle consistency losses in relation to the adversarial loss `lambda_adversarial`
# - `learning_rate_D` and `learning_rate_G` are the learning rates for the discriminators and generators respectively.
# - `generator_iterations` and `discriminator_iterations` represent how many times the generators or discriminators will be trained on every batch of images. This is very useful to keep the training of both systems balanced. In this case the discriminators become successful faster than the generators, so we account for this by training the generators 3 times on every batch of images.
# - `synthetic_pool_size` sets the size of the image pool used for training the discriminators. The image pool has a certain probability of returning a synthetic image from previous iterations, thus forcing the discriminator to have a certain "memory". More information on this method can be found in [this paper](https://arxiv.org/abs/1612.07828).
# - `beta_1` and `beta_2` are parameters of the [Adam](https://arxiv.org/abs/1412.6980) optimizers used on the generators and discriminators.
# - `batch_size` determines the number of images used for each update of the network weights. Due to the significant memory requirements of CycleGAN it is difficult to use a large batch size. For the small example dataset, values between 1 and 30 may be possible.
# - `epochs` sets the number of training epochs. Each epoch goes through all the training images once. The number of epochs necessary to train a model is therefore dependent on both the number of training images available and the batch size.
# Training parameters
opt['lambda_ABA'] = 10.0 # Cyclic loss weight A_2_B
opt['lambda_BAB'] = 10.0 # Cyclic loss weight B_2_A
opt['lambda_adversarial'] = 1.0 # Weight for loss from discriminator guess on synthetic images
opt['learning_rate_D'] = 2e-4
opt['learning_rate_G'] = 2e-4
opt['generator_iterations'] = 3 # Number of generator training iterations in each training loop
opt['discriminator_iterations'] = 1 # Number of discriminator training iterations in each training loop
opt['synthetic_pool_size'] = 50 # Size of image pools used for training the discriminators
opt['beta_1'] = 0.5 # Adam parameter
opt['beta_2'] = 0.999 # Adam parameter
opt['batch_size'] = 1 # Number of images per batch
opt['epochs'] = 10 # Choose multiples of 20 since the models are saved every 20th epoch
# Output parameters
opt['save_models'] = True # Whether to save the generator and discriminator models
opt['save_training_img'] = True # Whether to save example training results (otherwise only tmp.png is written)
opt['save_training_img_interval'] = 1 # Number of epoch between saves of intermediate training results
opt['self.tmp_img_update_frequency'] = 3 # Number of batches between updates of tmp.png
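# The image pool used for discriminator training (see `synthetic_pool_size` above) is not
# defined in this prediction script. The following is only a minimal sketch of such a pool,
# assuming the usual CycleGAN behaviour; the class name and details are illustrative and not
# taken from the training code.
import random
class ImagePool():
    def __init__(self, pool_size):
        self.pool_size = pool_size  # e.g. opt['synthetic_pool_size']
        self.images = []
    def query(self, image):
        # Until the pool is full, store and return the incoming synthetic image.
        if self.pool_size == 0:
            return image
        if len(self.images) < self.pool_size:
            self.images.append(image)
            return image
        # Once full, with 50% probability swap in the new image and return an older one,
        # giving the discriminator a "memory" of earlier synthetic images.
        if random.random() > 0.5:
            idx = random.randrange(self.pool_size)
            old_image = self.images[idx]
            self.images[idx] = image
            return old_image
        return image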
# #### Architecture parameters
# - `use_instance_normalization` is supposed to allow the selection of instance normalization or batch normalization layers. At the moment only instance normalization is implemented, so this option does not do anything.
# - `use_dropout` and `use_bias` allow setting dropout layers in the generators and whether to use a bias term in the various convolutional layers in the generators and discriminators.
# - `use_linear_decay` applies linear decay on the learning rates of the generators and discriminators, starting at epoch `decay_epoch`.
# - `use_patchgan` determines whether the discriminator evaluates the "realness" of images on a patch basis or on the whole image. More information on PatchGAN can be found in [this paper](https://arxiv.org/abs/1611.07004).
# - `use_resize_convolution` provides two ways to perform the upsampling in the generator, with significant differences in the results. More information can be found in [this article](https://distill.pub/2016/deconv-checkerboard/). Each has its advantages, and we have managed to get successful results with both methods.
# - `discriminator_sigmoid` adds a sigmoid activation at the end of the discriminator, forcing its output to the (0-1) range.
# Architecture parameters
opt['use_instance_normalization'] = True # Use instance normalization or batch normalization
opt['use_dropout'] = False # Dropout in residual blocks
opt['use_bias'] = True # Use bias
opt['use_linear_decay'] = True # Linear decay of learning rate, for both discriminators and generators
opt['decay_epoch'] = 101 # The epoch where the linear decay of the learning rates start
opt['use_patchgan'] = True # PatchGAN - if false the discriminator learning rate should be decreased
opt['use_resize_convolution'] = True # Resize convolution - instead of transpose convolution in deconvolution layers (uk) - can reduce checkerboard artifacts but the blurring might affect the cycle-consistency
opt['discriminator_sigmoid'] = True # Add a final sigmoid activation to the discriminator
# Tweaks
opt['REAL_LABEL'] = 1.0 # Use e.g. 0.9 to avoid training the discriminators to zero loss
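# The linear learning-rate decay described above is only configured here; this prediction
# script never applies it. Below is a minimal sketch of the schedule, assuming the usual
# behaviour (constant rate until `decay_epoch`, then a linear ramp down to zero at `epochs`);
# the function name is illustrative and not taken from the training code.
def linearly_decayed_learning_rate(base_lr, epoch, opt):
    if not opt['use_linear_decay'] or epoch < opt['decay_epoch']:
        return base_lr
    remaining_epochs = max(opt['epochs'] - opt['decay_epoch'], 1)
    # Fraction of the decay window already consumed, clamped so the rate never goes negative.
    progress = min((epoch - opt['decay_epoch']) / float(remaining_epochs), 1.0)
    return base_lr * (1.0 - progress)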
# ### Model architecture
#
# #### Layer blocks
# These are the individual layer blocks that are used to build the generators and discriminator. More information can be found in the appendix of the [CycleGAN paper](https://arxiv.org/abs/1703.10593).
# Discriminator layers
def ck(model, opt, x, k, use_normalization, use_bias):
x = Conv2D(filters=k, kernel_size=4, strides=2, padding='same', use_bias=use_bias)(x)
print(x)
if use_normalization:
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = LeakyReLU(alpha=0.2)(x)
return x
# First generator layer
def c7Ak(model, opt, x, k):
x = Conv2D(filters=k, kernel_size=7, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
return x
# Downsampling
def dk(model, opt, x, k): # Should have reflection padding
x = Conv2D(filters=k, kernel_size=3, strides=2, padding='same', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
return x
# Residual block
def Rk(model, opt, x0):
k = int(x0.shape[-1])
# First layer
x = ReflectionPadding2D((1,1))(x0)
x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
if opt['use_dropout']:
x = Dropout(0.5)(x)
# Second layer
x = ReflectionPadding2D((1, 1))(x)
x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
# Merge
x = add([x, x0])
return x
# Upsampling
def uk(model, opt, x, k):
# (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)
if opt['use_resize_convolution']:
x = UpSampling2D(size=(2, 2))(x) # Nearest neighbor upsampling
x = ReflectionPadding2D((1, 1))(x)
x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
else:
        x = Conv2DTranspose(filters=k, kernel_size=3, strides=2, padding='same', use_bias=opt['use_bias'])(x) # this matches a fractionally strided convolution with stride 1/2
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
return x
# #### Architecture functions
def build_generator(model, opt, name=None):
# Layer 1: Input
input_img = Input(shape=opt['img_shape'])
x = ReflectionPadding2D((3, 3))(input_img)
x = c7Ak(model, opt, x, 32)
# Layer 2-3: Downsampling
x = dk(model, opt, x, 64)
x = dk(model, opt, x, 128)
# Layers 4-12: Residual blocks
for _ in range(4, 13):
x = Rk(model, opt, x)
    # Layer 13-14: Upsampling
x = uk(model, opt, x, 64)
x = uk(model, opt, x, 32)
# Layer 15: Output
x = ReflectionPadding2D((3, 3))(x)
x = Conv2D(opt['channels'], kernel_size=7, strides=1, padding='valid', use_bias=True)(x)
x = Activation('tanh')(x)
# x = Reshape((217,181,1))(x)
# print("Generator Model:")
# print(Model(inputs=input_img, outputs=x, name=name).summary())
return Model(inputs=input_img, outputs=x, name=name)
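# This prediction script only needs the generator, so no discriminator is built here. For
# completeness, the sketch below shows how a discriminator could be assembled from the ck
# blocks defined above; it is an assumption modelled on the usual CycleGAN layout, not code
# taken from the training script.
def build_discriminator_sketch(model, opt, name=None):
    input_img = Input(shape=opt['img_shape'])
    # Layer 1: no normalization on the first discriminator block
    x = ck(model, opt, input_img, 64, use_normalization=False, use_bias=True)
    # Layers 2-4: progressively more filters
    x = ck(model, opt, x, 128, use_normalization=True, use_bias=opt['use_bias'])
    x = ck(model, opt, x, 256, use_normalization=True, use_bias=opt['use_bias'])
    x = ck(model, opt, x, 512, use_normalization=True, use_bias=opt['use_bias'])
    # Output layer: one value per patch (PatchGAN) rather than a single scalar
    x = Conv2D(filters=1, kernel_size=4, strides=1, padding='same', use_bias=True)(x)
    if opt['discriminator_sigmoid']:
        x = Activation('sigmoid')(x)
    return Model(inputs=input_img, outputs=x, name=name)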
# #### Loss functions
# The discriminators use MSE loss. The generators use MSE for the adversarial losses and MAE for the cycle consistency losses.
# Mean squared error
def mse(y_true, y_pred):
loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true))
return loss
# Mean absolute error
def mae(y_true, y_pred):
loss = tf.reduce_mean(tf.abs(y_pred - y_true))
return loss
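# For reference, the generator objective described in the training-parameter section combines
# the adversarial MSE term with the two MAE cycle-consistency terms, weighted by
# `lambda_adversarial`, `lambda_ABA` and `lambda_BAB`. This is only a hedged sketch of that
# combination; the actual training graph is built in the training script, not here.
def generator_loss_sketch(d_guess_on_synthetic, real_A, reconstructed_A, real_B, reconstructed_B):
    target = opt['REAL_LABEL'] * tf.ones_like(d_guess_on_synthetic)
    adversarial = opt['lambda_adversarial'] * mse(target, d_guess_on_synthetic)
    cycle_ABA = opt['lambda_ABA'] * mae(real_A, reconstructed_A)
    cycle_BAB = opt['lambda_BAB'] * mae(real_B, reconstructed_B)
    return adversarial + cycle_ABA + cycle_BAB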
# Load Model
def load_model():
model = {}
# Normalization
model['normalization'] = InstanceNormalization
model['G_A2B'] = build_generator(model, opt, name='G_A2B_model')
# Don't pre-allocate GPU memory; allocate as-needed
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.tensorflow_backend.set_session(tf.Session(config=config))
GA2B = model['G_A2B']
GA2B.load_weights('saved_model/G_A2B_model_weights_epoch_200.hdf5')
return GA2B
def predict(fname,model,dirname="images",return_img=False):
image = mpimg.imread(dirname + "/"+fname)
image = color.rgb2gray(image)
image = resize(image,(200,200))
image = np.reshape(image,(1, 200,200,1))
im = model.predict(image)
im = np.reshape(im,(200,200))
    if return_img:
        return im
else:
out_name = fname + '_result.png'
out_dir ="results/" + out_name
mpimg.imsave(out_dir,im,cmap='gray')
return out_name
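# Minimal usage sketch. The input file name below is an assumption; the image in images/ and
# the saved generator weights must exist locally for this to run.
if __name__ == '__main__':
    G_A2B = load_model()                            # loads saved_model/G_A2B_model_weights_epoch_200.hdf5
    out_file = predict('example_input.png', G_A2B)  # reads images/example_input.png
    print('Translated image written to results/' + out_file)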
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
src/github.com/mattn/goveralls/goveralls_test.go
|
package main
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"code.google.com/p/go-uuid/uuid"
)
func myImportPath() string {
cmd := exec.Command("go", "list")
b, err := cmd.CombinedOutput()
	if err != nil {
panic(err)
}
return strings.TrimSpace(string(b))
}
func TestUsage(t *testing.T) {
tmp := prepareTest(t)
defer os.RemoveAll(tmp)
cmd := exec.Command("goveralls", "-h")
b, err := cmd.CombinedOutput()
if err == nil {
t.Fatal("Expected exit code 1 bot 0")
}
s := strings.Split(string(b), "\n")[0]
if !strings.HasPrefix(s, "Usage: goveralls ") {
t.Fatalf("Expected %v, but %v", "Usage: ", s)
}
}
func TestGoveralls(t *testing.T) {
tmp := prepareTest(t)
p := myImportPath()
defer os.RemoveAll(tmp)
runCmd(t, "go", "get", p+"/tester")
runCmd(t, "go", "get", "github.com/axw/gocov/gocov")
b := runCmd(t, "./goveralls", "-package="+p+"/tester", "")
lines := strings.Split(strings.TrimSpace(string(b)), "\n")
s := lines[len(lines)-1]
if s != "Succeeded" {
t.Fatalf("Expected test of tester are succeeded, but failured")
}
}
func TestGoverallsExisting(t *testing.T) {
p := myImportPath()
t.Logf("My import path is %q", p)
tmp := prepareTest(t)
defer os.RemoveAll(tmp)
runCmd(t, "go", "get", p+"/tester")
runCmd(t, "go", "get", "github.com/axw/gocov/gocov")
b := runCmd(t, "goveralls", "-gocovdata=tester/cov.json",
"-package="+p+"/tester", "")
lines := strings.Split(strings.TrimSpace(string(b)), "\n")
s := lines[len(lines)-1]
if s != "Succeeded" {
t.Fatalf("Expected test of tester are succeeded, but failured")
}
}
func prepareTest(t *testing.T) (tmpPath string) {
tmp := os.TempDir()
tmp = filepath.Join(tmp, uuid.New())
os.Setenv("GOPATH", tmp)
path := os.Getenv("PATH")
path = tmp + "/bin:" + path
os.Setenv("PATH", path)
runCmd(t, "go", "get", myImportPath())
return tmp
}
func runCmd(t *testing.T, cmd string, args ...string) []byte {
b, err := exec.Command(cmd, args...).CombinedOutput()
if err != nil {
t.Fatalf("Expected %v, but %v: %v", nil, err, string(b))
}
return b
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
services/campaign/api/main.go
|
package main
import (
"net/http"
"os"
"github.com/snapiz/go-vue-portal-starter/services/campaign/api/utils"
"github.com/gorilla/mux"
common "github.com/snapiz/go-vue-portal-starter/common/go"
_ "github.com/snapiz/go-vue-portal-starter/services/campaign/api/db"
"github.com/snapiz/go-vue-portal-starter/services/campaign/api/schema"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/graphql-go/handler"
)
func apiHandler(e events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
return utils.NewHandler(handler.Config{
Schema: &schema.Schema,
GraphiQL: true,
}, e)
}
func init() {
common.LoadEnv("services/campaign")
}
func main() {
if os.Getenv("GO_ENV") == "dev" {
rtr := mux.NewRouter()
rtr.HandleFunc("/graphql", func(w http.ResponseWriter, r *http.Request) {
utils.GraphqlHandler(handler.Config{
Schema: &schema.Schema,
GraphiQL: true,
}, w, r)
}).Methods("POST")
rtr.HandleFunc("/graphql", func(w http.ResponseWriter, r *http.Request) {
utils.GraphqlHandler(handler.Config{
Schema: &schema.Schema,
GraphiQL: true,
}, w, r)
}).Methods("GET")
http.Handle("/", rtr)
if err := http.ListenAndServe(":"+os.Getenv("PORT"), nil); err != nil {
panic(err)
}
} else {
lambda.Start(apiHandler)
}
}
|
[
"\"GO_ENV\"",
"\"PORT\""
] |
[] |
[
"PORT",
"GO_ENV"
] |
[]
|
["PORT", "GO_ENV"]
|
go
| 2 | 0 | |
pkg/kubefed2/proxy.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubefed2
import (
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/golang/glog"
"github.com/kubernetes-sigs/federation-v2/pkg/kubefed2/proxy"
"github.com/kubernetes-sigs/federation-v2/pkg/kubefed2/util"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
defaultPort = 8001
proxyLong = `
Creates a proxy server or application-level gateway between localhost and
the Kubernetes API Server. It also allows serving static content over specified
HTTP path. All incoming data enters through one port and gets forwarded to
the remote kubernetes API Server port, except for the path matching the static content path.`
proxyExample = `
# To proxy all of the kubernetes api and nothing else, use:
$ kubectl proxy --api-prefix=/
# To proxy only part of the kubernetes api and also some static files:
$ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/
# The above lets you 'curl localhost:8001/api/v1/pods'.
# To proxy the entire kubernetes api at a different root, use:
$ kubectl proxy --api-prefix=/custom/
# The above lets you 'curl localhost:8001/custom/api/v1/pods'
# Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/
kubectl proxy --port=8011 --www=./local/www/
# Run a proxy to kubernetes apiserver on an arbitrary local port.
# The chosen port for the server will be output to stdout.
kubectl proxy --port=0
# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api
# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/pods/
kubectl proxy --api-prefix=/k8s-api`
)
func NewCmdProxy(cmdOut io.Writer, config util.FedConfig) *cobra.Command {
cmd := &cobra.Command{
Use: "proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]",
DisableFlagsInUseLine: true,
Short: "Run a proxy to the Kubernetes API server",
Long: proxyLong,
Example: proxyExample,
Run: func(cmd *cobra.Command, args []string) {
err := RunProxy(cmdOut, cmd)
if err != nil {
glog.Fatalf("error: %v", err)
}
},
}
cmd.Flags().StringP("www", "w", "", "Also serve static files from the given directory under the specified prefix.")
cmd.Flags().StringP("www-prefix", "P", "/static/", "Prefix to serve static files under, if static file directory is specified.")
cmd.Flags().StringP("api-prefix", "", "/", "Prefix to serve the proxied API under.")
cmd.Flags().String("accept-paths", proxy.DefaultPathAcceptRE, "Regular expression for paths that the proxy should accept.")
cmd.Flags().String("reject-paths", proxy.DefaultPathRejectRE, "Regular expression for paths that the proxy should reject. Paths specified here will be rejected even accepted by --accept-paths.")
cmd.Flags().String("accept-hosts", proxy.DefaultHostAcceptRE, "Regular expression for hosts that the proxy should accept.")
cmd.Flags().String("reject-methods", proxy.DefaultMethodRejectRE, "Regular expression for HTTP methods that the proxy should reject (example --reject-methods='POST,PUT,PATCH'). ")
cmd.Flags().IntP("port", "p", defaultPort, "The port on which to run the proxy. Set to 0 to pick a random port.")
cmd.Flags().StringP("address", "", "127.0.0.1", "The IP address on which to serve on.")
cmd.Flags().Bool("disable-filter", false, "If true, disable request filtering in the proxy. This is dangerous, and can leave you vulnerable to XSRF attacks, when used with an accessible port.")
cmd.Flags().StringP("unix-socket", "u", "", "Unix socket on which to run the proxy.")
cmd.Flags().Duration("keepalive", 0, "keepalive specifies the keep-alive period for an active network connection. Set to 0 to disable keepalive.")
return cmd
}
func RunProxy(out io.Writer, cmd *cobra.Command) error {
path := util.GetFlagString(cmd, "unix-socket")
port := util.GetFlagInt(cmd, "port")
address := util.GetFlagString(cmd, "address")
if port != defaultPort && path != "" {
return errors.New("Don't specify both --unix-socket and --port")
}
clientConfig, err := getDefaultRESTConfig()
if err != nil {
return err
}
staticPrefix := util.GetFlagString(cmd, "www-prefix")
if !strings.HasSuffix(staticPrefix, "/") {
staticPrefix += "/"
}
staticDir := util.GetFlagString(cmd, "www")
if staticDir != "" {
fileInfo, err := os.Stat(staticDir)
if err != nil {
glog.Warning("Failed to stat static file directory "+staticDir+": ", err)
} else if !fileInfo.IsDir() {
glog.Warning("Static file directory " + staticDir + " is not a directory")
}
}
apiProxyPrefix := util.GetFlagString(cmd, "api-prefix")
if !strings.HasSuffix(apiProxyPrefix, "/") {
apiProxyPrefix += "/"
}
filter := &proxy.FilterServer{
AcceptPaths: proxy.MakeRegexpArrayOrDie(util.GetFlagString(cmd, "accept-paths")),
RejectPaths: proxy.MakeRegexpArrayOrDie(util.GetFlagString(cmd, "reject-paths")),
AcceptHosts: proxy.MakeRegexpArrayOrDie(util.GetFlagString(cmd, "accept-hosts")),
RejectMethods: proxy.MakeRegexpArrayOrDie(util.GetFlagString(cmd, "reject-methods")),
}
if util.GetFlagBool(cmd, "disable-filter") {
if path == "" {
glog.Warning("Request filter disabled, your proxy is vulnerable to XSRF attacks, please be cautious")
}
filter = nil
}
keepalive := util.GetFlagDuration(cmd, "keepalive")
	server, err := proxy.NewServer(staticDir, apiProxyPrefix, staticPrefix, filter, clientConfig, keepalive)
	if err != nil {
		return err
	}
	// Separate listening from serving so we can report the bound port
	// when it is chosen by the OS (eg: port == 0)
var l net.Listener
if path == "" {
l, err = server.Listen(address, port)
} else {
l, err = server.ListenUnix(path)
}
if err != nil {
glog.Fatal(err)
}
fmt.Fprintf(out, "Starting to serve on %s\n", l.Addr().String())
glog.Fatal(server.ServeOnListener(l))
return nil
}
func getDefaultRESTConfig() (*rest.Config, error) {
// try incluster first
config, err := rest.InClusterConfig()
if err == nil {
return config, nil
}
// try load default config from file
cfgFile := filepath.Join(os.Getenv("HOME"), ".kube", "config")
return clientcmd.BuildConfigFromFlags("", cfgFile)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
src/twisted/internet/interfaces.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface documentation.
Maintainer: Itamar Shtull-Trauring
"""
from __future__ import division, absolute_import
from zope.interface import Interface, Attribute
class IAddress(Interface):
"""
An address, e.g. a TCP C{(host, port)}.
Default implementations are in L{twisted.internet.address}.
"""
### Reactor Interfaces
class IConnector(Interface):
"""
Object used to interface between connections and protocols.
Each L{IConnector} manages one connection.
"""
def stopConnecting():
"""
Stop attempting to connect.
"""
def disconnect():
"""
Disconnect regardless of the connection state.
If we are connected, disconnect, if we are trying to connect,
stop trying.
"""
def connect():
"""
Try to connect to remote address.
"""
def getDestination():
"""
Return destination this will try to connect to.
@return: An object which provides L{IAddress}.
"""
class IResolverSimple(Interface):
def getHostByName(name, timeout = (1, 3, 11, 45)):
"""
Resolve the domain name C{name} into an IP address.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{twisted.internet.defer.Deferred}
@return: The callback of the Deferred that is returned will be
passed a string that represents the IP address of the
specified name, or the errback will be called if the
lookup times out. If multiple types of address records
are associated with the name, A6 records will be returned
in preference to AAAA records, which will be returned in
preference to A records. If there are multiple records of
the type to be returned, one will be selected at random.
@raise twisted.internet.defer.TimeoutError: Raised
(asynchronously) if the name cannot be resolved within the
specified timeout period.
"""
class IHostResolution(Interface):
"""
    An L{IHostResolution} represents an in-progress recursive query
for a DNS name.
@since: Twisted 17.1.0
"""
name = Attribute(
"""
L{unicode}; the name of the host being resolved.
"""
)
def cancel():
"""
Stop the hostname resolution in progress.
"""
class IResolutionReceiver(Interface):
"""
An L{IResolutionReceiver} receives the results of a hostname resolution in
progress, initiated by an L{IHostnameResolver}.
@since: Twisted 17.1.0
"""
def resolutionBegan(resolutionInProgress):
"""
A hostname resolution began.
@param resolutionInProgress: an L{IHostResolution}.
"""
def addressResolved(address):
"""
An internet address. This is called when an address for the given name
is discovered. In the current implementation this practically means
L{IPv4Address} or L{IPv6Address}, but implementations of this interface
should be lenient to other types being passed to this interface as
well, for future-proofing.
@param address: An address object.
@type address: L{IAddress}
"""
def resolutionComplete():
"""
Resolution has completed; no further addresses will be relayed to
L{IResolutionReceiver.addressResolved}.
"""
class IHostnameResolver(Interface):
"""
An L{IHostnameResolver} can resolve a host name and port number into a
series of L{IAddress} objects.
@since: Twisted 17.1.0
"""
def resolveHostName(resolutionReceiver, hostName, portNumber=0,
addressTypes=None, transportSemantics='TCP'):
"""
Initiate a hostname resolution.
@param resolutionReceiver: an object that will receive each resolved
address as it arrives.
@type resolutionReceiver: L{IResolutionReceiver}
@param hostName: The name of the host to resolve. If this contains
non-ASCII code points, they will be converted to IDNA first.
@type hostName: L{unicode}
@param portNumber: The port number that the returned addresses should
include.
@type portNumber: L{int} greater than or equal to 0 and less than 65536
@param addressTypes: An iterable of implementors of L{IAddress} that
are acceptable values for C{resolutionReceiver} to receive to its
L{addressResolved <IResolutionReceiver.addressResolved>}. In
practice, this means an iterable containing
L{twisted.internet.address.IPv4Address},
L{twisted.internet.address.IPv6Address}, both, or neither.
@type addressTypes: L{collections.Iterable} of L{type}
@param transportSemantics: A string describing the semantics of the
transport; either C{'TCP'} for stream-oriented transports or
C{'UDP'} for datagram-oriented; see
L{twisted.internet.address.IPv6Address.type} and
L{twisted.internet.address.IPv4Address.type}.
@type transportSemantics: native L{str}
@return: The resolution in progress.
@rtype: L{IResolutionReceiver}
"""
class IResolver(IResolverSimple):
def query(query, timeout=None):
"""
Dispatch C{query} to the method which can handle its type.
@type query: L{twisted.names.dns.Query}
@param query: The DNS query being issued, to which a response is to be
generated.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress(name, timeout=None):
"""
Perform an A record lookup.
@type name: L{bytes}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress6(name, timeout=None):
"""
Perform an A6 record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupIPV6Address(name, timeout=None):
"""
Perform an AAAA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailExchange(name, timeout=None):
"""
Perform an MX record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNameservers(name, timeout=None):
"""
Perform an NS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupCanonicalName(name, timeout=None):
"""
Perform a CNAME record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailBox(name, timeout=None):
"""
Perform an MB record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailGroup(name, timeout=None):
"""
Perform an MG record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailRename(name, timeout=None):
"""
Perform an MR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupPointer(name, timeout=None):
"""
Perform a PTR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAuthority(name, timeout=None):
"""
Perform an SOA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNull(name, timeout=None):
"""
Perform a NULL record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupWellKnownServices(name, timeout=None):
"""
Perform a WKS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupHostInfo(name, timeout=None):
"""
Perform a HINFO record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailboxInfo(name, timeout=None):
"""
Perform an MINFO record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupText(name, timeout=None):
"""
Perform a TXT record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupResponsibility(name, timeout=None):
"""
Perform an RP record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAFSDatabase(name, timeout=None):
"""
Perform an AFSDB record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupService(name, timeout=None):
"""
Perform an SRV record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAllRecords(name, timeout=None):
"""
Perform an ALL_RECORD lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
    def lookupSenderPolicy(name, timeout=None):
        """
        Perform an SPF record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNamingAuthorityPointer(name, timeout=None):
"""
Perform a NAPTR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupZone(name, timeout=None):
"""
Perform an AXFR record lookup.
NB This is quite different from other DNS requests. See
U{http://cr.yp.to/djbdns/axfr-notes.html} for more
information.
NB Unlike other C{lookup*} methods, the timeout here is not a
list of ints, it is a single int.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: C{int}
@param timeout: When this timeout expires, the query is
considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances.
The first element of the tuple gives answers.
The second and third elements are always empty.
The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
class IReactorTCP(Interface):
def listenTCP(port, factory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param backlog: size of the listen queue
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses. To bind to all IPv4 and IPv6
addresses, you must call this method twice.
@return: an object that provides L{IListeningPort}.
@raise CannotListenError: as defined here
L{twisted.internet.error.CannotListenError},
if it cannot listen on this port (e.g., it
cannot bind to the required port number)
"""
def connectTCP(host, port, factory, timeout=30, bindAddress=None):
"""
Connect a TCP client.
@param host: A hostname or an IPv4 or IPv6 address literal.
@type host: L{bytes}
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind
to, or None.
@return: An object which provides L{IConnector}. This connector will
call various callbacks on the factory when a connection is
made, failed, or lost - see
L{ClientFactory<twisted.internet.protocol.ClientFactory>}
docs for details.
"""
class IReactorSSL(Interface):
def connectSSL(host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""
Connect a client Protocol to a remote SSL socket.
@param host: a host name
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ClientContextFactory} object.
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind to,
or L{None}.
@return: An object which provides L{IConnector}.
"""
def listenSSL(port, factory, contextFactory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
The connection is a SSL one, using contexts created by the context
factory.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param contextFactory: an implementor of L{IOpenSSLContextFactory}
@param backlog: size of the listen queue
@param interface: the hostname to bind to, defaults to '' (all)
"""
class IReactorUNIX(Interface):
"""
UNIX socket methods.
"""
def connectUNIX(address, factory, timeout=30, checkPID=0):
"""
Connect a client protocol to a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the connection
has failed.
@param checkPID: if True, check for a pid file to verify that a server
is listening. If C{address} is a Linux abstract namespace path,
this must be C{False}.
@return: An object which provides L{IConnector}.
"""
def listenUNIX(address, factory, backlog=50, mode=0o666, wantPID=0):
"""
Listen on a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.Factory} instance.
@param backlog: number of connections to allow in backlog.
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param wantPID: if True, create a pidfile for the socket. If C{address}
is a Linux abstract namespace path, this must be C{False}.
@return: An object which provides L{IListeningPort}.
"""
class IReactorUNIXDatagram(Interface):
"""
Datagram UNIX socket methods.
"""
def connectUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0o666, bindAddress=None):
"""
Connect a client protocol to a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.ConnectedDatagramProtocol} instance
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param bindAddress: address to bind to
@return: An object which provides L{IConnector}.
"""
def listenUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0o666):
"""
Listen on a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.DatagramProtocol} instance.
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@return: An object which provides L{IListeningPort}.
"""
class IReactorWin32Events(Interface):
"""
Win32 Event API methods
@since: 10.2
"""
def addEvent(event, fd, action):
"""
Add a new win32 event to the event loop.
@param event: a Win32 event object created using win32event.CreateEvent()
@param fd: an instance of L{twisted.internet.abstract.FileDescriptor}
@param action: a string that is a method name of the fd instance.
This method is called in response to the event.
@return: None
"""
def removeEvent(event):
"""
Remove an event.
@param event: a Win32 event object added using L{IReactorWin32Events.addEvent}
@return: None
"""
class IReactorUDP(Interface):
"""
UDP socket methods.
"""
def listenUDP(port, protocol, interface='', maxPacketSize=8192):
"""
Connects a given L{DatagramProtocol} to the given numeric UDP port.
@param port: A port number on which to listen.
@type port: C{int}
@param protocol: A L{DatagramProtocol} instance which will be
connected to the given C{port}.
@type protocol: L{DatagramProtocol}
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses.
@type interface: C{str}
@param maxPacketSize: The maximum packet size to accept.
@type maxPacketSize: C{int}
@return: object which provides L{IListeningPort}.
"""
class IReactorMulticast(Interface):
"""
UDP socket methods that support multicast.
IMPORTANT: This is an experimental new interface. It may change
without backwards compatibility. Suggestions are welcome.
"""
def listenMulticast(port, protocol, interface='', maxPacketSize=8192,
listenMultiple=False):
"""
Connects a given
L{DatagramProtocol<twisted.internet.protocol.DatagramProtocol>} to the
given numeric UDP port.
@param listenMultiple: If set to True, allows multiple sockets to
bind to the same address and port number at the same time.
@type listenMultiple: C{bool}
@returns: An object which provides L{IListeningPort}.
@see: L{twisted.internet.interfaces.IMulticastTransport}
@see: U{http://twistedmatrix.com/documents/current/core/howto/udp.html}
"""
class IReactorSocket(Interface):
"""
Methods which allow a reactor to use externally created sockets.
For example, to use C{adoptStreamPort} to implement behavior equivalent
to that of L{IReactorTCP.listenTCP}, you might write code like this::
from socket import SOMAXCONN, AF_INET, SOCK_STREAM, socket
portSocket = socket(AF_INET, SOCK_STREAM)
# Set FD_CLOEXEC on port, left as an exercise. Then make it into a
# non-blocking listening port:
portSocket.setblocking(False)
portSocket.bind(('192.168.1.2', 12345))
portSocket.listen(SOMAXCONN)
# Now have the reactor use it as a TCP port
port = reactor.adoptStreamPort(
portSocket.fileno(), AF_INET, YourFactory())
# portSocket itself is no longer necessary, and needs to be cleaned
# up by us.
portSocket.close()
# Whenever the server is no longer needed, stop it as usual.
stoppedDeferred = port.stopListening()
Another potential use is to inherit a listening descriptor from a parent
process (for example, systemd or launchd), or to receive one over a UNIX
domain socket.
Some plans for extending this interface exist. See:
- U{http://twistedmatrix.com/trac/ticket/6594}: AF_UNIX SOCK_DGRAM ports
"""
def adoptStreamPort(fileDescriptor, addressFamily, factory):
"""
Add an existing listening I{SOCK_STREAM} socket to the reactor to
monitor for new connections to accept and handle.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptStreamPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create new
protocols to handle connections accepted via this socket.
@return: An object providing L{IListeningPort}.
@raise twisted.internet.error.UnsupportedAddressFamily: If the
given address family is not supported by this reactor, or
not supported with the given socket type.
@raise twisted.internet.error.UnsupportedSocketType: If the
given socket type is not supported by this reactor, or not
supported with the given socket type.
"""
def adoptStreamConnection(fileDescriptor, addressFamily, factory):
"""
Add an existing connected I{SOCK_STREAM} socket to the reactor to
monitor for data.
Note that the given factory won't have its C{startFactory} and
C{stopFactory} methods called, as there is no sensible time to call
them in this situation.
@param fileDescriptor: A file descriptor associated with a socket which
is already connected. The socket must be set non-blocking. Any
additional flags (for example, close-on-exec) must also be set by
application code. Application code is responsible for closing the
file descriptor, which may be done as soon as
C{adoptStreamConnection} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create a new
protocol to handle the connection via this socket.
@raise UnsupportedAddressFamily: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
@raise UnsupportedSocketType: If the given socket type is not supported
by this reactor, or not supported with the given socket type.
"""
def adoptDatagramPort(fileDescriptor, addressFamily, protocol,
maxPacketSize=8192):
"""
Add an existing listening I{SOCK_DGRAM} socket to the reactor to
monitor for read and write readiness.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptDatagramPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family or I{domain} of the socket.
For example, L{socket.AF_INET6}.
@type addressFamily: C{int}
@param protocol: A L{DatagramProtocol} instance to connect to
a UDP transport.
@type protocol: L{DatagramProtocol}
@param maxPacketSize: The maximum packet size to accept.
@type maxPacketSize: C{int}
@return: An object providing L{IListeningPort}.
@raise UnsupportedAddressFamily: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
@raise UnsupportedSocketType: If the given socket type is not supported
by this reactor, or not supported with the given socket type.
"""
class IReactorProcess(Interface):
def spawnProcess(processProtocol, executable, args=(), env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn a process, with a process protocol.
Arguments given to this function that are listed as L{bytes} or
L{unicode} may be encoded or decoded depending on the platform and the
argument type given. On UNIX systems (Linux, FreeBSD, macOS) and
Python 2 on Windows, L{unicode} arguments will be encoded down to
L{bytes} using the encoding given by L{os.getfilesystemencoding}, to be
used with the "narrow" OS APIs. On Python 3 on Windows, L{bytes}
arguments will be decoded up to L{unicode} using the encoding given by
L{os.getfilesystemencoding} (C{mbcs} before Python 3.6, C{utf8}
thereafter) and given to Windows's native "wide" APIs.
@type processProtocol: L{IProcessProtocol} provider
@param processProtocol: An object which will be notified of all events
related to the created process.
@param executable: the file name to spawn - the full path should be
used.
@type executable: L{bytes} or L{unicode}
@param args: the command line arguments to pass to the process; a
sequence of strings. The first string should be the executable's
name.
@type args: L{list} with L{bytes} or L{unicode} items.
@type env: a L{dict} mapping L{bytes}/L{unicode} keys to
L{bytes}/L{unicode} items, or L{None}.
@param env: the environment variables to pass to the child process.
The resulting behavior varies between platforms. If:
- C{env} is not set:
- On POSIX: pass an empty environment.
- On Windows: pass L{os.environ}.
- C{env} is L{None}:
- On POSIX: pass L{os.environ}.
- On Windows: pass L{os.environ}.
- C{env} is a L{dict}:
- On POSIX: pass the key/value pairs in C{env} as the
complete environment.
- On Windows: update L{os.environ} with the key/value
pairs in the L{dict} before passing it. As a
consequence of U{bug #1640
<http://twistedmatrix.com/trac/ticket/1640>}, passing
keys with empty values in an effort to unset
environment variables I{won't} unset them.
@param path: the path to run the subprocess in - defaults to the
current directory.
@type path: L{bytes} or L{unicode} or L{None}
@param uid: user ID to run the subprocess as. (Only available on POSIX
systems.)
@param gid: group ID to run the subprocess as. (Only available on
POSIX systems.)
        @param usePTY: if true, run this process in a pseudo-terminal.
            Optionally a tuple of C{(masterfd, slavefd, ttyname)}, in which
            case use those file descriptors. (Not available on all systems.)
@param childFDs: A dictionary mapping file descriptors in the new child
process to an integer or to the string 'r' or 'w'.
If the value is an integer, it specifies a file descriptor in the
parent process which will be mapped to a file descriptor (specified
by the key) in the child process. This is useful for things like
inetd and shell-like file redirection.
If it is the string 'r', a pipe will be created and attached to the
child at that file descriptor: the child will be able to write to
that file descriptor and the parent will receive read notification
via the L{IProcessProtocol.childDataReceived} callback. This is
useful for the child's stdout and stderr.
If it is the string 'w', similar setup to the previous case will
occur, with the pipe being readable by the child instead of
writeable. The parent process can write to that file descriptor
using L{IProcessTransport.writeToChild}. This is useful for the
child's stdin.
If childFDs is not passed, the default behaviour is to use a
mapping that opens the usual stdin/stdout/stderr pipes.
@type childFDs: L{dict} of L{int} to L{int} or L{str}
@see: L{twisted.internet.protocol.ProcessProtocol}
@return: An object which provides L{IProcessTransport}.
@raise OSError: Raised with errno C{EAGAIN} or C{ENOMEM} if there are
insufficient system resources to create a new process.
"""
class IReactorTime(Interface):
"""
Time methods that a Reactor should implement.
"""
def seconds():
"""
Get the current time in seconds.
@return: A number-like object of some sort.
"""
def callLater(delay, callable, *args, **kw):
"""
Call a function later.
@type delay: C{float}
@param delay: the number of seconds to wait.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: An object which provides L{IDelayedCall} and can be used to
cancel the scheduled call, by calling its C{cancel()} method.
It also may be rescheduled by calling its C{delay()} or
C{reset()} methods.
"""
def getDelayedCalls():
"""
Retrieve all currently scheduled delayed calls.
@return: A tuple of all L{IDelayedCall} providers representing all
currently scheduled calls. This is everything that has been
returned by C{callLater} but not yet called or canceled.
"""
class IDelayedCall(Interface):
"""
A scheduled call.
There are probably other useful methods we can add to this interface;
suggestions are welcome.
"""
def getTime():
"""
Get time when delayed call will happen.
@return: time in seconds since epoch (a float).
"""
def cancel():
"""
Cancel the scheduled call.
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def delay(secondsLater):
"""
Delay the scheduled call.
@param secondsLater: how many seconds from its current firing time to delay
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def reset(secondsFromNow):
"""
Reset the scheduled call's timer.
@param secondsFromNow: how many seconds from now it should fire,
equivalent to C{.cancel()} and then doing another
C{reactor.callLater(secondsLater, ...)}
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def active():
"""
@return: True if this call is still active, False if it has been
called or cancelled.
"""
class IReactorFromThreads(Interface):
"""
This interface is the set of thread-safe methods which may be invoked on
the reactor from other threads.
@since: 15.4
"""
def callFromThread(callable, *args, **kw):
"""
Cause a function to be executed by the reactor thread.
Use this method when you want to run a function in the reactor's thread
from another thread. Calling L{callFromThread} should wake up the main
thread (where L{reactor.run() <IReactorCore.run>} is executing) and run
the given callable in that thread.
If you're writing a multi-threaded application the C{callable} may need
to be thread safe, but this method doesn't require it as such. If you
want to call a function in the next mainloop iteration, but you're in
the same thread, use L{callLater} with a delay of 0.
"""
class IReactorInThreads(Interface):
"""
This interface contains the methods exposed by a reactor which will let you
run functions in another thread.
@since: 15.4
"""
def callInThread(callable, *args, **kwargs):
"""
Run the given callable object in a separate thread, with the given
arguments and keyword arguments.
"""
class IReactorThreads(IReactorFromThreads, IReactorInThreads):
"""
Dispatch methods to be run in threads.
Internally, this should use a thread pool and dispatch methods to them.
"""
def getThreadPool():
"""
Return the threadpool used by L{IReactorInThreads.callInThread}.
Create it first if necessary.
@rtype: L{twisted.python.threadpool.ThreadPool}
"""
def suggestThreadPoolSize(size):
"""
Suggest the size of the internal threadpool used to dispatch functions
passed to L{IReactorInThreads.callInThread}.
"""
class IReactorCore(Interface):
"""
Core methods that a Reactor must implement.
"""
running = Attribute(
"A C{bool} which is C{True} from I{during startup} to "
"I{during shutdown} and C{False} the rest of the time.")
def resolve(name, timeout=10):
"""
Return a L{twisted.internet.defer.Deferred} that will resolve a hostname.
"""
def run():
"""
Fire 'startup' System Events, move the reactor to the 'running'
state, then run the main loop until it is stopped with C{stop()} or
C{crash()}.
"""
def stop():
"""
Fire 'shutdown' System Events, which will move the reactor to the
'stopped' state and cause C{reactor.run()} to exit.
"""
def crash():
"""
Stop the main loop *immediately*, without firing any system events.
This is named as it is because this is an extremely "rude" thing to do;
it is possible to lose data and put your system in an inconsistent
state by calling this. However, it is necessary, as sometimes a system
can become wedged in a pre-shutdown call.
"""
def iterate(delay=0):
"""
Run the main loop's I/O polling function for a period of time.
This is most useful in applications where the UI is being drawn "as
fast as possible", such as games. All pending L{IDelayedCall}s will
be called.
The reactor must have been started (via the C{run()} method) prior to
any invocations of this method. It must also be stopped manually
after the last call to this method (via the C{stop()} method). This
method is not re-entrant: you must not call it recursively; in
particular, you must not call it while the reactor is running.
"""
def fireSystemEvent(eventType):
"""
Fire a system-wide event.
System-wide events are things like 'startup', 'shutdown', and
'persist'.
"""
def addSystemEventTrigger(phase, eventType, callable, *args, **kw):
"""
Add a function to be called when a system event occurs.
Each "system event" in Twisted, such as 'startup', 'shutdown', and
'persist', has 3 phases: 'before', 'during', and 'after' (in that
order, of course). These events will be fired internally by the
Reactor.
An implementor of this interface must only implement those events
described here.
Callbacks registered for the "before" phase may return either None or a
Deferred. The "during" phase will not execute until all of the
Deferreds from the "before" phase have fired.
Once the "during" phase is running, all of the remaining triggers must
execute; their return values must be ignored.
@param phase: a time to call the event -- either the string 'before',
'after', or 'during', describing when to call it
relative to the event's execution.
@param eventType: this is a string describing the type of event.
@param callable: the object to call before shutdown.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: an ID that can be used to remove this call with
removeSystemEventTrigger.
"""
def removeSystemEventTrigger(triggerID):
"""
Removes a trigger added with addSystemEventTrigger.
@param triggerID: a value returned from addSystemEventTrigger.
@raise KeyError: If there is no system event trigger for the given
C{triggerID}.
@raise ValueError: If there is no system event trigger for the given
C{triggerID}.
@raise TypeError: If there is no system event trigger for the given
C{triggerID}.
"""
def callWhenRunning(callable, *args, **kw):
"""
Call a function when the reactor is running.
If the reactor has not started, the callable will be scheduled
to run when it does start. Otherwise, the callable will be invoked
immediately.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: None if the callable was invoked, otherwise a system
event id for the scheduled call.
"""
class IReactorPluggableResolver(Interface):
"""
An L{IReactorPluggableResolver} is a reactor which can be customized with
an L{IResolverSimple}. This is a fairly limited interface, that supports
only IPv4; you should use L{IReactorPluggableNameResolver} instead.
@see: L{IReactorPluggableNameResolver}
"""
def installResolver(resolver):
"""
        Set the internal resolver to use for name lookups.
@type resolver: An object implementing the L{IResolverSimple} interface
@param resolver: The new resolver to use.
@return: The previously installed resolver.
@rtype: L{IResolverSimple}
"""
class IReactorPluggableNameResolver(Interface):
"""
An L{IReactorPluggableNameResolver} is a reactor whose name resolver can be
set to a user-supplied object.
"""
nameResolver = Attribute(
"""
Read-only attribute; the resolver installed with L{installResolver}.
An L{IHostnameResolver}.
"""
)
def installNameResolver(resolver):
"""
Set the internal resolver to use for name lookups.
@type resolver: An object providing the L{IHostnameResolver} interface.
@param resolver: The new resolver to use.
@return: The previously installed resolver.
@rtype: L{IHostnameResolver}
"""
class IReactorDaemonize(Interface):
"""
A reactor which provides hooks that need to be called before and after
daemonization.
Notes:
- This interface SHOULD NOT be called by applications.
- This interface should only be implemented by reactors as a workaround
(in particular, it's implemented currently only by kqueue()).
For details please see the comments on ticket #1918.
"""
def beforeDaemonize():
"""
Hook to be called immediately before daemonization. No reactor methods
may be called until L{afterDaemonize} is called.
@return: L{None}.
"""
def afterDaemonize():
"""
Hook to be called immediately after daemonization. This may only be
called after L{beforeDaemonize} had been called previously.
@return: L{None}.
"""
class IReactorFDSet(Interface):
"""
Implement me to be able to use L{IFileDescriptor} type resources.
This assumes that your main-loop uses UNIX-style numeric file descriptors
(or at least similarly opaque IDs returned from a .fileno() method)
"""
def addReader(reader):
"""
I add reader to the set of file descriptors to get read events for.
@param reader: An L{IReadDescriptor} provider that will be checked for
read events until it is removed from the reactor with
L{removeReader}.
@return: L{None}.
"""
def addWriter(writer):
"""
I add writer to the set of file descriptors to get write events for.
@param writer: An L{IWriteDescriptor} provider that will be checked for
write events until it is removed from the reactor with
L{removeWriter}.
@return: L{None}.
"""
def removeReader(reader):
"""
Removes an object previously added with L{addReader}.
@return: L{None}.
"""
def removeWriter(writer):
"""
Removes an object previously added with L{addWriter}.
@return: L{None}.
"""
def removeAll():
"""
Remove all readers and writers.
        Should not remove the reactor's internal connections (like a waker).
@return: A list of L{IReadDescriptor} and L{IWriteDescriptor} providers
which were removed.
"""
def getReaders():
"""
Return the list of file descriptors currently monitored for input
events by the reactor.
@return: the list of file descriptors monitored for input events.
@rtype: C{list} of C{IReadDescriptor}
"""
def getWriters():
"""
        Return the list of file descriptors currently monitored for output events
by the reactor.
@return: the list of file descriptors monitored for output events.
@rtype: C{list} of C{IWriteDescriptor}
"""
class IListeningPort(Interface):
"""
A listening port.
"""
def startListening():
"""
Start listening on this port.
@raise CannotListenError: If it cannot listen on this port (e.g., it is
a TCP port and it cannot bind to the required
port number).
"""
def stopListening():
"""
Stop listening on this port.
If it does not complete immediately, will return Deferred that fires
upon completion.
"""
def getHost():
"""
Get the host that this port is listening for.
@return: An L{IAddress} provider.
"""
class ILoggingContext(Interface):
"""
Give context information that will be used to log events generated by
this item.
"""
def logPrefix():
"""
@return: Prefix used during log formatting to indicate context.
@rtype: C{str}
"""
class IFileDescriptor(ILoggingContext):
"""
An interface representing a UNIX-style numeric file descriptor.
"""
def fileno():
"""
@raise: If the descriptor no longer has a valid file descriptor
number associated with it.
@return: The platform-specified representation of a file descriptor
number. Or C{-1} if the descriptor no longer has a valid file
descriptor number associated with it. As long as the descriptor
is valid, calls to this method on a particular instance must
return the same value.
"""
def connectionLost(reason):
"""
Called when the connection was lost.
This is called when the connection on a selectable object has been
lost. It will be called whether the connection was closed explicitly,
an exception occurred in an event handler, or the other end of the
connection closed it first.
See also L{IHalfCloseableDescriptor} if your descriptor wants to be
notified separately of the two halves of the connection being closed.
@param reason: A failure instance indicating the reason why the
connection was lost. L{error.ConnectionLost} and
L{error.ConnectionDone} are of special note, but the
failure may be of other classes as well.
"""
class IReadDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can read.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doRead():
"""
Some data is available for reading on your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
L{None}.
"""
class IWriteDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can write.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doWrite():
"""
Some data can be written to your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
L{None}.
"""
class IReadWriteDescriptor(IReadDescriptor, IWriteDescriptor):
"""
An L{IFileDescriptor} that can both read and write.
"""
class IHalfCloseableDescriptor(Interface):
"""
A descriptor that can be half-closed.
"""
def writeConnectionLost(reason):
"""
Indicates write connection was lost.
"""
def readConnectionLost(reason):
"""
Indicates read connection was lost.
"""
class ISystemHandle(Interface):
"""
An object that wraps a networking OS-specific handle.
"""
def getHandle():
"""
Return a system- and reactor-specific handle.
This might be a socket.socket() object, or some other type of
object, depending on which reactor is being used. Use and
manipulate at your own risk.
This might be used in cases where you want to set specific
options not exposed by the Twisted APIs.
"""
class IConsumer(Interface):
"""
A consumer consumes data from a producer.
"""
def registerProducer(producer, streaming):
"""
Register to receive data from a producer.
This sets self to be a consumer for a producer. When this object runs
out of data (as when a send(2) call on a socket succeeds in moving the
last data from a userspace buffer into a kernelspace buffer), it will
ask the producer to resumeProducing().
For L{IPullProducer} providers, C{resumeProducing} will be called once
each time data is required.
For L{IPushProducer} providers, C{pauseProducing} will be called
whenever the write buffer fills up and C{resumeProducing} will only be
called when it empties.
@type producer: L{IProducer} provider
@type streaming: C{bool}
@param streaming: C{True} if C{producer} provides L{IPushProducer},
C{False} if C{producer} provides L{IPullProducer}.
@raise RuntimeError: If a producer is already registered.
@return: L{None}
"""
def unregisterProducer():
"""
Stop consuming data from a producer, without disconnecting.
"""
def write(data):
"""
The producer will write data by calling this method.
The implementation must be non-blocking and perform whatever
buffering is necessary. If the producer has provided enough data
for now and it is a L{IPushProducer}, the consumer may call its
C{pauseProducing} method.
"""
class IProducer(Interface):
"""
A producer produces data for a consumer.
Typically producing is done by calling the write method of a class
implementing L{IConsumer}.
"""
def stopProducing():
"""
Stop producing data.
This tells a producer that its consumer has died, so it must stop
producing data for good.
"""
class IPushProducer(IProducer):
"""
    A push producer, also known as a streaming producer, is expected to
produce (write to this consumer) data on a continuous basis, unless
it has been paused. A paused push producer will resume producing
after its resumeProducing() method is called. For a push producer
which is not pauseable, these functions may be noops.
"""
def pauseProducing():
"""
Pause producing data.
Tells a producer that it has produced too much data to process for
the time being, and to stop until resumeProducing() is called.
"""
def resumeProducing():
"""
Resume producing data.
This tells a producer to re-add itself to the main loop and produce
more data for its consumer.
"""
class IPullProducer(IProducer):
"""
A pull producer, also known as a non-streaming producer, is
expected to produce data each time resumeProducing() is called.
"""
def resumeProducing():
"""
Produce data for the consumer a single time.
This tells a producer to produce data for the consumer once
(not repeatedly, once only). Typically this will be done
by calling the consumer's write() method a single time with
produced data.
"""
class IProtocol(Interface):
def dataReceived(data):
"""
Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
Please keep in mind that you will probably need to buffer some data
as partial (or multiple) protocol messages may be received! We
recommend that unit tests for protocols call through to this method
with differing chunk sizes, down to one byte at a time.
@param data: bytes of indeterminate length
@type data: L{bytes}
"""
def connectionLost(reason):
"""
Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed. The C{reason}
Failure wraps a L{twisted.internet.error.ConnectionDone} or
L{twisted.internet.error.ConnectionLost} instance (or a subclass
of one of those).
@type reason: L{twisted.python.failure.Failure}
"""
def makeConnection(transport):
"""
Make a connection to a transport and a server.
"""
def connectionMade():
"""
Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
class IProcessProtocol(Interface):
"""
Interface for process-related event handlers.
"""
def makeConnection(process):
"""
Called when the process has been created.
@type process: L{IProcessTransport} provider
@param process: An object representing the process which has been
created and associated with this protocol.
"""
def childDataReceived(childFD, data):
"""
Called when data arrives from the child process.
@type childFD: L{int}
@param childFD: The file descriptor from which the data was
received.
@type data: L{bytes}
@param data: The data read from the child's file descriptor.
"""
def childConnectionLost(childFD):
"""
Called when a file descriptor associated with the child process is
closed.
@type childFD: C{int}
@param childFD: The file descriptor which was closed.
"""
def processExited(reason):
"""
Called when the child process exits.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
@since: 8.2
"""
def processEnded(reason):
"""
Called when the child process exits and all file descriptors associated
with it have been closed.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
"""
class IHalfCloseableProtocol(Interface):
"""
Implemented to indicate they want notification of half-closes.
TCP supports the notion of half-closing the connection, e.g.
closing the write side but still not stopping reading. A protocol
that implements this interface will be notified of such events,
instead of having connectionLost called.
"""
def readConnectionLost():
"""
Notification of the read connection being closed.
        This indicates the peer did a half-close of the write side. It is now
        the responsibility of this protocol to call
loseConnection(). In addition, the protocol MUST make sure a
reference to it still exists (i.e. by doing a callLater with
one of its methods, etc.) as the reactor will only have a
reference to it if it is writing.
If the protocol does not do so, it might get garbage collected
without the connectionLost method ever being called.
"""
def writeConnectionLost():
"""
Notification of the write connection being closed.
This will never be called for TCP connections as TCP does not
support notification of this type of half-close.
"""
class IHandshakeListener(Interface):
"""
An interface implemented by a L{IProtocol} to indicate that it would like
to be notified when TLS handshakes complete when run over a TLS-based
transport.
This interface is only guaranteed to be called when run over a TLS-based
transport: non TLS-based transports will not respect this interface.
"""
def handshakeCompleted():
"""
Notification of the TLS handshake being completed.
This notification fires when OpenSSL has completed the TLS handshake.
At this point the TLS connection is established, and the protocol can
interrogate its transport (usually an L{ISSLTransport}) for details of
the TLS connection.
This notification *also* fires whenever the TLS session is
renegotiated. As a result, protocols that have certain minimum security
requirements should implement this interface to ensure that they are
able to re-evaluate the security of the TLS session if it changes.
"""
class IFileDescriptorReceiver(Interface):
"""
Protocols may implement L{IFileDescriptorReceiver} to receive file
descriptors sent to them. This is useful in conjunction with
L{IUNIXTransport}, which allows file descriptors to be sent between
processes on a single host.
"""
def fileDescriptorReceived(descriptor):
"""
Called when a file descriptor is received over the connection.
@param descriptor: The descriptor which was received.
@type descriptor: C{int}
@return: L{None}
"""
class IProtocolFactory(Interface):
"""
Interface for protocol factories.
"""
def buildProtocol(addr):
"""
Called when a connection has been established to addr.
If None is returned, the connection is assumed to have been refused,
and the Port will close the connection.
@type addr: (host, port)
@param addr: The address of the newly-established connection
@return: None if the connection was refused, otherwise an object
providing L{IProtocol}.
"""
def doStart():
"""
Called every time this is connected to a Port or Connector.
"""
def doStop():
"""
Called every time this is unconnected from a Port or Connector.
"""
class ITransport(Interface):
"""
I am a transport for bytes.
I represent (and wrap) the physical connection and synchronicity
of the framework which is talking to the network. I make no
representations about whether calls to me will happen immediately
or require returning to a control loop, or whether they will happen
in the same or another thread. Consider methods of this class
(aside from getPeer) to be 'thrown over the wall', to happen at some
indeterminate time.
"""
def write(data):
"""
Write some data to the physical connection, in sequence, in a
non-blocking fashion.
If possible, make sure that it is all written. No data will
ever be lost, although (obviously) the connection may be closed
before it all gets through.
@type data: L{bytes}
@param data: The data to write.
"""
def writeSequence(data):
"""
Write an iterable of byte strings to the physical connection.
If possible, make sure that all of the data is written to
the socket at once, without first copying it all into a
single byte string.
@type data: an iterable of L{bytes}
@param data: The data to write.
"""
def loseConnection():
"""
Close my connection, after writing all pending data.
Note that if there is a registered producer on a transport it
will not be closed until the producer has been unregistered.
"""
def getPeer():
"""
Get the remote address of this connection.
Treat this method with caution. It is the unfortunate result of the
CGI and Jabber standards, but should not be considered reliable for
the usual host of reasons; port forwarding, proxying, firewalls, IP
masquerading, etc.
@return: An L{IAddress} provider.
"""
def getHost():
"""
Similar to getPeer, but returns an address describing this side of the
connection.
@return: An L{IAddress} provider.
"""
class ITCPTransport(ITransport):
"""
A TCP based transport.
"""
def loseWriteConnection():
"""
Half-close the write side of a TCP connection.
If the protocol instance this is attached to provides
IHalfCloseableProtocol, it will get notified when the operation is
done. When closing write connection, as with loseConnection this will
only happen when buffer has emptied and there is no registered
producer.
"""
def abortConnection():
"""
Close the connection abruptly.
Discards any buffered data, stops any registered producer,
and, if possible, notifies the other end of the unclean
closure.
@since: 11.1
"""
def getTcpNoDelay():
"""
Return if C{TCP_NODELAY} is enabled.
"""
def setTcpNoDelay(enabled):
"""
Enable/disable C{TCP_NODELAY}.
Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are
sent sooner, possibly at the expense of overall throughput.
"""
def getTcpKeepAlive():
"""
Return if C{SO_KEEPALIVE} is enabled.
"""
def setTcpKeepAlive(enabled):
"""
Enable/disable C{SO_KEEPALIVE}.
Enabling C{SO_KEEPALIVE} sends packets periodically when the connection
is otherwise idle, usually once every two hours. They are intended
to allow detection of lost peers in a non-infinite amount of time.
"""
def getHost():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
def getPeer():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
class IUNIXTransport(ITransport):
"""
Transport for stream-oriented unix domain connections.
"""
def sendFileDescriptor(descriptor):
"""
Send a duplicate of this (file, socket, pipe, etc) descriptor to the
other end of this connection.
The send is non-blocking and will be queued if it cannot be performed
immediately. The send will be processed in order with respect to other
C{sendFileDescriptor} calls on this transport, but not necessarily with
respect to C{write} calls on this transport. The send can only be
processed if there are also bytes in the normal connection-oriented send
buffer (ie, you must call C{write} at least as many times as you call
C{sendFileDescriptor}).
@param descriptor: An C{int} giving a valid file descriptor in this
process. Note that a I{file descriptor} may actually refer to a
socket, a pipe, or anything else POSIX tries to treat in the same
way as a file.
@return: L{None}
"""
class IOpenSSLServerConnectionCreator(Interface):
"""
A provider of L{IOpenSSLServerConnectionCreator} can create
L{OpenSSL.SSL.Connection} objects for TLS servers.
@see: L{twisted.internet.ssl}
@note: Creating OpenSSL connection objects is subtle, error-prone, and
security-critical. Before implementing this interface yourself,
consider using L{twisted.internet.ssl.CertificateOptions} as your
C{contextFactory}. (For historical reasons, that class does not
actually I{implement} this interface; nevertheless it is usable in all
Twisted APIs which require a provider of this interface.)
"""
def serverConnectionForTLS(tlsProtocol):
"""
Create a connection for the given server protocol.
@param tlsProtocol: the protocol server making the request.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}.
@return: an OpenSSL connection object configured appropriately for the
given Twisted protocol.
@rtype: L{OpenSSL.SSL.Connection}
"""
class IOpenSSLClientConnectionCreator(Interface):
"""
A provider of L{IOpenSSLClientConnectionCreator} can create
L{OpenSSL.SSL.Connection} objects for TLS clients.
@see: L{twisted.internet.ssl}
@note: Creating OpenSSL connection objects is subtle, error-prone, and
security-critical. Before implementing this interface yourself,
consider using L{twisted.internet.ssl.optionsForClientTLS} as your
C{contextFactory}.
"""
def clientConnectionForTLS(tlsProtocol):
"""
Create a connection for the given client protocol.
@param tlsProtocol: the client protocol making the request.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}.
@return: an OpenSSL connection object configured appropriately for the
given Twisted protocol.
@rtype: L{OpenSSL.SSL.Connection}
"""
class IProtocolNegotiationFactory(Interface):
"""
A provider of L{IProtocolNegotiationFactory} can provide information about
the various protocols that the factory can create implementations of. This
can be used, for example, to provide protocol names for Next Protocol
Negotiation and Application Layer Protocol Negotiation.
@see: L{twisted.internet.ssl}
"""
def acceptableProtocols():
"""
Returns a list of protocols that can be spoken by the connection
factory in the form of ALPN tokens, as laid out in the IANA registry
for ALPN tokens.
@return: a list of ALPN tokens in order of preference.
@rtype: L{list} of L{bytes}
"""
class IOpenSSLContextFactory(Interface):
"""
A provider of L{IOpenSSLContextFactory} is capable of generating
L{OpenSSL.SSL.Context} classes suitable for configuring TLS on a
connection. A provider will store enough state to be able to generate these
contexts as needed for individual connections.
@see: L{twisted.internet.ssl}
"""
def getContext():
"""
Returns a TLS context object, suitable for securing a TLS connection.
This context object will be appropriately customized for the connection
based on the state in this object.
@return: A TLS context object.
@rtype: L{OpenSSL.SSL.Context}
"""
class ITLSTransport(ITCPTransport):
"""
A TCP transport that supports switching to TLS midstream.
Once TLS mode is started the transport will implement L{ISSLTransport}.
"""
def startTLS(contextFactory):
"""
Initiate TLS negotiation.
@param contextFactory: An object which creates appropriately configured
TLS connections.
For clients, use L{twisted.internet.ssl.optionsForClientTLS}; for
servers, use L{twisted.internet.ssl.CertificateOptions}.
@type contextFactory: L{IOpenSSLClientConnectionCreator} or
L{IOpenSSLServerConnectionCreator}, depending on whether this
L{ITLSTransport} is a server or not. If the appropriate interface
is not provided by the value given for C{contextFactory}, it must
be an implementor of L{IOpenSSLContextFactory}.
"""
class ISSLTransport(ITCPTransport):
"""
A SSL/TLS based transport.
"""
def getPeerCertificate():
"""
Return an object with the peer's certificate info.
"""
class INegotiated(ISSLTransport):
"""
A TLS based transport that supports using ALPN/NPN to negotiate the
protocol to be used inside the encrypted tunnel.
"""
negotiatedProtocol = Attribute(
"""
The protocol selected to be spoken using ALPN/NPN. The result from ALPN
is preferred to the result from NPN if both were used. If the remote
peer does not support ALPN or NPN, or neither NPN or ALPN are available
on this machine, will be L{None}. Otherwise, will be the name of the
selected protocol as C{bytes}. Note that until the handshake has
completed this property may incorrectly return L{None}: wait until data
has been received before trusting it (see
https://twistedmatrix.com/trac/ticket/6024).
"""
)
class ICipher(Interface):
"""
A TLS cipher.
"""
fullName = Attribute(
"The fully qualified name of the cipher in L{unicode}."
)
class IAcceptableCiphers(Interface):
"""
A list of acceptable ciphers for a TLS context.
"""
def selectCiphers(availableCiphers):
"""
Choose which ciphers to allow to be negotiated on a TLS connection.
@param availableCiphers: A L{list} of L{ICipher} which gives the names
of all ciphers supported by the TLS implementation in use.
@return: A L{list} of L{ICipher} which represents the ciphers
which may be negotiated on the TLS connection. The result is
ordered by preference with more preferred ciphers appearing
earlier.
"""
class IProcessTransport(ITransport):
"""
A process transport.
"""
pid = Attribute(
"From before L{IProcessProtocol.makeConnection} is called to before "
"L{IProcessProtocol.processEnded} is called, C{pid} is an L{int} "
"giving the platform process ID of this process. C{pid} is L{None} "
"at all other times.")
def closeStdin():
"""
Close stdin after all data has been written out.
"""
def closeStdout():
"""
Close stdout.
"""
def closeStderr():
"""
Close stderr.
"""
def closeChildFD(descriptor):
"""
Close a file descriptor which is connected to the child process, identified
by its FD in the child process.
"""
def writeToChild(childFD, data):
"""
Similar to L{ITransport.write} but also allows the file descriptor in
the child process which will receive the bytes to be specified.
@type childFD: L{int}
@param childFD: The file descriptor to which to write.
@type data: L{bytes}
@param data: The bytes to write.
@return: L{None}
@raise KeyError: If C{childFD} is not a file descriptor that was mapped
in the child when L{IReactorProcess.spawnProcess} was used to create
it.
"""
def loseConnection():
"""
Close stdin, stderr and stdout.
"""
def signalProcess(signalID):
"""
Send a signal to the process.
@param signalID: can be
- one of C{"KILL"}, C{"TERM"}, or C{"INT"}.
These will be implemented in a
cross-platform manner, and so should be used
if possible.
- an integer, where it represents a POSIX
signal ID.
@raise twisted.internet.error.ProcessExitedAlready: If the process has
already exited.
@raise OSError: If the C{os.kill} call fails with an errno different
from C{ESRCH}.
"""
class IServiceCollection(Interface):
"""
An object which provides access to a collection of services.
"""
def getServiceNamed(serviceName):
"""
Retrieve the named service from this application.
Raise a C{KeyError} if there is no such service name.
"""
def addService(service):
"""
Add a service to this collection.
"""
def removeService(service):
"""
Remove a service from this collection.
"""
class IUDPTransport(Interface):
"""
Transport for UDP DatagramProtocols.
"""
def write(packet, addr=None):
"""
Write packet to given address.
        @param addr: a tuple of (ip, port). For connected transports, this must
            be the address the transport is connected to, or None.
            In non-connected mode this is mandatory.
@raise twisted.internet.error.MessageLengthError: C{packet} was too
long.
"""
def connect(host, port):
"""
Connect the transport to an address.
This changes it to connected mode. Datagrams can only be sent to
this address, and will only be received from this address. In addition
the protocol's connectionRefused method might get called if destination
is not receiving datagrams.
@param host: an IP address, not a domain name ('127.0.0.1', not 'localhost')
@param port: port to connect to.
"""
def getHost():
"""
Get this port's host address.
@return: an address describing the listening port.
@rtype: L{IPv4Address} or L{IPv6Address}.
"""
def stopListening():
"""
Stop listening on this port.
If it does not complete immediately, will return L{Deferred} that fires
upon completion.
"""
def setBroadcastAllowed(enabled):
"""
Set whether this port may broadcast.
@param enabled: Whether the port may broadcast.
@type enabled: L{bool}
"""
def getBroadcastAllowed():
"""
Checks if broadcast is currently allowed on this port.
@return: Whether this port may broadcast.
@rtype: L{bool}
"""
class IUNIXDatagramTransport(Interface):
"""
Transport for UDP PacketProtocols.
"""
def write(packet, address):
"""
Write packet to given address.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
class IUNIXDatagramConnectedTransport(Interface):
"""
Transport for UDP ConnectedPacketProtocols.
"""
def write(packet):
"""
Write packet to address we are connected to.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
def getPeer():
"""
Returns L{UNIXAddress}.
"""
class IMulticastTransport(Interface):
"""
Additional functionality for multicast UDP.
"""
def getOutgoingInterface():
"""
Return interface of outgoing multicast packets.
"""
def setOutgoingInterface(addr):
"""
Set interface for outgoing multicast packets.
Returns Deferred of success.
"""
def getLoopbackMode():
"""
Return if loopback mode is enabled.
"""
def setLoopbackMode(mode):
"""
Set if loopback mode is enabled.
"""
def getTTL():
"""
Get time to live for multicast packets.
"""
def setTTL(ttl):
"""
Set time to live on multicast packets.
"""
def joinGroup(addr, interface=""):
"""
Join a multicast group. Returns L{Deferred} of success or failure.
If an error occurs, the returned L{Deferred} will fail with
L{error.MulticastJoinError}.
"""
def leaveGroup(addr, interface=""):
"""
Leave multicast group, return L{Deferred} of success.
"""
class IStreamClientEndpoint(Interface):
"""
A stream client endpoint is a place that L{ClientFactory} can connect to.
For example, a remote TCP host/port pair would be a TCP client endpoint.
@since: 10.1
"""
def connect(protocolFactory):
"""
Connect the C{protocolFactory} to the location specified by this
L{IStreamClientEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
@return: A L{Deferred} that results in an L{IProtocol} upon successful
connection otherwise a L{Failure} wrapping L{ConnectError} or
L{NoProtocol <twisted.internet.error.NoProtocol>}.
"""
class IStreamServerEndpoint(Interface):
"""
A stream server endpoint is a place that a L{Factory} can listen for
incoming connections.
@since: 10.1
"""
def listen(protocolFactory):
"""
Listen with C{protocolFactory} at the location specified by this
L{IStreamServerEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
@return: A L{Deferred} that results in an L{IListeningPort} or an
L{CannotListenError}
"""
class IStreamServerEndpointStringParser(Interface):
"""
An L{IStreamServerEndpointStringParser} is like an
L{IStreamClientEndpointStringParserWithReactor}, except for
L{IStreamServerEndpoint}s instead of clients. It integrates with
L{endpoints.serverFromString} in much the same way.
"""
prefix = Attribute(
"""
A C{str}, the description prefix to respond to. For example, an
L{IStreamServerEndpointStringParser} plugin which had C{"foo"} for its
C{prefix} attribute would be called for endpoint descriptions like
C{"foo:bar:baz"} or C{"foo:"}.
"""
)
def parseStreamServer(reactor, *args, **kwargs):
"""
Parse a stream server endpoint from a reactor and string-only arguments
and keyword arguments.
@see: L{IStreamClientEndpointStringParserWithReactor.parseStreamClient}
@return: a stream server endpoint
@rtype: L{IStreamServerEndpoint}
"""
class IStreamClientEndpointStringParserWithReactor(Interface):
"""
An L{IStreamClientEndpointStringParserWithReactor} is a parser which can
convert a set of string C{*args} and C{**kwargs} into an
L{IStreamClientEndpoint} provider.
This interface is really only useful in the context of the plugin system
for L{endpoints.clientFromString}. See the document entitled "I{The
Twisted Plugin System}" for more details on how to write a plugin.
If you place an L{IStreamClientEndpointStringParserWithReactor} plugin in
the C{twisted.plugins} package, that plugin's C{parseStreamClient} method
will be used to produce endpoints for any description string that begins
with the result of that L{IStreamClientEndpointStringParserWithReactor}'s
prefix attribute.
"""
prefix = Attribute(
"""
L{bytes}, the description prefix to respond to. For example, an
L{IStreamClientEndpointStringParserWithReactor} plugin which had
C{b"foo"} for its C{prefix} attribute would be called for endpoint
descriptions like C{b"foo:bar:baz"} or C{b"foo:"}.
"""
)
def parseStreamClient(reactor, *args, **kwargs):
"""
This method is invoked by L{endpoints.clientFromString}, if the type of
endpoint matches the return value from this
L{IStreamClientEndpointStringParserWithReactor}'s C{prefix} method.
@param reactor: The reactor passed to L{endpoints.clientFromString}.
@param args: The byte string arguments, minus the endpoint type, in the
endpoint description string, parsed according to the rules
described in L{endpoints.quoteStringArgument}. For example, if the
description were C{b"my-type:foo:bar:baz=qux"}, C{args} would be
C{(b'foo', b'bar')}
@param kwargs: The byte string arguments from the endpoint description
passed as keyword arguments. For example, if the description were
C{b"my-type:foo:bar:baz=qux"}, C{kwargs} would be
C{dict(baz=b'qux')}.
@return: a client endpoint
@rtype: a provider of L{IStreamClientEndpoint}
"""
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
helper/tokenHelper.go
|
package helper
import (
	"errors"
	"os"
	"time"

	"github.com/braswelljr/goax/model"
	"github.com/golang-jwt/jwt/v4"
)
var (
SecretKey = os.Getenv("SECRET_KEY")
)
type SignedParams struct {
User model.TokenizedUserParams
jwt.RegisteredClaims
}
func GetAllTokens(user model.TokenizedUserParams) (string, string, error) {
	// fall back to a default secret key when the environment variable is not set
	if SecretKey == "" {
		SecretKey = "xxyyzzaa"
	}
// params
signedParams := &SignedParams{
User: user,
RegisteredClaims: jwt.RegisteredClaims{
ExpiresAt: &jwt.NumericDate{
Time: time.Now().Local().Add(time.Hour * time.Duration(168)),
},
},
}
// refresh token
refreshClaims := &SignedParams{
RegisteredClaims: jwt.RegisteredClaims{
ExpiresAt: &jwt.NumericDate{
Time: time.Now().Local().Add(time.Hour * time.Duration(168)),
},
},
}
	// create token
	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, signedParams).SignedString([]byte(SecretKey))
	if err != nil {
		return "", "", err
	}
	// create refresh token
	refreshToken, err := jwt.NewWithClaims(jwt.SigningMethodHS256, refreshClaims).SignedString([]byte(SecretKey))
	if err != nil {
		return "", "", err
	}
	return token, refreshToken, nil
}
// ValidateToken validates a token
func ValidateToken(token string) (*SignedParams, error) {
	// fall back to the same hard-coded default used when signing
	if SecretKey == "" {
		SecretKey = "xxyyzzaa"
	}
// parse token
tokenClaims, err := jwt.ParseWithClaims(
token,
&SignedParams{},
func(token *jwt.Token) (interface{}, error) {
return []byte(SecretKey), nil
},
)
if err != nil {
return nil, err
}
	claims, ok := tokenClaims.Claims.(*SignedParams)
	if !ok {
		return nil, errors.New("invalid token claims")
	}
	// ensure the token has not expired
	if !claims.VerifyExpiresAt(time.Now().Local(), true) {
		return nil, errors.New("token has expired")
	}
return claims, nil
}
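// ExampleTokenFlow is an illustrative sketch, not part of the original file:
// it shows how GetAllTokens and ValidateToken are intended to be used
// together. The user value is assumed to be supplied by the caller.
func ExampleTokenFlow(user model.TokenizedUserParams) error {
	// issue an access token and a refresh token for the user
	token, _, err := GetAllTokens(user)
	if err != nil {
		return err
	}
	// validate the access token and recover its claims
	if _, err := ValidateToken(token); err != nil {
		return err
	}
	return nil
}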
|
[
"\"SECRET_KEY\""
] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
go
| 1 | 0 | |
vendor/github.com/containers/image/v5/openshift/openshift-copies.go
|
package openshift
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"time"
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
"k8s.io/client-go/util/homedir"
)
// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
// restTLSClientConfig contains settings to enable transport layer security
type restTLSClientConfig struct {
// Server requires TLS client certificate authentication
CertFile string
// Server requires TLS client certificate authentication
KeyFile string
// Trusted root certificates for server
CAFile string
// CertData holds PEM-encoded bytes (typically read from a client certificate file).
// CertData takes precedence over CertFile
CertData []byte
// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
// KeyData takes precedence over KeyFile
KeyData []byte
// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
// CAData takes precedence over CAFile
CAData []byte
}
// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
// Config holds the common attributes that can be passed to a Kubernetes client on
// initialization.
type restConfig struct {
// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
// If a URL is given then the (optional) Path of that URL represents a prefix that must
// be appended to all request URIs used to access the apiserver. This allows a frontend
// proxy to easily relocate all of the apiserver endpoints.
Host string
// Server requires Basic authentication
Username string
Password string
// Server requires Bearer authentication. This client will not attempt to use
// refresh tokens for an OAuth2 flow.
// TODO: demonstrate an OAuth2 compatible client.
BearerToken string
// TLSClientConfig contains settings to enable transport layer security
TLSClientConfig restTLSClientConfig
// Server should be accessed without verifying the TLS
// certificate. For testing only.
Insecure bool
}
// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig.
// ClientConfig is used to make it easy to get an api server client
type clientConfig interface {
// ClientConfig returns a complete client config
ClientConfig() (*restConfig, error)
}
// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig.
func defaultClientConfig() clientConfig {
loadingRules := newOpenShiftClientConfigLoadingRules()
// REMOVED: Allowing command-line overriding of loadingRules
// REMOVED: clientcmd.ConfigOverrides
clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
return clientConfig
}
var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config")
// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
// 1. --config value
// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
chain := []string{}
envVarFile := os.Getenv("KUBECONFIG")
if len(envVarFile) != 0 {
chain = append(chain, filepath.SplitList(envVarFile)...)
} else {
chain = append(chain, recommendedHomeFile)
}
return &clientConfigLoadingRules{
Precedence: chain,
// REMOVED: Migration support; run (oc login) to trigger migration
}
}
// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.
// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules
// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid
// passing extraneous information down a call stack
type deferredLoadingClientConfig struct {
loadingRules *clientConfigLoadingRules
clientConfig clientConfig
}
// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig.
// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name
func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
return &deferredLoadingClientConfig{loadingRules: loadingRules}
}
func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
if config.clientConfig == nil {
// REMOVED: Support for concurrent use in multiple threads.
mergedConfig, err := config.loadingRules.Load()
if err != nil {
return nil, err
}
// REMOVED: Interactive fallback support.
mergedClientConfig := newNonInteractiveClientConfig(*mergedConfig)
config.clientConfig = mergedClientConfig
}
return config.clientConfig, nil
}
// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
// ClientConfig implements ClientConfig
func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
mergedClientConfig, err := config.createClientConfig()
if err != nil {
return nil, err
}
mergedConfig, err := mergedClientConfig.ClientConfig()
if err != nil {
return nil, err
}
// REMOVED: In-cluster service account configuration use.
return mergedConfig, nil
}
var (
// DefaultCluster is the cluster config used when no other config is specified
// TODO: eventually apiserver should start on 443 and be secure by default
defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
)
// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
type directClientConfig struct {
config clientcmdConfig
}
// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
return &directClientConfig{config}
}
// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
// ClientConfig implements ClientConfig
func (config *directClientConfig) ClientConfig() (*restConfig, error) {
if err := config.ConfirmUsable(); err != nil {
return nil, err
}
configAuthInfo := config.getAuthInfo()
configClusterInfo := config.getCluster()
clientConfig := &restConfig{}
clientConfig.Host = configClusterInfo.Server
if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
u.RawQuery = ""
u.Fragment = ""
clientConfig.Host = u.String()
}
// only try to read the auth information if we are secure
if isConfigTransportTLS(*clientConfig) {
var err error
// REMOVED: Support for interactive fallback.
userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
if err != nil {
return nil, err
}
if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
return nil, err
}
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
if err != nil {
return nil, err
}
if err = mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig); err != nil {
return nil, err
}
}
return clientConfig, nil
}
// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately
// we want this order of precedence for the server identification
// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. load the ~/.kubernetes_auth file as a default
func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
mergedConfig := &restConfig{}
	// configClusterInfo holds the information identifying the server, as provided by .kubeconfig
configClientConfig := &restConfig{}
configClientConfig.TLSClientConfig.CAFile = configClusterInfo.CertificateAuthority
configClientConfig.TLSClientConfig.CAData = configClusterInfo.CertificateAuthorityData
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil {
return nil, err
}
return mergedConfig, nil
}
// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately
// we want this order of precedence for user identification
// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
mergedConfig := &restConfig{}
// blindly overwrite existing values based on precedence
if len(configAuthInfo.Token) > 0 {
mergedConfig.BearerToken = configAuthInfo.Token
}
if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
mergedConfig.TLSClientConfig.CertFile = configAuthInfo.ClientCertificate
mergedConfig.TLSClientConfig.CertData = configAuthInfo.ClientCertificateData
mergedConfig.TLSClientConfig.KeyFile = configAuthInfo.ClientKey
mergedConfig.TLSClientConfig.KeyData = configAuthInfo.ClientKeyData
}
if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
mergedConfig.Username = configAuthInfo.Username
mergedConfig.Password = configAuthInfo.Password
}
// REMOVED: prompting for missing information.
return mergedConfig, nil
}
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
func (config *directClientConfig) ConfirmUsable() error {
var validationErrors []error
validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
// when direct client config is specified, and our only error is that no server is defined, we should
// return a standard "no config" error
if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
return newErrConfigurationInvalid([]error{errEmptyConfig})
}
return newErrConfigurationInvalid(validationErrors)
}
// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
func (config *directClientConfig) getContextName() string {
// REMOVED: overrides support
return config.config.CurrentContext
}
// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
func (config *directClientConfig) getAuthInfoName() string {
// REMOVED: overrides support
return config.getContext().AuthInfo
}
// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
func (config *directClientConfig) getClusterName() string {
// REMOVED: overrides support
return config.getContext().Cluster
}
// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
func (config *directClientConfig) getContext() clientcmdContext {
contexts := config.config.Contexts
contextName := config.getContextName()
var mergedContext clientcmdContext
if configContext, exists := contexts[contextName]; exists {
if err := mergo.MergeWithOverwrite(&mergedContext, configContext); err != nil {
logrus.Debugf("Can't merge configContext: %v", err)
}
}
// REMOVED: overrides support
return mergedContext
}
var (
errEmptyConfig = errors.New("no configuration has been provided")
// message is for consistency with old behavior
errEmptyCluster = errors.New("cluster has no server defined")
)
// helper for checking certificate/key/CA
func validateFileIsReadable(name string) error {
answer, err := os.Open(name)
defer func() {
if err := answer.Close(); err != nil {
logrus.Debugf("Error closing %v: %v", name, err)
}
}()
return err
}
// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
// validateClusterInfo looks for conflicts and errors in the cluster info
func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error {
var validationErrors []error
if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) {
return []error{errEmptyCluster}
}
if len(clusterInfo.Server) == 0 {
if len(clusterName) == 0 {
validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
} else {
validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
}
}
// Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
}
if len(clusterInfo.CertificateAuthority) != 0 {
err := validateFileIsReadable(clusterInfo.CertificateAuthority)
if err != nil {
validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
}
}
return validationErrors
}
// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo.
// validateAuthInfo looks for conflicts and errors in the auth info
func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
var validationErrors []error
usingAuthPath := false
methods := make([]string, 0, 3)
if len(authInfo.Token) != 0 {
methods = append(methods, "token")
}
if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
methods = append(methods, "basicAuth")
}
if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
// Make sure cert data and file aren't both specified
if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
}
// Make sure key data and file aren't both specified
if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
}
// Make sure a key is specified
if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
}
if len(authInfo.ClientCertificate) != 0 {
err := validateFileIsReadable(authInfo.ClientCertificate)
if err != nil {
validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
err := validateFileIsReadable(authInfo.ClientKey)
if err != nil {
validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
}
}
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) {
validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
}
return validationErrors
}
// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo.
func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
authInfos := config.config.AuthInfos
authInfoName := config.getAuthInfoName()
var mergedAuthInfo clientcmdAuthInfo
if configAuthInfo, exists := authInfos[authInfoName]; exists {
if err := mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo); err != nil {
logrus.Debugf("Can't merge configAuthInfo: %v", err)
}
}
// REMOVED: overrides support
return mergedAuthInfo
}
// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
func (config *directClientConfig) getCluster() clientcmdCluster {
clusterInfos := config.config.Clusters
clusterInfoName := config.getClusterName()
var mergedClusterInfo clientcmdCluster
if err := mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster); err != nil {
logrus.Debugf("Can't merge defaultCluster: %v", err)
}
if err := mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster); err != nil {
logrus.Debugf("Can't merge envVarCluster: %v", err)
}
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
if err := mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo); err != nil {
logrus.Debugf("Can't merge configClusterInfo: %v", err)
}
}
// REMOVED: overrides support
return mergedClusterInfo
}
// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
// This helper implements the error and Errors interfaces. Keeping it private
// prevents people from making an aggregate of 0 errors, which is not
// an error, but does satisfy the error interface.
type aggregateErr []error
// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
// NewAggregate converts a slice of errors into an Aggregate interface, which
// is itself an implementation of the error interface. If the slice is empty,
// this returns nil.
// It will check if any of the element of input error list is nil, to avoid
// nil pointer panic when call Error().
func newAggregate(errlist []error) error {
if len(errlist) == 0 {
return nil
}
// In case of input error list contains nil
var errs []error
for _, e := range errlist {
if e != nil {
errs = append(errs, e)
}
}
if len(errs) == 0 {
return nil
}
return aggregateErr(errs)
}
// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
// Error is part of the error interface.
func (agg aggregateErr) Error() string {
if len(agg) == 0 {
// This should never happen, really.
return ""
}
if len(agg) == 1 {
return agg[0].Error()
}
result := fmt.Sprintf("[%s", agg[0].Error())
for i := 1; i < len(agg); i++ {
result += fmt.Sprintf(", %s", agg[i].Error())
}
result += "]"
return result
}
// REMOVED: aggregateErr.Errors
// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
type errConfigurationInvalid []error
var _ error = errConfigurationInvalid{}
// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
func newErrConfigurationInvalid(errs []error) error {
switch len(errs) {
case 0:
return nil
default:
return errConfigurationInvalid(errs)
}
}
// Error implements the error interface
func (e errConfigurationInvalid) Error() string {
return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
}
// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
// Callers can put the chain together however they want, but we'd recommend:
// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if this file is not present
type clientConfigLoadingRules struct {
Precedence []string
}
// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
// if the ExplicitPath, return the unmerged explicit file
// Otherwise, return a merged config based on the Precedence slice
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.
// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
// non-conflicting entries from the second file's "red-user" are discarded.
// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
// and only absolute file paths are returned.
func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
errlist := []error{}
kubeConfigFiles := []string{}
// REMOVED: explicit path support
kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
kubeconfigs := []*clientcmdConfig{}
// read and cache the config files so that we only look at them once
for _, filename := range kubeConfigFiles {
if len(filename) == 0 {
// no work to do
continue
}
config, err := loadFromFile(filename)
if os.IsNotExist(err) {
// skip missing files
continue
}
if err != nil {
errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
continue
}
kubeconfigs = append(kubeconfigs, config)
}
// first merge all of our maps
mapConfig := clientcmdNewConfig()
for _, kubeconfig := range kubeconfigs {
if err := mergo.MergeWithOverwrite(mapConfig, kubeconfig); err != nil {
return nil, err
}
}
// merge all of the struct values in the reverse order so that priority is given correctly
// errors are not added to the list the second time
nonMapConfig := clientcmdNewConfig()
for i := len(kubeconfigs) - 1; i >= 0; i-- {
kubeconfig := kubeconfigs[i]
if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil {
return nil, err
}
}
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
// get the values we expect.
config := clientcmdNewConfig()
if err := mergo.MergeWithOverwrite(config, mapConfig); err != nil {
return nil, err
}
if err := mergo.MergeWithOverwrite(config, nonMapConfig); err != nil {
return nil, err
}
// REMOVED: Possibility to skip this.
if err := resolveLocalPaths(config); err != nil {
errlist = append(errlist, err)
}
return config, newAggregate(errlist)
}
// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
// LoadFromFile takes a filename and deserializes the contents into Config object
func loadFromFile(filename string) (*clientcmdConfig, error) {
kubeconfigBytes, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
config, err := load(kubeconfigBytes)
if err != nil {
return nil, err
}
// set LocationOfOrigin on every Cluster, User, and Context
for key, obj := range config.AuthInfos {
obj.LocationOfOrigin = filename
config.AuthInfos[key] = obj
}
for key, obj := range config.Clusters {
obj.LocationOfOrigin = filename
config.Clusters[key] = obj
}
for key, obj := range config.Contexts {
obj.LocationOfOrigin = filename
config.Contexts[key] = obj
}
if config.AuthInfos == nil {
config.AuthInfos = map[string]*clientcmdAuthInfo{}
}
if config.Clusters == nil {
config.Clusters = map[string]*clientcmdCluster{}
}
if config.Contexts == nil {
config.Contexts = map[string]*clientcmdContext{}
}
return config, nil
}
// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
// Load takes a byte slice and deserializes the contents into Config object.
// Encapsulates deserialization without assuming the source is a file.
func load(data []byte) (*clientcmdConfig, error) {
config := clientcmdNewConfig()
	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
if len(data) == 0 {
return config, nil
}
// Note: This does absolutely no kind/version checking or conversions.
data, err := yaml.YAMLToJSON(data)
if err != nil {
return nil, err
}
if err := json.Unmarshal(data, config); err != nil {
return nil, err
}
return config, nil
}
// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin
// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
// modification of its contents.
func resolveLocalPaths(config *clientcmdConfig) error {
for _, cluster := range config.Clusters {
if len(cluster.LocationOfOrigin) == 0 {
continue
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
}
if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
return err
}
}
for _, authInfo := range config.AuthInfos {
if len(authInfo.LocationOfOrigin) == 0 {
continue
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
}
if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
return err
}
}
return nil
}
// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
func getClusterFileReferences(cluster *clientcmdCluster) []*string {
return []*string{&cluster.CertificateAuthority}
}
// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
}
// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
func resolvePaths(refs []*string, base string) error {
for _, ref := range refs {
// Don't resolve empty paths
if len(*ref) > 0 {
// Don't resolve absolute paths
if !filepath.IsAbs(*ref) {
*ref = filepath.Join(base, *ref)
}
}
}
return nil
}
// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
// object. Note that a RESTClient may require fields that are optional when initializing a Client.
// A RESTClient created by this method is generic - it expects to operate on an API that follows
// the Kubernetes conventions, but may not be the Kubernetes API.
func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
// REMOVED: Configurable GroupVersion, Codec
// REMOVED: Configurable versionedAPIPath
baseURL, err := defaultServerURLFor(config)
if err != nil {
return nil, nil, err
}
transport, err := transportFor(config)
if err != nil {
return nil, nil, err
}
var httpClient *http.Client
if transport != http.DefaultTransport {
httpClient = &http.Client{Transport: transport}
}
// REMOVED: Configurable QPS, Burst, ContentConfig
// REMOVED: Actually returning a RESTClient object.
return baseURL, httpClient, nil
}
// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
// to use with a Client at a given API version following the standard conventions for a
// Kubernetes API.
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
if host == "" {
return nil, errors.Errorf("host must be a URL or a host:port pair")
}
base := host
hostURL, err := url.Parse(base)
if err != nil {
return nil, err
}
if hostURL.Scheme == "" {
scheme := "http://"
if defaultTLS {
scheme = "https://"
}
hostURL, err = url.Parse(scheme + base)
if err != nil {
return nil, err
}
if hostURL.Path != "" && hostURL.Path != "/" {
return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
}
}
// REMOVED: versionedAPIPath computation.
return hostURL, nil
}
// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
// requires Host and Version to be set prior to being called.
func defaultServerURLFor(config *restConfig) (*url.URL, error) {
// TODO: move the default to secure when the apiserver supports TLS by default
// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
hasCA := len(config.TLSClientConfig.CAFile) != 0 || len(config.TLSClientConfig.CAData) != 0
hasCert := len(config.TLSClientConfig.CertFile) != 0 || len(config.TLSClientConfig.CertData) != 0
defaultTLS := hasCA || hasCert || config.Insecure
host := config.Host
if host == "" {
host = "localhost"
}
// REMOVED: Configurable APIPath, GroupVersion
return defaultServerURL(host, defaultTLS)
}
// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
// TransportFor returns an http.RoundTripper that will provide the authentication
// or transport level security defined by the provided Config. Will return the
// default http.DefaultTransport if no special case behavior is needed.
func transportFor(config *restConfig) (http.RoundTripper, error) {
// REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
return transportNew(config)
}
// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
// IsConfigTransportTLS returns true if and only if the provided
// config will result in a protected connection to the server when it
// is passed to restclient.RESTClientFor(). Use to determine when to
// send credentials over the wire.
//
// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
// still possible.
func isConfigTransportTLS(config restConfig) bool {
baseURL, err := defaultServerURLFor(&config)
if err != nil {
return false
}
return baseURL.Scheme == "https"
}
// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New.
// New returns an http.RoundTripper that will provide the authentication
// or transport level security defined by the provided Config.
func transportNew(config *restConfig) (http.RoundTripper, error) {
// REMOVED: custom config.Transport support.
// Set transport level security
var (
rt http.RoundTripper
err error
)
rt, err = tlsCacheGet(config)
if err != nil {
return nil, err
}
// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
if len(config.Username) != 0 && len(config.BearerToken) != 0 {
return nil, errors.Errorf("username/password or bearer token may be set, but not both")
}
return rt, nil
}
// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR.
// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
// no matching CIDRs are found
func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
// we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it
noProxyEnv := os.Getenv("NO_PROXY")
noProxyRules := strings.Split(noProxyEnv, ",")
cidrs := []*net.IPNet{}
for _, noProxyRule := range noProxyRules {
_, cidr, _ := net.ParseCIDR(noProxyRule)
if cidr != nil {
cidrs = append(cidrs, cidr)
}
}
if len(cidrs) == 0 {
return delegate
}
return func(req *http.Request) (*url.URL, error) {
host := req.URL.Host
// for some urls, the Host is already the host, not the host:port
if net.ParseIP(host) == nil {
var err error
host, _, err = net.SplitHostPort(req.URL.Host)
if err != nil {
return delegate(req)
}
}
ip := net.ParseIP(host)
if ip == nil {
return delegate(req)
}
for _, cidr := range cidrs {
if cidr.Contains(ip) {
return nil, nil
}
}
return delegate(req)
}
}
// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get.
func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
// REMOVED: any actual caching
// Get the TLS options for this client config
tlsConfig, err := tlsConfigFor(config)
if err != nil {
return nil, err
}
// The options didn't require a custom TLS config
if tlsConfig == nil {
return http.DefaultTransport, nil
}
// REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here.
t := &http.Transport{
// http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
}
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
_ = http2.ConfigureTransport(t)
}
return t, nil
}
// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
// TLSConfigFor returns a tls.Config that will provide the transport level security defined
// by the provided Config. Will return nil if no transport level security is requested.
func tlsConfigFor(c *restConfig) (*tls.Config, error) {
if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
return nil, nil
}
if c.HasCA() && c.Insecure {
return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
}
if err := loadTLSFiles(c); err != nil {
return nil, err
}
tlsConfig := &tls.Config{
// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
MinVersion: tls.VersionTLS10,
InsecureSkipVerify: c.Insecure,
}
if c.HasCA() {
tlsConfig.RootCAs = rootCertPool(c.TLSClientConfig.CAData)
}
if c.HasCertAuth() {
cert, err := tls.X509KeyPair(c.TLSClientConfig.CertData, c.TLSClientConfig.KeyData)
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
return tlsConfig, nil
}
// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are
// either populated or were empty to start.
func loadTLSFiles(c *restConfig) error {
var err error
c.TLSClientConfig.CAData, err = dataFromSliceOrFile(c.TLSClientConfig.CAData, c.TLSClientConfig.CAFile)
if err != nil {
return err
}
c.TLSClientConfig.CertData, err = dataFromSliceOrFile(c.TLSClientConfig.CertData, c.TLSClientConfig.CertFile)
if err != nil {
return err
}
c.TLSClientConfig.KeyData, err = dataFromSliceOrFile(c.TLSClientConfig.KeyData, c.TLSClientConfig.KeyFile)
if err != nil {
return err
}
return nil
}
// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
// or an error if an error occurred reading the file
func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
if len(data) > 0 {
return data, nil
}
if len(file) > 0 {
fileData, err := ioutil.ReadFile(file)
if err != nil {
return []byte{}, err
}
return fileData, nil
}
return nil, nil
}
// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
// When caData is not empty, it will be the ONLY information used in the CertPool.
func rootCertPool(caData []byte) *x509.CertPool {
// What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
// It doesn't allow trusting either/or, but hopefully that won't be an issue
if len(caData) == 0 {
return nil
}
// if we have caData, use it
certPool := x509.NewCertPool()
certPool.AppendCertsFromPEM(caData)
return certPool
}
// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
// HasCA returns whether the configuration has a certificate authority or not.
func (c *restConfig) HasCA() bool {
return len(c.TLSClientConfig.CAData) > 0 || len(c.TLSClientConfig.CAFile) > 0
}
// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
// HasCertAuth returns whether the configuration has certificate authentication or not.
func (c *restConfig) HasCertAuth() bool {
return len(c.TLSClientConfig.CertData) != 0 || len(c.TLSClientConfig.CertFile) != 0
}
// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
// Config holds the information needed to build connect to remote kubernetes clusters as a given user
// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
type clientcmdConfig struct {
// Clusters is a map of referencable names to cluster configs
Clusters clustersMap `json:"clusters"`
// AuthInfos is a map of referencable names to user configs
AuthInfos authInfosMap `json:"users"`
// Contexts is a map of referencable names to context configs
Contexts contextsMap `json:"contexts"`
// CurrentContext is the name of the context that you would like to use by default
CurrentContext string `json:"current-context"`
}
type clustersMap map[string]*clientcmdCluster
func (m *clustersMap) UnmarshalJSON(data []byte) error {
var a []v1NamedCluster
if err := json.Unmarshal(data, &a); err != nil {
return err
}
for _, e := range a {
cluster := e.Cluster // Allocates a new instance in each iteration
(*m)[e.Name] = &cluster
}
return nil
}
type authInfosMap map[string]*clientcmdAuthInfo
func (m *authInfosMap) UnmarshalJSON(data []byte) error {
var a []v1NamedAuthInfo
if err := json.Unmarshal(data, &a); err != nil {
return err
}
for _, e := range a {
authInfo := e.AuthInfo // Allocates a new instance in each iteration
(*m)[e.Name] = &authInfo
}
return nil
}
type contextsMap map[string]*clientcmdContext
func (m *contextsMap) UnmarshalJSON(data []byte) error {
var a []v1NamedContext
if err := json.Unmarshal(data, &a); err != nil {
return err
}
for _, e := range a {
context := e.Context // Allocates a new instance in each iteration
(*m)[e.Name] = &context
}
return nil
}
// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
// NewConfig is a convenience function that returns a new Config object with non-nil maps
func clientcmdNewConfig() *clientcmdConfig {
return &clientcmdConfig{
Clusters: make(map[string]*clientcmdCluster),
AuthInfos: make(map[string]*clientcmdAuthInfo),
Contexts: make(map[string]*clientcmdContext),
}
}
// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
// Cluster contains information about how to communicate with a kubernetes cluster
type clientcmdCluster struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Server is the address of the kubernetes cluster (https://hostname:port).
Server string `json:"server"`
// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
// CertificateAuthority is the path to a cert file for the certificate authority.
CertificateAuthority string `json:"certificate-authority,omitempty"`
// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
}
// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are.
type clientcmdAuthInfo struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// ClientCertificate is the path to a client cert file for TLS.
ClientCertificate string `json:"client-certificate,omitempty"`
// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
// ClientKey is the path to a client key file for TLS.
ClientKey string `json:"client-key,omitempty"`
// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
ClientKeyData []byte `json:"client-key-data,omitempty"`
// Token is the bearer token for authentication to the kubernetes cluster.
Token string `json:"token,omitempty"`
// Username is the username for basic authentication to the kubernetes cluster.
Username string `json:"username,omitempty"`
// Password is the password for basic authentication to the kubernetes cluster.
Password string `json:"password,omitempty"`
}
// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
type clientcmdContext struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Cluster is the name of the cluster for this context
Cluster string `json:"cluster"`
// AuthInfo is the name of the authInfo for this context
AuthInfo string `json:"user"`
// Namespace is the default namespace to use on unspecified requests
Namespace string `json:"namespace,omitempty"`
}
// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
// NamedCluster relates nicknames to cluster information
type v1NamedCluster struct {
// Name is the nickname for this Cluster
Name string `json:"name"`
// Cluster holds the cluster information
Cluster clientcmdCluster `json:"cluster"`
}
// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
// NamedContext relates nicknames to context information
type v1NamedContext struct {
// Name is the nickname for this Context
Name string `json:"name"`
// Context holds the context information
Context clientcmdContext `json:"context"`
}
// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
// NamedAuthInfo relates nicknames to auth information
type v1NamedAuthInfo struct {
// Name is the nickname for this AuthInfo
Name string `json:"name"`
// AuthInfo holds the auth information
AuthInfo clientcmdAuthInfo `json:"user"`
}
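// getOpenShiftRESTBasics is an illustrative sketch, not part of the upstream
// copy: it shows how the pieces above compose. The kubeconfig-style client
// config is loaded and resolved into a restConfig, from which the base URL
// and HTTP client used to reach the API server are derived.
func getOpenShiftRESTBasics() (*url.URL, *http.Client, error) {
	cfg, err := defaultClientConfig().ClientConfig()
	if err != nil {
		return nil, nil, err
	}
	return restClientFor(cfg)
}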
|
[
"\"KUBECONFIG\"",
"\"KUBERNETES_MASTER\"",
"\"NO_PROXY\"",
"\"DISABLE_HTTP2\""
] |
[] |
[
"NO_PROXY",
"DISABLE_HTTP2",
"KUBERNETES_MASTER",
"KUBECONFIG"
] |
[]
|
["NO_PROXY", "DISABLE_HTTP2", "KUBERNETES_MASTER", "KUBECONFIG"]
|
go
| 4 | 0 | |
01.HelloWorld.py/v2.second-service/app/src/main.py
|
import os
from flask import Flask, json
import berlioz
app = Flask(__name__)
berlioz.setupFlask(app)
@app.route('/')
def my_handler():
return json.dumps({
'message': 'Hello from App Tier',
'appId': os.environ['BERLIOZ_TASK_ID']
})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
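# Illustrative note, not part of the original sample: the handler assumes the
# BERLIOZ_TASK_ID environment variable is set by the Berlioz runtime. To try
# the service locally you would export it first, for example:
#
#   BERLIOZ_TASK_ID=local-test python main.py
#
# after which GET / returns JSON along the lines of
#   {"message": "Hello from App Tier", "appId": "local-test"}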
|
[] |
[] |
[
"BERLIOZ_TASK_ID"
] |
[]
|
["BERLIOZ_TASK_ID"]
|
python
| 1 | 0 | |
plugins/doc/doc_test.go
|
/*
Copyright © 2019 The Goca.io team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package doc
import (
"os"
"strings"
"testing"
"github.com/gocaio/goca"
"github.com/gocaio/goca/gocaTesting"
)
// Test server URL.
var testserver = os.Getenv("GOCA_TEST_SERVER")
// T is a global reference for the test. This allows us to use *testing.T
// methods anywhere
var T *testing.T
// TestReadDOC tests the read on DOC files
func TestReadDOC(t *testing.T) {
	T = t // Assign t (*testing.T) to the global T variable
// Get a controller
ctrl := goca.NewControllerTest()
	// Subscribe processOutput. The proper test will be placed in processOutput
ctrl.Subscribe(goca.Topics["NewOutput"], processOutput)
// Call the plugin entrypoint
setup(ctrl)
gocatesting.GetAssets(t, ctrl, testserver, plugName)
}
func processOutput(module, url string, out *goca.Output) {
// We have to validate goca.Output according to the resource
parts := strings.Split(out.Target, "/")
switch parts[len(parts)-1] {
case "Doc1.docx":
validateCaseA(out)
case "Doc2.docx":
validateCaseB(out)
case "Doc3.docx":
validateCaseC(out)
}
}
func validateCaseA(out *goca.Output) {
if out.MainType != "DOCX" {
T.Errorf("expected DOCX but found %s", out.MainType)
}
if out.Title != "2018–2019 Statewide Testing Schedule and Administration Deadlines, January 18, 2019" {
T.Errorf("expected \"2018–2019 Statewide Testing Schedule and Administration Deadlines, January 18, 2019\" but found %s", out.Title)
}
if out.Comment != "" {
T.Errorf("expected \"\" but found %s", out.Comment)
}
if out.Producer != "DESE" {
T.Errorf("expected \"DESE\" but found %s", out.Producer)
}
if out.Keywords != "" {
T.Errorf("expected \"\" but found %s", out.Keywords)
}
if out.Description != "" {
T.Errorf("expected \"\" but found %s", out.MainType)
}
if out.ModifiedBy != "Zou, Dong (EOE)" {
T.Errorf("expected \"Zou, Dong (EOE)\" but found %s", out.ModifiedBy)
}
if out.DocumentID != "16" {
T.Errorf("expected \"16\" but found %s", out.DocumentID)
}
if out.CreateDate != "2018-10-24T14:04:00Z" {
T.Errorf("expected \"2018-10-24T14:04:00Z\" but found %s", out.CreateDate)
}
if out.ModifyDate != "2019-01-18T20:59:00Z" {
T.Errorf("expected \"2019-01-18T20:59:00Z\" but found %s", out.ModifyDate)
}
if out.Category != "" {
T.Errorf("expected \"\" but found %s", out.Category)
}
}
func validateCaseB(out *goca.Output) {
if out.MainType != "DOCX" {
T.Errorf("expected DOCX but found %s", out.MainType)
}
if out.Title != "Arizona’s Instrument to Measure Standards" {
T.Errorf("expected \"Arizona’s Instrument to Measure Standards\" but found %s", out.Title)
}
if out.Comment != "" {
T.Errorf("expected \"\" but found %s", out.Comment)
}
if out.Producer != "Network Services" {
T.Errorf("expected \"Network Services\" but found %s", out.Producer)
}
if out.Keywords != "" {
T.Errorf("expected \"\" but found %s", out.Keywords)
}
if out.Description != "" {
T.Errorf("expected \"\" but found %s", out.MainType)
}
if out.ModifiedBy != "" {
T.Errorf("expected \"\" but found %s", out.ModifiedBy)
}
if out.DocumentID != "" {
T.Errorf("expected \"16\" but found %s", out.DocumentID)
}
if out.CreateDate != "2018-04-30T17:15:26Z" {
T.Errorf("expected \"2018-04-30T17:15:26Z\" but found %s", out.CreateDate)
}
if out.ModifyDate != "2018-04-30T17:15:26Z" {
T.Errorf("expected \"2018-04-30T17:15:26Z\" but found %s", out.ModifyDate)
}
if out.Category != "" {
T.Errorf("expected \"\" but found %s", out.Category)
}
}
func validateCaseC(out *goca.Output) {
if out.MainType != "DOCX" {
T.Errorf("expected DOCX but found %s", out.MainType)
}
if out.Title != "MCAS Permission Request to Test in Alternate Setting Form: 2018" {
T.Errorf("expected \"MCAS Permission Request to Test in Alternate Setting Form: 2018\" but found %s", out.Title)
}
if out.Comment != "" {
T.Errorf("expected \"\" but found %s", out.Comment)
}
if out.Producer != "" {
T.Errorf("expected \"\" but found %s", out.Producer)
}
if out.Keywords != "" {
T.Errorf("expected \"\" but found %s", out.Keywords)
}
if out.Description != "" {
T.Errorf("expected \"\" but found %s", out.MainType)
}
if out.ModifiedBy != "" {
T.Errorf("expected \"\" but found %s", out.ModifiedBy)
}
if out.DocumentID != "1" {
T.Errorf("expected \"1\" but found %s", out.DocumentID)
}
if out.CreateDate != "2018-03-20T20:23:00Z" {
T.Errorf("expected \"2018-03-20T20:23:00Z\" but found %s", out.CreateDate)
}
if out.ModifyDate != "2018-03-20T20:23:00Z" {
T.Errorf("expected \"2018-03-20T20:23:00Z\" but found %s", out.ModifyDate)
}
if out.Category != "" {
T.Errorf("expected \"\" but found %s", out.Category)
}
}
|
[
"\"GOCA_TEST_SERVER\""
] |
[] |
[
"GOCA_TEST_SERVER"
] |
[]
|
["GOCA_TEST_SERVER"]
|
go
| 1 | 0 | |
internal/tests/overall_test.go
|
package tests
import (
"context"
"encoding/json"
"flag"
"log"
"os"
"strings"
"testing"
"time"
"github.com/PumpkinSeed/heimdall/cmd/flags"
"github.com/PumpkinSeed/heimdall/cmd/server"
"github.com/PumpkinSeed/heimdall/internal/socket"
"github.com/PumpkinSeed/heimdall/internal/structs"
"github.com/PumpkinSeed/heimdall/pkg/client"
"github.com/PumpkinSeed/heimdall/pkg/client/grpc"
initcommand "github.com/PumpkinSeed/heimdall/pkg/init"
externalStructs "github.com/PumpkinSeed/heimdall/pkg/structs"
"github.com/urfave/cli/v2"
)
var force = false
func TestEncrypt(t *testing.T) {
	if runTest := os.Getenv("OVERALL"); !force && runTest != "true" {
t.Skip("Don't run overall test at this time")
}
set := flag.NewFlagSet("server", 0)
set.String(flags.NameBackendCredentials, "89C2B840-CDE0-4E77-ACAF-73EABB7A489B", "doc")
set.String(flags.NameBackendAddress, "127.0.0.1:8500", "doc")
set.String(flags.NameGrpc, "0.0.0.0:9090", "doc")
set.String(flags.NameHttp, "0.0.0.0:10080", "doc")
set.String(flags.NameSocket, "/tmp/mellek.sock", "doc")
set.Int(flags.NameThreshold, 3, "doc")
set.Int(flags.NameTotalShares, 5, "doc")
ctx := cli.NewContext(nil, set, nil)
	done := make(chan struct{}, 1) // buffered so the goroutine below cannot block forever on send
	go func(done chan struct{}) {
		if err := server.Cmd.Action(ctx); err != nil {
			log.Print(err)
		}
		done <- struct{}{}
	}(done)
time.Sleep(3 * time.Second)
initParams := initcommand.Request{
SecretShares: ctx.Int(flags.NameTotalShares),
SecretThreshold: ctx.Int(flags.NameThreshold),
}
data, err := json.Marshal(initParams)
if err != nil {
t.Fatal(err)
}
	resp, err := socket.Do(ctx, structs.SocketRequest{
		Type: structs.SocketInit,
		Data: data,
	})
	if err != nil {
		t.Fatal(err)
	}
	log.Println(string(resp))
type Result struct {
SecretShares []string
RootToken string
}
var initResult Result
if err := json.Unmarshal(resp, &initResult); err != nil {
t.Error(err)
}
for _, key := range initResult.SecretShares {
unsealResult, err := socket.Do(ctx, structs.SocketRequest{
Type: structs.SocketUnseal,
Data: []byte(key),
})
if err != nil {
t.Fatal(err)
}
if strings.Contains(string(unsealResult), "Unsealed: true") {
break
}
}
hc := client.New(grpc.Options{
TLS: false,
URLs: []string{"127.0.0.1:9090"},
})
keyname := "test1234"
keyResp, err := hc.CreateKey(context.Background(), &externalStructs.Key{
Name: keyname,
})
if err != nil {
t.Error(err)
}
log.Println(keyResp)
plaintext := "test"
mes := time.Now()
encryptResult, err := hc.Encrypt(context.Background(), &externalStructs.EncryptRequest{
KeyName: keyname,
PlainText: plaintext,
})
if err != nil {
t.Error(err)
}
log.Println("Encrypt time: " + time.Since(mes).String())
mes = time.Now()
decryptResult, err := hc.Decrypt(context.Background(), &externalStructs.DecryptRequest{
KeyName: keyname,
Ciphertext: encryptResult.Result,
})
if err != nil {
t.Error(err)
}
log.Println("Decrypt time: " + time.Since(mes).String())
if decryptResult.Result != plaintext {
t.Errorf("Decrypted result should be %s, instead of %s", plaintext, decryptResult.Result)
}
mes = time.Now()
for i := 0; i < 100; i++ {
_, err = hc.Encrypt(context.Background(), &externalStructs.EncryptRequest{
KeyName: keyname,
PlainText: plaintext,
})
if err != nil {
t.Error(err)
}
}
log.Println("Encrypt 100 time: " + time.Since(mes).String())
//<- done
}
|
[
"\"OVERALL\""
] |
[] |
[
"OVERALL"
] |
[]
|
["OVERALL"]
|
go
| 1 | 0 | |
components/video/stream.go
|
package video
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"mime"
"mime/multipart"
"net/http"
"net/url"
"regexp"
"time"
"github.com/sirupsen/logrus"
)
const (
StatusNotRunning = "NOT_RUNNING"
StatusRunning = "RUNNING"
)
type Capture struct {
CapturerUUID string
Data []byte
Timestamp time.Time
}
type MJPEGCapturer struct {
UUIDIdent string
URL *url.URL
output chan *Capture
close chan struct{}
logger *logrus.Logger
status string
maxBackOff float64
}
func NewMJPEGCapturer(uuid, rawURL string, maxFrameBuffer int, logger *logrus.Logger) (*MJPEGCapturer, error) {
captURL, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("capturer (%s): %w", rawURL, err)
}
if !regexp.MustCompile("https?").MatchString(captURL.Scheme) {
return nil, fmt.Errorf("capturer (%s): only http or https scheme supported", rawURL)
}
var maxBackOff float64 = 16
return &MJPEGCapturer{
UUIDIdent: uuid,
URL: captURL,
output: make(chan *Capture, maxFrameBuffer),
close: make(chan struct{}, 1),
logger: logger,
status: StatusNotRunning,
maxBackOff: maxBackOff, // TODO: think about extracting this into a client configuration.
}, nil
}
func (m *MJPEGCapturer) UUID() string {
return m.UUIDIdent
}
func (m *MJPEGCapturer) Start() {
m.status = StatusRunning
resp, err := m.connect()
if err != nil {
m.logger.Error(err)
return
}
defer resp.Body.Close()
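// The MJPEG stream is delivered as a multipart HTTP response: the Content-Type header
// carries the part boundary, and each part body is one JPEG frame. The outer loop
// re-reads the media type, while the inner loop pulls frames until an error or a
// close signal breaks it.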
mainLoop:
for {
select {
case <-m.close:
close(m.output)
break mainLoop
default:
_, param, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
if err != nil {
m.logger.Error(err)
break mainLoop
}
mr := multipart.NewReader(resp.Body, param["boundary"])
partLoop:
for {
select {
case <-m.close:
close(m.output)
break mainLoop
default:
if err := m.processNextPart(mr); err != nil {
m.logger.Error(err)
break partLoop
}
}
}
}
}
}
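// connect retries the HTTP request until it succeeds or the capturer is closed.
// Between attempts it sleeps for 2^backoff seconds, growing the exponent by one per
// failure until it reaches maxBackOff, which caps the sleep interval.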
func (m *MJPEGCapturer) connect() (resp *http.Response, err error) {
var backoff float64 = 1
var sleepTime time.Duration
for {
time.Sleep(sleepTime * time.Second)
var req *http.Request
req, err = http.NewRequestWithContext(context.TODO(), http.MethodGet, m.URL.String(), nil)
if err != nil {
return nil, err
}
client := &http.Client{} // TODO think about transport, timeouts and further configuration
resp, err = client.Do(req)
if err != nil {
select {
case <-m.close:
close(m.output)
return
default:
if backoff < m.maxBackOff {
sleepTime = time.Duration(math.Exp2(backoff))
backoff++
}
m.logger.Error(fmt.Errorf("capturer: %w", err))
continue
}
}
break
}
return
}
func (m *MJPEGCapturer) processNextPart(mr *multipart.Reader) error {
p, err := mr.NextPart()
if errors.Is(err, io.EOF) {
return fmt.Errorf("capturer: %w", err)
}
if err != nil {
return fmt.Errorf("capturer: %w", err)
}
data, err := ioutil.ReadAll(p)
if err != nil {
return fmt.Errorf("capturer: %w", err)
}
select {
case m.output <- &Capture{
CapturerUUID: m.UUID(),
Data: data,
Timestamp: time.Now(),
}:
case <-m.close:
}
return nil
}
func (m *MJPEGCapturer) NextOutput() (*Capture, error) {
capt, ok := <-m.output
if !ok {
return nil, io.EOF
}
return capt, nil
}
func (m *MJPEGCapturer) Close() {
close(m.close)
}
func (m *MJPEGCapturer) Status() string {
return m.status
}
func (m *MJPEGCapturer) TargetURL() string {
return m.URL.String()
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
volttrontesting/services/test_platformweb.py
|
"""
This file tests the PlatformWebService as it is used in the base platform. Most
of the tests in here are not integration tests, but unit tests to test the
functionality of the PlatformWebService agent.
"""
import binascii
import contextlib
from io import BytesIO
import mock
import os
import shutil
import tempfile
from types import SimpleNamespace
from urllib.parse import urlencode
import pytest
from deepdiff import DeepDiff
from werkzeug.wrappers import Response
from volttron.platform import jsonapi
from volttron.platform.agent.known_identities import PLATFORM_WEB
from volttron.platform.keystore import KeyStore
from volttron.platform.vip.agent import Agent
from volttron.platform.vip.agent.subsystems.web import ResourceType
from volttron.platform.vip.socket import decode_key
from volttron.platform.web import PlatformWebService
from volttron.platform.web.admin_endpoints import AdminEndpoints
from volttron.utils import get_random_key
from volttrontesting.utils.platformwrapper import create_volttron_home
from volttrontesting.fixtures.volttron_platform_fixtures import get_test_volttron_home
from volttrontesting.utils.web_utils import get_test_web_env
from volttrontesting.utils.utils import AgentMock, get_hostname_and_random_port
#from volttrontesting.utils.platformwrapper import create_volttron_home
from volttrontesting.fixtures.cert_fixtures import certs_profile_1
# Patch the PlatformWebService so the underlying Agent interfaces are mocked
# so we can just test the things that the PlatformWebService is responsible for.
PlatformWebService.__bases__ = (AgentMock.imitate(Agent, Agent()),)
#TODO add tests for new RPC calls
@pytest.fixture()
def platform_web_service():
serverkey = "serverkey"
mock_aip = mock.Mock()
yield PlatformWebService(serverkey=serverkey, identity=PLATFORM_WEB, address="tcp://stuff",
bind_web_address="http://v2:8888")
@contextlib.contextmanager
def get_platform_web(bind_web_address="http://v2:8080", **kwargs) -> PlatformWebService:
"""
Create a new PlatformWebService instance with a mocked aip.
:return: PlatformWebService
"""
serverkey = "serverkey"
mws = PlatformWebService(serverkey=serverkey, identity=PLATFORM_WEB, address="tcp://stuff",
bind_web_address=bind_web_address, **kwargs)
mws.startupagent(sender='testweb')
# original_volttron_home = os.environ.get('VOLTTRON_HOME')
# new_volttron_home = create_volttron_home()
# os.environ['VOLTTRON_HOME'] = new_volttron_home
yield mws
# if original_volttron_home is None:
# os.environ.unsetenv('VOLTTRON_HOME')
# else:
# os.environ['VOLTTRON_HOME'] = original_volttron_home
mws.unregister_all_agent_routes()
mws.onstop(sender='testweb')
#shutil.rmtree(new_volttron_home, ignore_errors=True)
def get_server_response(env_fixture, ws):
"""
Call the app_routing function on the passed `PlatformWebService` instance with
the environment <env_fixture> and a mocked start_response function.
:param env_fixture: environment to run in
:param ws: PlatformWebService instance.
:return: tuple
"""
mocked_start_response = mock.MagicMock()
iobytes = ws.app_routing(env_fixture, mocked_start_response)
response = BytesIO()
if isinstance(iobytes, Response):
for chunk in iobytes.response:
if isinstance(chunk, str):
response.write(chunk.encode('utf-8'))
else:
response.write(chunk)
else:
for chunk in iobytes:
response.write(chunk)
# use getvalue() instead of seeking to the beginning of the stream and reading.
response = response.getvalue().decode('utf-8')
return mocked_start_response, response
def add_points_of_interest(ws: PlatformWebService, endpoints: dict):
"""
Adds endpoints based upon type.
The three supported types are 'agent_route', 'endpoint', and 'path'.
:param ws: The PlatformWebService object
:param endpoints: A dictionary of endpoints
"""
for k, v in endpoints.items():
if v['type'] == 'agent_route':
ws.register_agent_route(k, v['fn'])
elif v['type'] == 'endpoint':
ws.register_endpoint(k, ResourceType.RAW.value)
elif v['type'] == 'path':
ws.register_path_route(k, v['root_dir'])
else:
raise ValueError(f"Invalid type specified in endpoints dictionary {k}")
@pytest.mark.parametrize('scheme', ('http', 'https'))
def test_authenticate_endpoint(scheme):
kwargs = {}
# Note this is not a context wrapper, it just does the creation for us
vhome = create_volttron_home()
if scheme == 'https':
with certs_profile_1(vhome) as certs:
kwargs['web_ssl_key'] = certs.server_certs[0].key_file
kwargs['web_ssl_cert'] = certs.server_certs[0].cert_file
else:
kwargs['web_secret_key'] = binascii.hexlify(os.urandom(65)).decode('utf-8')
host, port = get_hostname_and_random_port()
kwargs['bind_web_address'] = f"{scheme}://{host}:{port}"
# We are specifying the volttron_home here so we don't create an additional one.
with get_test_volttron_home(messagebus='zmq', config_params=kwargs, volttron_home=vhome):
# add a user so that we can actually log in.
user = 'bogart'
passwd = 'cat'
adminep = AdminEndpoints()
adminep.add_user(user, passwd, groups=['foo', 'read-only'])
expected_claims = dict(groups=['foo', 'read-only'])
with get_platform_web(**kwargs) as mw:
data = urlencode(dict(username=user, password=passwd)).encode('utf-8')
assert len(data) > 0
# authenticate using the username/password form data.
env = get_test_web_env("/authenticate", input_data=data, method='POST')
mocked_start_response, response = get_server_response(env, mw)
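# A successful authentication is expected to return a JWT, i.e. three
# dot-separated segments (header.payload.signature), which the assertion below checks.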
assert 3 == len(response.split("."))
claims = mw.get_user_claims(response)
assert claims
assert not DeepDiff(expected_claims, claims)
class MockQuery(object):
"""
The MockQuery object is used to be able to mock the .get() from AsyncResult()
objects.
The constructor takes key:value arguments. The keys should be the arguments passed
to the query method, with the values the return value of the call.
"""
def __init__(self, **kwargs):
"""
Constructs a MockQuery instance and creates key value entries for each
of the kwargs.
:param kwargs:
"""
self._kvargs = {}
for k, v in kwargs.items():
self._kvargs[k] = v
def query(self, key):
"""
Mock for the query function of the volttron.platform.vip.agent.subsystems.query.Query object.
:param key: the key on the server to be returned.
:return: A class with a .get(timeout) function available.
"""
return MockQuery.InnerClass(self._kvargs[key])
class InnerClass(object):
def __init__(self, value):
self.value = value
def get(self, timeout=5):
return self.value
@pytest.mark.parametrize('scheme', ('http', 'https'))
def test_discovery(scheme):
vhome = create_volttron_home()
# creates a vhome level key store
keystore = KeyStore()
serverkey = decode_key(keystore.public)
# Depending upon scheme we enable/disable password jwt and certificate based jwt.
if scheme == 'https':
with certs_profile_1('/'.join([vhome, 'certs'])) as certs:
config_params = dict(web_ssl_key=certs.server_certs[0].key_file,
web_ssl_cert=certs.server_certs[0].cert_file)
else:
config_params = dict(web_secret_key=get_random_key())
with get_test_volttron_home(messagebus='zmq', config_params=config_params):
instance_name = "booballoon"
host, port = get_hostname_and_random_port()
# this is the vip address
address = f"tcp://{host}:{port}"
def _construct_query_mock(core):
"""
Internal function that creates a concrete response for the data.
When query('instance-name').get() is called, the passed instance name
is returned.
"""
nonlocal instance_name, address
kv = {
"instance-name": instance_name,
"addresses": [address]
}
return MockQuery(**kv)
with mock.patch('volttron.platform.vip.agent.subsystems.query.Query', _construct_query_mock):
host, port = get_hostname_and_random_port()
bind_web_address = f"{scheme}://{host}:{port}"
serverkey = decode_key(keystore.public)
mws = PlatformWebService(serverkey=serverkey, identity=PLATFORM_WEB, address=address,
bind_web_address=bind_web_address, **config_params)
mws.startupagent(sender='testweb')
env = get_test_web_env("/discovery/")
mock_start_response = mock.Mock()
# A ClosingIterator is returned from the response object, so we call __next__()
# on the returned response. Then we can decode the JSON body.
response = mws.app_routing(env, mock_start_response).__next__()
# load json into a dict for testing responses.
response = jsonapi.loads(response.decode('utf-8'))
assert response.get('instance-name') is not None
assert instance_name == response.get('instance-name')
assert keystore.public == response.get('serverkey')
assert address == response.get('vip-address')
# def test_platformweb_has_discovery():
# web_secret = "my secret key"
#
# def _construct_query_mock(core):
# instance_name = "booballoon"
# kv = {
# "instance-name": instance_name,
# "addresses": []
# }
# return MockQuery(**kv)
#
# with mock.patch('volttron.platform.vip.agent.subsystems.query.Query', _construct_query_mock):
# with get_platform_web(web_secret_key=web_secret) as mw:
# env = get_test_web_env("/discovery/")
# mocked_start_response, response = get_server_response(env, mw)
#
# assert response
@pytest.mark.web
def test_path_route():
with get_platform_web(web_secret_key="oh my goodnes") as ws:
# Stage 1 create a temp dir and add index.html to that directory
tempdir = tempfile.mkdtemp(prefix="web")
html = """<html><head><title>sweet</title><body>Yay I am here</body></html>"""
index_filepath = f"{tempdir}/myhtml/index.html"
os.makedirs(os.path.dirname(index_filepath))
with open(index_filepath, 'w') as fp:
fp.write(html)
# Stage 2 register the path route and specify the root directory as the
# temp directory created above.
interest = {"/myhtml": {"type": "path", "root_dir": tempdir}}
registered_routes_before = len(ws.registeredroutes)
add_points_of_interest(ws, interest)
assert 1 == len(ws.pathroutes)
assert registered_routes_before + 1 == len(ws.registeredroutes)
# Stage 3 - emulate a request which will call the app_routing function
#
# We need to update the env_fixture to what the webserver would normally send
# to the server. So for this we are going to update the PATH_INFO
#
# since we registered the path /myhtml then this should route to
# <tempdir>/myhtml/index.html for this example.
env_fixture = get_test_web_env('/myhtml/index.html')
mocked_start_response, response = get_server_response(env_fixture, ws)
assert response == html
assert mocked_start_response.call_count == 1
mocked_start_response.reset_mock()
# file not found
env_fixture = get_test_web_env('/myhtml/alpha.jpg')
mocked_start_response, response = get_server_response(env_fixture, ws)
assert response == '<h1>Not Found</h1>'
assert mocked_start_response.call_count == 1
mocked_start_response.reset_mock()
# TODO: redirect to new content, need to set up some more stuff for this one to work
# env_fixture = get_test_web_env('/')
# mocked_start_response, response = get_server_response(env_fixture, ws)
# assert response == '<h1>Not Found</h1>'
# assert mocked_start_response.call_count == 1
# mocked_start_response.reset_mock()
@pytest.mark.web
def test_register_route(platform_web_service: PlatformWebService):
ws = platform_web_service
fn_mock = mock.Mock()
fn_mock.__name__ = "test_register_route"
interest = {'/web': {'type': 'agent_route', 'fn': fn_mock}}
routes_before = len(ws.peerroutes)
registered_routes_before = len(ws.registeredroutes)
# setup the context for the rpc call
ws.vip.rpc.context.vip_message.peer.return_value = "my_agent"
add_points_of_interest(ws, interest)
assert routes_before + 1 == len(ws.peerroutes)
assert registered_routes_before + 1 == len(ws.registeredroutes)
ws.unregister_all_agent_routes()
assert routes_before == len(ws.peerroutes)
assert registered_routes_before == len(ws.registeredroutes)
@pytest.mark.web
def test_register_endpoint(platform_web_service: PlatformWebService):
ws = platform_web_service
fn_mock = mock.Mock()
fn_mock.__name__ = "test_register_endpoint"
interest = {"/battle/one": {'type': 'endpoint'}}
# setup the context for the rpc call
ws.vip.rpc.context.vip_message.peer.return_value = "my_agent"
add_points_of_interest(ws, interest)
assert len(ws.endpoints) == 1
ws.unregister_all_agent_routes()
assert len(ws.endpoints) == 0
|
[] |
[] |
[
"VOLTTRON_HOME"
] |
[]
|
["VOLTTRON_HOME"]
|
python
| 1 | 0 | |
pkg/microservice/reaper/core/service/reaper/reaper.go
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reaper
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"gopkg.in/yaml.v3"
"github.com/koderover/zadig/pkg/microservice/reaper/config"
"github.com/koderover/zadig/pkg/microservice/reaper/core/service/meta"
"github.com/koderover/zadig/pkg/setting"
"github.com/koderover/zadig/pkg/tool/log"
"github.com/koderover/zadig/pkg/types"
"github.com/koderover/zadig/pkg/util/fs"
)
const (
ReadmeScriptFile = "readme_script.sh"
ReadmeFile = "/tmp/README"
)
type Reaper struct {
Ctx *meta.Context
StartTime time.Time
ActiveWorkspace string
UserEnvs map[string]string
Type types.ReaperType
cm CacheManager
}
func NewReaper() (*Reaper, error) {
context, err := ioutil.ReadFile(config.JobConfigFile())
if err != nil {
return nil, fmt.Errorf("read job config file error: %v", err)
}
var ctx *meta.Context
if err := yaml.Unmarshal(context, &ctx); err != nil {
return nil, fmt.Errorf("cannot unmarshal job data: %v", err)
}
ctx.Paths = config.Path()
reaper := &Reaper{
Ctx: ctx,
cm: NewTarCacheManager(ctx.StorageURI, ctx.PipelineName, ctx.ServiceName, ctx.AesKey),
}
if ctx.TestType == "" {
reaper.Type = types.BuildReaperType
} else {
reaper.Type = types.TestReaperType
}
workspace := "/workspace"
if reaper.Ctx.ClassicBuild {
workspace = reaper.Ctx.Workspace
}
err = reaper.EnsureActiveWorkspace(workspace)
if err != nil {
return nil, fmt.Errorf("failed to ensure active workspace `%s`: %s", workspace, err)
}
userEnvs := reaper.getUserEnvs()
reaper.UserEnvs = make(map[string]string, len(userEnvs))
for _, env := range userEnvs {
items := strings.Split(env, "=")
if len(items) != 2 {
continue
}
reaper.UserEnvs[items[0]] = items[1]
}
return reaper, nil
}
func (r *Reaper) GetCacheFile() string {
return filepath.Join(r.Ctx.Workspace, "reaper.tar.gz")
}
func (r *Reaper) CompressCache(storageURI string) error {
cacheDir := r.ActiveWorkspace
if r.Ctx.CacheDirType == types.UserDefinedCacheDir {
// Note: the product supports using environment variables, so we need to parse the directory path here.
cacheDir = r.renderUserEnv(r.Ctx.CacheUserDir)
}
log.Infof("Data in `%s` will be cached.", cacheDir)
if err := r.cm.Archive(cacheDir, r.GetCacheFile()); err != nil {
return fmt.Errorf("failed to cache %s: %s", cacheDir, err)
}
log.Infof("Succeed to cache %s.", cacheDir)
// remove workspace
err := os.RemoveAll(r.ActiveWorkspace)
if err != nil {
log.Errorf("RemoveAll err:%v", err)
return err
}
return nil
}
func (r *Reaper) DecompressCache() error {
cacheDir := r.ActiveWorkspace
if r.Ctx.CacheDirType == types.UserDefinedCacheDir {
// Note: the product supports using environment variables, so we need to parse the directory path here.
cacheDir = r.renderUserEnv(r.Ctx.CacheUserDir)
}
err := r.EnsureDir(cacheDir)
if err != nil {
return fmt.Errorf("failed to ensure cache dir `%s`: %s", cacheDir, err)
}
log.Infof("Cache will be decompressed to %s.", cacheDir)
err = r.cm.Unarchive(r.GetCacheFile(), cacheDir)
if err != nil && strings.Contains(err.Error(), "decompression OK") {
// the archive tool may report "decompression OK, trailing garbage ignored", which is benign
err = nil
}
return err
}
func (r *Reaper) EnsureActiveWorkspace(workspace string) error {
if workspace == "" {
tempWorkspace, err := ioutil.TempDir(os.TempDir(), "reaper")
if err != nil {
return fmt.Errorf("create workspace error: %v", err)
}
r.ActiveWorkspace = tempWorkspace
return os.Chdir(r.ActiveWorkspace)
}
err := os.MkdirAll(workspace, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create workspace: %v", err)
}
r.ActiveWorkspace = workspace
return os.Chdir(r.ActiveWorkspace)
}
func (r *Reaper) EnsureDir(dir string) error {
return os.MkdirAll(dir, os.ModePerm)
}
func (r *Reaper) BeforeExec() error {
r.StartTime = time.Now()
log.Infof("Checking Docker Connectivity.")
startTimeCheckDocker := time.Now()
for i := 0; i < 15; i++ {
if err := dockerInfo().Run(); err == nil {
break
}
time.Sleep(time.Second * 1)
}
log.Infof("Check ended. Duration: %.2f seconds.", time.Since(startTimeCheckDocker).Seconds())
if r.Ctx.DockerRegistry != nil {
if r.Ctx.DockerRegistry.UserName != "" {
log.Infof("Logining Docker Registry: %s.", r.Ctx.DockerRegistry.Host)
startTimeDockerLogin := time.Now()
cmd := dockerLogin(r.Ctx.DockerRegistry.UserName, r.Ctx.DockerRegistry.Password, r.Ctx.DockerRegistry.Host)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to login docker registry: %s", err)
}
log.Infof("Login ended. Duration: %.2f seconds.", time.Since(startTimeDockerLogin).Seconds())
}
}
if r.Ctx.CacheEnable && r.Ctx.Cache.MediumType == types.ObjectMedium {
log.Info("Pulling Cache.")
startTimePullCache := time.Now()
if err := r.DecompressCache(); err != nil {
// If the workflow runs for the first time, there may be no cache.
log.Infof("Failed to pull cache: %s. Duration: %.2f seconds.", err, time.Since(startTimePullCache).Seconds())
} else {
log.Infof("Succeed to pull cache. Duration: %.2f seconds.", time.Since(startTimePullCache).Seconds())
}
}
if err := os.MkdirAll(path.Join(os.Getenv("HOME"), "/.ssh"), os.ModePerm); err != nil {
return fmt.Errorf("create ssh folder error: %v", err)
}
if r.Ctx.Archive != nil && len(r.Ctx.Archive.Dir) > 0 {
if err := os.MkdirAll(r.Ctx.Archive.Dir, os.ModePerm); err != nil {
return fmt.Errorf("create DistDir error: %v", err)
}
}
if r.Ctx.Git != nil {
if err := r.Ctx.Git.WriteGithubSSHFile(); err != nil {
return fmt.Errorf("write github ssh file error: %v", err)
}
if err := r.Ctx.Git.WriteGitlabSSHFile(); err != nil {
return fmt.Errorf("write gitlab ssh file error: %v", err)
}
if err := r.Ctx.Git.WriteKnownHostFile(); err != nil {
return fmt.Errorf("write known_host file error: %v", err)
}
if err := r.Ctx.Git.WriteSSHConfigFile(r.Ctx.Proxy); err != nil {
return fmt.Errorf("write ssh config error: %v", err)
}
}
if r.Ctx.GinkgoTest != nil && len(r.Ctx.GinkgoTest.ResultPath) > 0 {
r.Ctx.GinkgoTest.ResultPath = filepath.Join(r.ActiveWorkspace, r.Ctx.GinkgoTest.ResultPath)
if err := os.RemoveAll(r.Ctx.GinkgoTest.ResultPath); err != nil {
log.Warning(err.Error())
}
if err := os.MkdirAll(r.Ctx.GinkgoTest.ResultPath, os.ModePerm); err != nil {
return fmt.Errorf("create test result path error: %v", err)
}
}
return nil
}
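// dockerBuildCmd assembles the docker build invocation as a single "sh -c" command.
// With illustrative values only, dockerBuildCmd("Dockerfile", "repo/app:v1", ".", "", false)
// yields roughly: sh -c "docker build --rm=true -t repo/app:v1 -f Dockerfile ."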
func dockerBuildCmd(dockerfile, fullImage, ctx, buildArgs string, ignoreCache bool) *exec.Cmd {
args := []string{"-c"}
dockerCommand := "docker build --rm=true"
if ignoreCache {
dockerCommand += " --no-cache"
}
if buildArgs != "" {
for _, val := range strings.Fields(buildArgs) {
if val != "" {
dockerCommand = dockerCommand + " " + val
}
}
}
dockerCommand = dockerCommand + " -t " + fullImage + " -f " + dockerfile + " " + ctx
args = append(args, dockerCommand)
return exec.Command("sh", args...)
}
func (r *Reaper) setProxy(ctx *meta.DockerBuildCtx, cfg *meta.Proxy) {
if cfg.EnableRepoProxy && cfg.Type == "http" {
if !strings.Contains(strings.ToLower(ctx.BuildArgs), "--build-arg http_proxy=") {
ctx.BuildArgs = fmt.Sprintf("%s --build-arg http_proxy=%s", ctx.BuildArgs, cfg.GetProxyURL())
}
if !strings.Contains(strings.ToLower(ctx.BuildArgs), "--build-arg https_proxy=") {
ctx.BuildArgs = fmt.Sprintf("%s --build-arg https_proxy=%s", ctx.BuildArgs, cfg.GetProxyURL())
}
}
}
func (r *Reaper) dockerCommands() []*exec.Cmd {
cmds := make([]*exec.Cmd, 0)
cmds = append(
cmds,
dockerBuildCmd(
r.Ctx.DockerBuildCtx.GetDockerFile(),
r.Ctx.DockerBuildCtx.ImageName,
r.Ctx.DockerBuildCtx.WorkDir,
r.Ctx.DockerBuildCtx.BuildArgs,
r.Ctx.IgnoreCache,
),
dockerPush(r.Ctx.DockerBuildCtx.ImageName),
)
return cmds
}
func (r *Reaper) runDockerBuild() error {
if r.Ctx.DockerBuildCtx == nil {
return nil
}
log.Info("Preparing Dockerfile.")
startTimePrepareDockerfile := time.Now()
err := r.prepareDockerfile()
if err != nil {
return fmt.Errorf("failed to prepare dockerfile: %s", err)
}
log.Infof("Preparation ended. Duration: %.2f seconds.", time.Since(startTimePrepareDockerfile).Seconds())
if r.Ctx.Proxy != nil {
r.setProxy(r.Ctx.DockerBuildCtx, r.Ctx.Proxy)
}
log.Info("Runing Docker Build.")
startTimeDockerBuild := time.Now()
envs := r.getUserEnvs()
for _, c := range r.dockerCommands() {
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.Dir = r.ActiveWorkspace
c.Env = envs
if err := c.Run(); err != nil {
return fmt.Errorf("failed to run docker build: %s", err)
}
}
log.Infof("Docker build ended. Duration: %.2f seconds.", time.Since(startTimeDockerBuild).Seconds())
return nil
}
func (r *Reaper) prepareDockerfile() error {
if r.Ctx.DockerBuildCtx.Source == setting.DockerfileSourceTemplate {
reader := strings.NewReader(r.Ctx.DockerBuildCtx.DockerTemplateContent)
readCloser := io.NopCloser(reader)
path := fmt.Sprintf("/%s", setting.ZadigDockerfilePath)
err := fs.SaveFile(readCloser, path)
if err != nil {
return err
}
}
return nil
}
func (r *Reaper) Exec() error {
log.Info("Installing Dependency Packages.")
startTimeInstallDeps := time.Now()
if err := r.runIntallationScripts(); err != nil {
return fmt.Errorf("failed to install dependency packages: %s", err)
}
log.Infof("Install ended. Duration: %.2f seconds.", time.Since(startTimeInstallDeps).Seconds())
log.Info("Cloning Repository.")
startTimeCloneRepo := time.Now()
if err := r.runGitCmds(); err != nil {
return fmt.Errorf("failed to clone repository: %s", err)
}
log.Infof("Clone ended. Duration: %.2f seconds.", time.Since(startTimeCloneRepo).Seconds())
if err := r.createReadme(ReadmeFile); err != nil {
log.Warningf("Failed to create README file: %s", err)
}
log.Info("Executing User Build Script.")
startTimeRunBuildScript := time.Now()
if err := r.runScripts(); err != nil {
return fmt.Errorf("failed to execute user build script: %s", err)
}
log.Infof("Execution ended. Duration: %.2f seconds.", time.Since(startTimeRunBuildScript).Seconds())
return r.runDockerBuild()
}
func (r *Reaper) AfterExec() error {
if r.Ctx.GinkgoTest != nil {
resultPath := r.Ctx.GinkgoTest.ResultPath
if resultPath != "" && !strings.HasPrefix(resultPath, "/") {
resultPath = filepath.Join(r.ActiveWorkspace, resultPath)
}
if r.Ctx.TestType == "" {
r.Ctx.TestType = setting.FunctionTest
}
switch r.Ctx.TestType {
case setting.FunctionTest:
err := mergeGinkgoTestResults(r.Ctx.Archive.File, resultPath, r.Ctx.Archive.Dir, r.StartTime)
if err != nil {
return fmt.Errorf("failed to merge test result: %s", err)
}
case setting.PerformanceTest:
err := JmeterTestResults(r.Ctx.Archive.File, resultPath, r.Ctx.Archive.Dir)
if err != nil {
return fmt.Errorf("failed to archive performance test result: %s", err)
}
}
if len(r.Ctx.GinkgoTest.ArtifactPaths) > 0 {
if err := artifactsUpload(r.Ctx, r.ActiveWorkspace, r.Ctx.GinkgoTest.ArtifactPaths); err != nil {
return fmt.Errorf("failed to upload artifacts: %s", err)
}
}
if err := r.archiveTestFiles(); err != nil {
return fmt.Errorf("failed to archive test files: %s", err)
}
if err := r.archiveHTMLTestReportFile(); err != nil {
return fmt.Errorf("failed to archive HTML test report: %s", err)
}
}
if r.Ctx.ArtifactInfo == nil {
if err := r.archiveS3Files(); err != nil {
return fmt.Errorf("failed to archive S3 files: %s", err)
}
if err := r.RunPostScripts(); err != nil {
return fmt.Errorf("failed to run postscripts: %s", err)
}
} else {
if err := r.downloadArtifactFile(); err != nil {
return fmt.Errorf("failed to download artifact files: %s", err)
}
}
if r.Ctx.ArtifactPath != "" {
if err := artifactsUpload(r.Ctx, r.ActiveWorkspace, []string{r.Ctx.ArtifactPath}, "buildv3"); err != nil {
return fmt.Errorf("failed to upload artifacts: %s", err)
}
}
if err := r.RunPMDeployScripts(); err != nil {
return fmt.Errorf("failed to run deploy scripts on physical machine: %s", err)
}
// Upload workspace cache if the user turns on caching and uses object storage.
// Note: whether or not the cache upload succeeds must not block the overall process,
// so failures are only logged here and the process is not interrupted.
if r.Ctx.CacheEnable && r.Ctx.Cache.MediumType == types.ObjectMedium {
log.Info("Uploading Build Cache.")
startTimeUploadBuildCache := time.Now()
if err := r.CompressCache(r.Ctx.StorageURI); err != nil {
log.Warnf("Failed to upload build cache: %s. Duration: %.2f seconds.", err, time.Since(startTimeUploadBuildCache).Seconds())
} else {
log.Infof("Upload ended. Duration: %.2f seconds.", time.Since(startTimeUploadBuildCache).Seconds())
}
}
return nil
}
func (r *Reaper) maskSecret(secrets []string, message string) string {
out := message
for _, val := range secrets {
if len(val) == 0 {
continue
}
out = strings.Replace(out, val, "********", -1)
}
return out
}
const (
secretEnvMask = "********"
)
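// maskSecretEnvs hides secret values in log output. Each secret env entry is expected
// to be a KEY=VALUE pair; only the VALUE part is replaced with the mask so that the
// surrounding message stays readable.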
func (r *Reaper) maskSecretEnvs(message string) string {
out := message
for _, val := range r.Ctx.SecretEnvs {
if len(val) == 0 {
continue
}
sl := strings.Split(val, "=")
if len(sl) != 2 {
continue
}
if len(sl[0]) == 0 || len(sl[1]) == 0 {
// invalid key value pair received
continue
}
out = strings.Replace(out, strings.Join(sl[1:], "="), secretEnvMask, -1)
}
return out
}
func (r *Reaper) getUserEnvs() []string {
envs := []string{
"CI=true",
"ZADIG=true",
fmt.Sprintf("HOME=%s", config.Home()),
fmt.Sprintf("WORKSPACE=%s", r.ActiveWorkspace),
// TODO: the README file could be provided by another mechanism.
fmt.Sprintf("README=%s", ReadmeFile),
}
r.Ctx.Paths = strings.Replace(r.Ctx.Paths, "$HOME", config.Home(), -1)
envs = append(envs, fmt.Sprintf("PATH=%s", r.Ctx.Paths))
envs = append(envs, fmt.Sprintf("DOCKER_HOST=%s", config.DockerHost()))
envs = append(envs, r.Ctx.Envs...)
envs = append(envs, r.Ctx.SecretEnvs...)
return envs
}
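// renderUserEnv expands $VAR and ${VAR} references in raw using the user-provided
// environment map (via os.Expand). For example, with UserEnvs["CACHE_DIR"] = "/tmp/cache"
// (an illustrative value), "${CACHE_DIR}/deps" renders to "/tmp/cache/deps".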
func (r *Reaper) renderUserEnv(raw string) string {
mapper := func(env string) string {
return r.UserEnvs[env]
}
return os.Expand(raw, mapper)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
internal/mysql/mysql_init.go
|
package mysql
import (
"fmt"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
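// InitMysql reads the MYSQL_* environment variables and builds a go-sql-driver DSN of
// the form user:password@tcp(host:port)/dbname?charset=utf8mb4&parseTime=True&loc=Local,
// then opens a gorm connection with it.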
func InitMysql() (*gorm.DB, error) {
user := os.Getenv("MYSQL_USER")
password := os.Getenv("MYSQL_PASSWORD")
host := os.Getenv("MYSQL_HOST")
port := os.Getenv("MYSQL_PORT")
name := os.Getenv("MYSQL_NAME")
uri := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
user, password, host, port, name)
db, err := gorm.Open("mysql", uri)
if err != nil {
return nil, fmt.Errorf("failed to connect mysql server %s: %w", uri, err)
}
db.SingularTable(true)
return db, nil
}
|
[
"\"MYSQL_USER\"",
"\"MYSQL_PASSWORD\"",
"\"MYSQL_HOST\"",
"\"MYSQL_PORT\"",
"\"MYSQL_NAME\""
] |
[] |
[
"MYSQL_PASSWORD",
"MYSQL_USER",
"MYSQL_PORT",
"MYSQL_NAME",
"MYSQL_HOST"
] |
[]
|
["MYSQL_PASSWORD", "MYSQL_USER", "MYSQL_PORT", "MYSQL_NAME", "MYSQL_HOST"]
|
go
| 5 | 0 | |
test/nosplit.go
|
// +build !nacl
// run
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var tests = `
# These are test cases for the linker analysis that detects chains of
# nosplit functions that would cause a stack overflow.
#
# Lines beginning with # are comments.
#
# Each test case describes a sequence of functions, one per line.
# Each function definition is the function name, then the frame size,
# then optionally the keyword 'nosplit', then the body of the function.
# The body is assembly code, with some shorthands.
# The shorthand 'call x' stands for CALL x(SB).
# The shorthand 'callind' stands for 'CALL R0', where R0 is a register.
# Each test case must define a function named main, and it must be first.
# That is, a line beginning "main " indicates the start of a new test case.
# Within a stanza, ; can be used instead of \n to separate lines.
#
# After the function definition, the test case ends with an optional
# REJECT line, specifying the architectures on which the case should
# be rejected. "REJECT" without any architectures means reject on all architectures.
# The linker should accept the test case on systems not explicitly rejected.
#
# 64-bit systems do not attempt to execute test cases with frame sizes
# that are only 32-bit aligned.
# Ordinary function should work
main 0
# Large frame marked nosplit is always wrong.
main 10000 nosplit
REJECT
# Calling a large frame is okay.
main 0 call big
big 10000
# But not if the frame is nosplit.
main 0 call big
big 10000 nosplit
REJECT
# Recursion is okay.
main 0 call main
# Recursive nosplit runs out of space.
main 0 nosplit call main
REJECT
# Chains of ordinary functions okay.
main 0 call f1
f1 80 call f2
f2 80
# Chains of nosplit must fit in the stack limit, 128 bytes.
main 0 call f1
f1 80 nosplit call f2
f2 80 nosplit
REJECT
# Larger chains.
main 0 call f1
f1 16 call f2
f2 16 call f3
f3 16 call f4
f4 16 call f5
f5 16 call f6
f6 16 call f7
f7 16 call f8
f8 16 call end
end 1000
main 0 call f1
f1 16 nosplit call f2
f2 16 nosplit call f3
f3 16 nosplit call f4
f4 16 nosplit call f5
f5 16 nosplit call f6
f6 16 nosplit call f7
f7 16 nosplit call f8
f8 16 nosplit call end
end 1000
REJECT
# Test cases near the 128-byte limit.
# Ordinary stack split frame is always okay.
main 112
main 116
main 120
main 124
main 128
main 132
main 136
# A nosplit leaf can use the whole 128-CallSize bytes available on entry.
# (CallSize is 32 on ppc64)
main 96 nosplit
main 100 nosplit; REJECT ppc64 ppc64le
main 104 nosplit; REJECT ppc64 ppc64le
main 108 nosplit; REJECT ppc64 ppc64le
main 112 nosplit; REJECT ppc64 ppc64le
main 116 nosplit; REJECT ppc64 ppc64le
main 120 nosplit; REJECT ppc64 ppc64le
main 124 nosplit; REJECT ppc64 ppc64le
main 128 nosplit; REJECT
main 132 nosplit; REJECT
main 136 nosplit; REJECT
# Calling a nosplit function from a nosplit function requires
# having room for the saved caller PC and the called frame.
# Because ARM doesn't save LR in the leaf, it gets an extra 4 bytes.
# Because arm64 doesn't save LR in the leaf, it gets an extra 8 bytes.
# ppc64 doesn't save LR in the leaf, but CallSize is 32, so it gets 24 fewer bytes than amd64.
main 96 nosplit call f; f 0 nosplit
main 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
main 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
main 124 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 386
main 128 nosplit call f; f 0 nosplit; REJECT
main 132 nosplit call f; f 0 nosplit; REJECT
main 136 nosplit call f; f 0 nosplit; REJECT
# Calling a splitting function from a nosplit function requires
# having room for the saved caller PC of the call but also the
# saved caller PC for the call to morestack.
# RISC architectures differ in the same way as before.
main 96 nosplit call f; f 0 call f
main 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
main 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
main 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
main 124 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
main 128 nosplit call f; f 0 call f; REJECT
main 132 nosplit call f; f 0 call f; REJECT
main 136 nosplit call f; f 0 call f; REJECT
# Indirect calls are assumed to be splitting functions.
main 96 nosplit callind
main 100 nosplit callind; REJECT ppc64 ppc64le
main 104 nosplit callind; REJECT ppc64 ppc64le
main 108 nosplit callind; REJECT ppc64 ppc64le
main 112 nosplit callind; REJECT ppc64 ppc64le amd64
main 116 nosplit callind; REJECT ppc64 ppc64le amd64
main 120 nosplit callind; REJECT ppc64 ppc64le amd64 386
main 124 nosplit callind; REJECT ppc64 ppc64le amd64 386
main 128 nosplit callind; REJECT
main 132 nosplit callind; REJECT
main 136 nosplit callind; REJECT
# Issue 7623
main 0 call f; f 112
main 0 call f; f 116
main 0 call f; f 120
main 0 call f; f 124
main 0 call f; f 128
main 0 call f; f 132
main 0 call f; f 136
`
var (
commentRE = regexp.MustCompile(`(?m)^#.*`)
rejectRE = regexp.MustCompile(`(?s)\A(.+?)((\n|; *)REJECT(.*))?\z`)
lineRE = regexp.MustCompile(`(\w+) (\d+)( nosplit)?(.*)`)
callRE = regexp.MustCompile(`\bcall (\w+)\b`)
callindRE = regexp.MustCompile(`\bcallind\b`)
)
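// Parsing overview: the tests string is split into stanzas at each line starting with
// "main ". rejectRE separates the function lines from an optional trailing REJECT clause,
// lineRE parses "<name> <framesize> [nosplit] <body>", and callRE/callindRE rewrite the
// shorthand calls into real assembly before the generated package is built with "go build".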
func main() {
goarch := os.Getenv("GOARCH")
if goarch == "" {
goarch = runtime.GOARCH
}
version, err := exec.Command("go", "tool", "compile", "-V").Output()
if err != nil {
bug()
fmt.Printf("running go tool compile -V: %v\n", err)
return
}
if strings.Contains(string(version), "framepointer") {
// Skip this test if GOEXPERIMENT=framepointer
return
}
dir, err := ioutil.TempDir("", "go-test-nosplit")
if err != nil {
bug()
fmt.Printf("creating temp dir: %v\n", err)
return
}
defer os.RemoveAll(dir)
tests = strings.Replace(tests, "\t", " ", -1)
tests = commentRE.ReplaceAllString(tests, "")
nok := 0
nfail := 0
TestCases:
for len(tests) > 0 {
var stanza string
i := strings.Index(tests, "\nmain ")
if i < 0 {
stanza, tests = tests, ""
} else {
stanza, tests = tests[:i], tests[i+1:]
}
m := rejectRE.FindStringSubmatch(stanza)
if m == nil {
bug()
fmt.Printf("invalid stanza:\n\t%s\n", indent(stanza))
continue
}
lines := strings.TrimSpace(m[1])
reject := false
if m[2] != "" {
if strings.TrimSpace(m[4]) == "" {
reject = true
} else {
for _, rej := range strings.Fields(m[4]) {
if rej == goarch {
reject = true
}
}
}
}
if lines == "" && !reject {
continue
}
var gobuf bytes.Buffer
fmt.Fprintf(&gobuf, "package main\n")
var buf bytes.Buffer
ptrSize := 4
switch goarch {
case "mips64", "mips64le":
ptrSize = 8
fmt.Fprintf(&buf, "#define CALL JAL\n#define REGISTER (R0)\n")
case "ppc64", "ppc64le":
ptrSize = 8
fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (CTR)\n")
case "arm":
fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (R0)\n")
case "arm64":
ptrSize = 8
fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (R0)\n")
case "amd64":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER AX\n")
default:
fmt.Fprintf(&buf, "#define REGISTER AX\n")
}
for _, line := range strings.Split(lines, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
for i, subline := range strings.Split(line, ";") {
subline = strings.TrimSpace(subline)
if subline == "" {
continue
}
m := lineRE.FindStringSubmatch(subline)
if m == nil {
bug()
fmt.Printf("invalid function line: %s\n", subline)
continue TestCases
}
name := m[1]
size, _ := strconv.Atoi(m[2])
// The limit was originally 128 but is now 592.
// Instead of rewriting the test cases above, adjust
// the first stack frame to use up the extra bytes.
if i == 0 {
size += (720 - 128) - 128
// Noopt builds have a larger stackguard.
// See ../src/cmd/dist/buildruntime.go:stackGuardMultiplier
// This increase is included in obj.StackGuard
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
size += 720
}
}
}
if size%ptrSize == 4 || goarch == "arm64" && size != 0 && (size+8)%16 != 0 {
continue TestCases
}
nosplit := m[3]
body := m[4]
if nosplit != "" {
nosplit = ",7"
} else {
nosplit = ",0"
}
body = callRE.ReplaceAllString(body, "CALL ·$1(SB);")
body = callindRE.ReplaceAllString(body, "CALL REGISTER;")
fmt.Fprintf(&gobuf, "func %s()\n", name)
fmt.Fprintf(&buf, "TEXT ·%s(SB)%s,$%d-0\n\t%s\n\tRET\n\n", name, nosplit, size, body)
}
}
if err := ioutil.WriteFile(filepath.Join(dir, "asm.s"), buf.Bytes(), 0666); err != nil {
log.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), gobuf.Bytes(), 0666); err != nil {
log.Fatal(err)
}
cmd := exec.Command("go", "build")
cmd.Dir = dir
output, err := cmd.CombinedOutput()
if err == nil {
nok++
if reject {
bug()
fmt.Printf("accepted incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
}
} else {
nfail++
if !reject {
bug()
fmt.Printf("rejected incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
fmt.Printf("\n\tlinker output:\n\t%s\n", indent(string(output)))
}
}
}
if !bugged && (nok == 0 || nfail == 0) {
bug()
fmt.Printf("not enough test cases run\n")
}
}
func indent(s string) string {
return strings.Replace(s, "\n", "\n\t", -1)
}
var bugged = false
func bug() {
if !bugged {
bugged = true
fmt.Printf("BUG\n")
}
}
|
[
"\"GOARCH\"",
"\"GO_GCFLAGS\""
] |
[] |
[
"GOARCH",
"GO_GCFLAGS"
] |
[]
|
["GOARCH", "GO_GCFLAGS"]
|
go
| 2 | 0 | |
workflow/controller/operator.go
|
package controller
import (
"encoding/json"
"fmt"
"math"
"os"
"reflect"
"regexp"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/argoproj/pkg/humanize"
argokubeerr "github.com/argoproj/pkg/kube/errors"
"github.com/argoproj/pkg/strftime"
jsonpatch "github.com/evanphx/json-patch"
log "github.com/sirupsen/logrus"
"github.com/valyala/fasttemplate"
apiv1 "k8s.io/api/core/v1"
policyv1beta "k8s.io/api/policy/v1beta1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo/config"
"github.com/argoproj/argo/errors"
"github.com/argoproj/argo/pkg/apis/workflow"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/argoproj/argo/util"
errorsutil "github.com/argoproj/argo/util/errors"
"github.com/argoproj/argo/util/intstr"
"github.com/argoproj/argo/util/resource"
"github.com/argoproj/argo/util/retry"
"github.com/argoproj/argo/workflow/common"
controllercache "github.com/argoproj/argo/workflow/controller/cache"
"github.com/argoproj/argo/workflow/controller/estimation"
"github.com/argoproj/argo/workflow/controller/indexes"
"github.com/argoproj/argo/workflow/metrics"
"github.com/argoproj/argo/workflow/progress"
argosync "github.com/argoproj/argo/workflow/sync"
"github.com/argoproj/argo/workflow/templateresolution"
wfutil "github.com/argoproj/argo/workflow/util"
"github.com/argoproj/argo/workflow/validate"
)
// wfOperationCtx is the context for evaluation and operation of a single workflow
type wfOperationCtx struct {
// wf is the workflow object. It should not be used in execution logic; woc.execWf.Spec should be used instead
wf *wfv1.Workflow
// orig is the original workflow object for purposes of creating a patch
orig *wfv1.Workflow
// updated indicates whether or not the workflow object itself was updated
// and needs to be persisted back to kubernetes
updated bool
// log is a logrus logging context to correlate logs with a workflow
log *log.Entry
// controller reference to workflow controller
controller *WorkflowController
// estimator estimates the workflow's duration
estimator estimation.Estimator
// globalParams holds any parameters that are available to be referenced
// in the global scope (e.g. workflow.parameters.XXX).
globalParams common.Parameters
// volumes holds a DeepCopy of wf.Spec.Volumes to perform substitutions.
// It is then used in addVolumeReferences() when creating a pod.
volumes []apiv1.Volume
// ArtifactRepository contains the default location of an artifact repository for container artifacts
artifactRepository *config.ArtifactRepository
// map of pods which need to be labeled with completed=true
completedPods map[string]bool
// map of pods which is identified as succeeded=true
succeededPods map[string]bool
// deadline is the time by which this operation should relinquish
// its hold on the workflow so that an operation does not run for too long
// and starve other workqueue items. It also enables workflow progress to
// be periodically synced to the database.
deadline time.Time
// activePods tracks the number of active (Running/Pending) pods for controlling
// parallelism
activePods int64
// workflowDeadline is the deadline which the workflow is expected to complete before we
// terminate the workflow.
workflowDeadline *time.Time
eventRecorder record.EventRecorder
// preExecutionNodePhases contains the phases of all the nodes before the current operation. Necessary to infer
// changes in phase for metric emission
preExecutionNodePhases map[string]wfv1.NodePhase
// execWf holds the Workflow for use in execution.
// In Normal workflow scenario: It holds copy of workflow object
// In Submit From WorkflowTemplate: It holds merged workflow with WorkflowDefault, Workflow and WorkflowTemplate
// 'execWf.Spec' should usually be used instead of `wf.Spec`, with two exceptions for user editable fields:
// 1. `wf.Spec.Suspend`
// 2. `wf.Spec.Shutdown`
execWf *wfv1.Workflow
}
var (
// ErrDeadlineExceeded indicates the operation exceeded its deadline for execution
ErrDeadlineExceeded = errors.New(errors.CodeTimeout, "Deadline exceeded")
// ErrParallelismReached indicates this workflow reached its parallelism limit
ErrParallelismReached = errors.New(errors.CodeForbidden, "Max parallelism reached")
// ErrTimeout indicates a specific template timed out
ErrTimeout = errors.New(errors.CodeTimeout, "timeout")
)
// maxOperationTime is the maximum time a workflow operation is allowed to run
// for before requeuing the workflow onto the workqueue.
const maxOperationTime = 10 * time.Second
const defaultRequeueTime = maxOperationTime
// failedNodeStatus is a subset of NodeStatus that is only used to Marshal certain fields into a JSON of failed nodes
type failedNodeStatus struct {
DisplayName string `json:"displayName"`
Message string `json:"message"`
TemplateName string `json:"templateName"`
Phase string `json:"phase"`
PodName string `json:"podName"`
FinishedAt metav1.Time `json:"finishedAt"`
}
// newWorkflowOperationCtx creates and initializes a new wfOperationCtx object.
func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOperationCtx {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
wfCopy := wf.DeepCopyObject().(*wfv1.Workflow)
woc := wfOperationCtx{
wf: wfCopy,
orig: wf,
execWf: wfCopy,
updated: false,
log: log.WithFields(log.Fields{
"workflow": wf.ObjectMeta.Name,
"namespace": wf.ObjectMeta.Namespace,
}),
controller: wfc,
globalParams: make(map[string]string),
volumes: wf.Spec.DeepCopy().Volumes,
artifactRepository: &wfc.Config.ArtifactRepository,
completedPods: make(map[string]bool),
succeededPods: make(map[string]bool),
deadline: time.Now().UTC().Add(maxOperationTime),
eventRecorder: wfc.eventRecorderManager.Get(wf.Namespace),
preExecutionNodePhases: make(map[string]wfv1.NodePhase),
}
if woc.wf.Status.Nodes == nil {
woc.wf.Status.Nodes = make(map[string]wfv1.NodeStatus)
}
if woc.wf.Status.StoredTemplates == nil {
woc.wf.Status.StoredTemplates = make(map[string]wfv1.Template)
}
return &woc
}
// operate is the main operator logic of a workflow. It evaluates the current state of the workflow,
// and its pods and decides how to proceed down the execution path.
// TODO: an error returned by this method should result in requeuing the workflow to be retried at a
// later time
// As you must not call `persistUpdates` twice, you must not call `operate` twice.
func (woc *wfOperationCtx) operate() {
defer func() {
if woc.wf.Status.Fulfilled() {
_ = woc.killDaemonedChildren("")
}
woc.persistUpdates()
}()
defer func() {
if r := recover(); r != nil {
woc.log.WithFields(log.Fields{"stack": string(debug.Stack()), "r": r}).Errorf("Recovered from panic")
if rerr, ok := r.(error); ok {
woc.markWorkflowError(rerr, true)
} else {
woc.markWorkflowPhase(wfv1.NodeError, true, fmt.Sprintf("%v", r))
}
woc.controller.metrics.OperationPanic()
}
}()
woc.log.Infof("Processing workflow")
// Load the WorkflowSpec for execution
execTmplRef, execArgs, err := woc.loadExecutionSpec()
if err != nil {
woc.log.WithError(err).Errorf("Unable to get Workflow Template Reference for workflow")
woc.markWorkflowError(err, true)
return
}
// Update workflow duration variable
woc.globalParams[common.GlobalVarWorkflowDuration] = fmt.Sprintf("%f", time.Since(woc.wf.Status.StartedAt.Time).Seconds())
// Populate the phase of all the nodes prior to execution
for _, node := range woc.wf.Status.Nodes {
woc.preExecutionNodePhases[node.ID] = node.Phase
}
woc.setGlobalParameters(execArgs)
// Perform one-time workflow validation
if woc.wf.Status.Phase == "" {
woc.markWorkflowRunning()
err := woc.createPDBResource()
if err != nil {
msg := fmt.Sprintf("Unable to create PDB resource for workflow, %s error: %s", woc.wf.Name, err)
woc.markWorkflowFailed(msg)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", msg)
return
}
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeNormal, "WorkflowRunning", "Workflow Running")
validateOpts := validate.ValidateOpts{ContainerRuntimeExecutor: woc.controller.GetContainerRuntimeExecutor()}
wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().WorkflowTemplates(woc.wf.Namespace))
cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().ClusterWorkflowTemplates())
// Validate the execution wfSpec
wfConditions, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, woc.wf, validateOpts)
if err != nil {
msg := fmt.Sprintf("invalid spec: %s", err.Error())
woc.markWorkflowFailed(msg)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", msg)
return
}
// If we received conditions during validation (such as SpecWarnings), add them to the Workflow object
if len(*wfConditions) > 0 {
woc.wf.Status.Conditions.JoinConditions(wfConditions)
woc.updated = true
}
woc.workflowDeadline = woc.getWorkflowDeadline()
// Workflow will not be requeued if workflow steps are in pending state.
// Workflow needs to requeue on its deadline,
if woc.workflowDeadline != nil {
woc.requeue(time.Until(*woc.workflowDeadline))
}
if woc.execWf.Spec.Metrics != nil {
realTimeScope := map[string]func() float64{common.GlobalVarWorkflowDuration: func() float64 {
return time.Since(woc.wf.Status.StartedAt.Time).Seconds()
}}
woc.computeMetrics(woc.execWf.Spec.Metrics.Prometheus, woc.globalParams, realTimeScope, true)
}
woc.wf.Status.EstimatedDuration = woc.estimateWorkflowDuration()
} else {
woc.workflowDeadline = woc.getWorkflowDeadline()
err := woc.podReconciliation()
if err == nil {
err = woc.failSuspendedAndPendingNodesAfterDeadlineOrShutdown()
}
if err != nil {
woc.log.WithError(err).WithField("workflow", woc.wf.ObjectMeta.Name).Error("workflow timeout")
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowTimedOut", "Workflow timed out")
// TODO: we need to re-add to the workqueue, but should happen in caller
return
}
}
if woc.wf.Spec.Suspend != nil && *woc.wf.Spec.Suspend {
woc.log.Infof("workflow suspended")
return
}
if woc.execWf.Spec.Parallelism != nil {
woc.activePods = woc.countActivePods()
}
// Create a starting template context.
tmplCtx, err := woc.createTemplateContext(wfv1.ResourceScopeLocal, "")
if err != nil {
woc.log.WithError(err).Error("Failed to create a template context")
woc.markWorkflowError(err, true)
return
}
if woc.execWf.Spec.ArtifactRepositoryRef != nil {
repo, err := woc.getArtifactRepositoryByRef(woc.execWf.Spec.ArtifactRepositoryRef)
if err == nil {
woc.artifactRepository = repo
} else {
msg := fmt.Sprintf("Failed to load artifact repository configMap: %+v", err)
woc.log.Errorf(msg)
woc.markWorkflowError(err, true)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", msg)
return
}
}
err = woc.substituteParamsInVolumes(woc.globalParams)
if err != nil {
woc.log.WithError(err).Error("volumes global param substitution error")
woc.markWorkflowError(err, true)
return
}
err = woc.createPVCs()
if err != nil {
if errorsutil.IsTransientErr(err) {
// Error was most likely caused by a lack of resources.
// In this case, Workflow will be in pending state and requeue.
woc.markWorkflowPhase(wfv1.NodePending, false, fmt.Sprintf("Waiting for a PVC to be created. %v", err))
woc.requeue(defaultRequeueTime)
return
}
msg := "pvc create error"
woc.log.WithError(err).Error(msg)
woc.markWorkflowError(err, true)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", fmt.Sprintf("%s %s: %+v", woc.wf.ObjectMeta.Name, msg, err))
return
} else if woc.wf.Status.Phase == wfv1.NodePending {
// Workflow might be in pending state if previous PVC creation is forbidden
woc.markWorkflowRunning()
}
node, err := woc.executeTemplate(woc.wf.ObjectMeta.Name, execTmplRef, tmplCtx, execArgs, &executeTemplateOpts{})
if err != nil {
// the error is handled in the callee, so just log it.
msg := "error in entry template execution"
woc.log.WithError(err).Error(msg)
msg = fmt.Sprintf("%s %s: %+v", woc.wf.Name, msg, err)
switch err {
case ErrDeadlineExceeded:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowTimedOut", msg)
default:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", msg)
}
return
}
if node == nil || !node.Fulfilled() {
// node can be nil if a workflow created immediately in a parallelism == 0 state
return
}
workflowStatus := node.Phase
var onExitNode *wfv1.NodeStatus
if woc.execWf.Spec.OnExit != "" && woc.wf.Spec.Shutdown.ShouldExecute(true) {
if workflowStatus == wfv1.NodeSkipped {
// treat skipped the same as Succeeded for workflow.status
woc.globalParams[common.GlobalVarWorkflowStatus] = string(wfv1.NodeSucceeded)
} else {
woc.globalParams[common.GlobalVarWorkflowStatus] = string(workflowStatus)
}
var failures []failedNodeStatus
for _, node := range woc.wf.Status.Nodes {
if node.Phase == wfv1.NodeFailed || node.Phase == wfv1.NodeError {
failures = append(failures,
failedNodeStatus{
DisplayName: node.DisplayName,
Message: node.Message,
TemplateName: node.TemplateName,
Phase: string(node.Phase),
PodName: node.ID,
FinishedAt: node.FinishedAt,
})
}
}
failedNodeBytes, err := json.Marshal(failures)
if err != nil {
woc.log.Errorf("Error marshalling failed nodes list: %+v", err)
// No need to return here
}
// This strconv.Quote is necessary so that the escaped quotes are not removed during parameter substitution
woc.globalParams[common.GlobalVarWorkflowFailures] = strconv.Quote(string(failedNodeBytes))
woc.log.Infof("Running OnExit handler: %s", woc.execWf.Spec.OnExit)
onExitNodeName := common.GenerateOnExitNodeName(woc.wf.ObjectMeta.Name)
onExitNode, err = woc.executeTemplate(onExitNodeName, &wfv1.WorkflowStep{Template: woc.execWf.Spec.OnExit}, tmplCtx, execArgs, &executeTemplateOpts{onExitTemplate: true})
if err != nil {
// the error is handled in the callee, so just log it.
woc.log.WithError(err).Error("error in exit template execution")
return
}
if onExitNode == nil || !onExitNode.Fulfilled() {
return
}
}
var workflowMessage string
if node.FailedOrError() && woc.execWf.Spec.Shutdown != "" {
workflowMessage = fmt.Sprintf("Stopped with strategy '%s'", woc.execWf.Spec.Shutdown)
} else {
workflowMessage = node.Message
}
// If we get here, the workflow completed, all PVCs were deleted successfully, and
// exit handlers were executed. We now need to infer the workflow phase from the
// node phase.
switch workflowStatus {
case wfv1.NodeSucceeded, wfv1.NodeSkipped:
if onExitNode != nil && onExitNode.FailedOrError() {
// if main workflow succeeded, but the exit node was unsuccessful
// the workflow is now considered unsuccessful.
woc.markWorkflowPhase(onExitNode.Phase, true, onExitNode.Message)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", onExitNode.Message)
} else {
woc.markWorkflowSuccess()
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeNormal, "WorkflowSucceeded", "Workflow completed")
}
case wfv1.NodeFailed:
woc.markWorkflowFailed(workflowMessage)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", workflowMessage)
case wfv1.NodeError:
woc.markWorkflowPhase(wfv1.NodeError, true, workflowMessage)
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", workflowMessage)
default:
// NOTE: we should never make it here because if the node was 'Running' we should have
// returned earlier.
err = errors.InternalErrorf("Unexpected node phase %s: %+v", woc.wf.ObjectMeta.Name, err)
woc.markWorkflowError(err, true)
}
if woc.execWf.Spec.Metrics != nil {
realTimeScope := map[string]func() float64{common.GlobalVarWorkflowDuration: func() float64 {
return node.FinishedAt.Sub(node.StartedAt.Time).Seconds()
}}
woc.globalParams[common.GlobalVarWorkflowStatus] = string(workflowStatus)
woc.computeMetrics(woc.execWf.Spec.Metrics.Prometheus, woc.globalParams, realTimeScope, false)
}
err = woc.deletePVCs()
if err != nil {
woc.log.WithError(err).Warn("failed to delete PVCs")
}
}
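// getWorkflowDeadline returns the absolute time by which the workflow must finish:
// the start time (truncated to whole seconds) plus spec.activeDeadlineSeconds.
// It returns nil when no deadline is configured or the workflow has not started yet.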
func (woc *wfOperationCtx) getWorkflowDeadline() *time.Time {
if woc.execWf.Spec.ActiveDeadlineSeconds == nil {
return nil
}
if woc.wf.Status.StartedAt.IsZero() {
return nil
}
startedAt := woc.wf.Status.StartedAt.Truncate(time.Second)
deadline := startedAt.Add(time.Duration(*woc.execWf.Spec.ActiveDeadlineSeconds) * time.Second).UTC()
return &deadline
}
// setGlobalParameters sets the globalParam map with global parameters
func (woc *wfOperationCtx) setGlobalParameters(executionParameters wfv1.Arguments) {
woc.globalParams[common.GlobalVarWorkflowName] = woc.wf.ObjectMeta.Name
woc.globalParams[common.GlobalVarWorkflowNamespace] = woc.wf.ObjectMeta.Namespace
woc.globalParams[common.GlobalVarWorkflowServiceAccountName] = woc.execWf.Spec.ServiceAccountName
woc.globalParams[common.GlobalVarWorkflowUID] = string(woc.wf.ObjectMeta.UID)
woc.globalParams[common.GlobalVarWorkflowCreationTimestamp] = woc.wf.ObjectMeta.CreationTimestamp.Format(time.RFC3339)
if woc.execWf.Spec.Priority != nil {
woc.globalParams[common.GlobalVarWorkflowPriority] = strconv.Itoa(int(*woc.execWf.Spec.Priority))
}
for char := range strftime.FormatChars {
cTimeVar := fmt.Sprintf("%s.%s", common.GlobalVarWorkflowCreationTimestamp, string(char))
woc.globalParams[cTimeVar] = strftime.Format("%"+string(char), woc.wf.ObjectMeta.CreationTimestamp.Time)
}
if workflowParameters, err := json.Marshal(woc.execWf.Spec.Arguments.Parameters); err == nil {
woc.globalParams[common.GlobalVarWorkflowParameters] = string(workflowParameters)
}
for _, param := range executionParameters.Parameters {
woc.globalParams["workflow.parameters."+param.Name] = param.Value.String()
}
for k, v := range woc.wf.ObjectMeta.Annotations {
woc.globalParams["workflow.annotations."+k] = v
}
for k, v := range woc.wf.ObjectMeta.Labels {
woc.globalParams["workflow.labels."+k] = v
}
if woc.wf.Status.Outputs != nil {
for _, param := range woc.wf.Status.Outputs.Parameters {
woc.globalParams["workflow.outputs.parameters."+param.Name] = param.Value.String()
}
}
}
// persistUpdates will update a workflow with any updates made during workflow operation.
// It also labels any pods as completed if we have extracted everything we need from it.
// NOTE: a previous implementation used Patch instead of Update, but Patch does not work with
// the fake CRD clientset which makes unit testing extremely difficult.
func (woc *wfOperationCtx) persistUpdates() {
if !woc.updated {
return
}
resource.UpdateResourceDurations(woc.wf)
progress.UpdateProgress(woc.wf)
// You MUST not call `persistUpdates` twice.
	// * It breaks `reapplyUpdate`, which cannot work unless the resource versions are different.
// * It will double the number of Kubernetes API requests.
if woc.orig.ResourceVersion != woc.wf.ResourceVersion {
woc.log.Panic("cannot persist updates with mismatched resource versions")
}
wfClient := woc.controller.wfclientset.ArgoprojV1alpha1().Workflows(woc.wf.ObjectMeta.Namespace)
// try and compress nodes if needed
nodes := woc.wf.Status.Nodes
err := woc.controller.hydrator.Dehydrate(woc.wf)
if err != nil {
woc.log.Warnf("Failed to dehydrate: %v", err)
woc.markWorkflowError(err, true)
}
// Release all acquired lock for completed workflow
if woc.wf.Status.Synchronization != nil && woc.wf.Status.Fulfilled() {
if woc.controller.syncManager.ReleaseAll(woc.wf) {
log.WithFields(log.Fields{"key": woc.wf.Name}).Info("Released all acquired locks")
}
}
wf, err := wfClient.Update(woc.wf)
if err != nil {
woc.log.Warnf("Error updating workflow: %v %s", err, apierr.ReasonForError(err))
if argokubeerr.IsRequestEntityTooLargeErr(err) {
woc.persistWorkflowSizeLimitErr(wfClient, err)
return
}
if !apierr.IsConflict(err) {
return
}
woc.log.Info("Re-applying updates on latest version and retrying update")
wf, err := woc.reapplyUpdate(wfClient, nodes)
if err != nil {
woc.log.Infof("Failed to re-apply update: %+v", err)
return
}
woc.wf = wf
} else {
woc.wf = wf
woc.controller.hydrator.HydrateWithNodes(woc.wf, nodes)
}
if !woc.controller.hydrator.IsHydrated(woc.wf) {
panic("workflow should be hydrated")
}
woc.log.WithFields(log.Fields{"resourceVersion": woc.wf.ResourceVersion, "phase": woc.wf.Status.Phase}).Info("Workflow update successful")
if os.Getenv("INFORMER_WRITE_BACK") != "false" {
if err := woc.writeBackToInformer(); err != nil {
woc.markWorkflowError(err, true)
return
}
} else {
time.Sleep(enoughTimeForInformerSync)
}
// It is important that we *never* label pods as completed until we successfully updated the workflow
// Failing to do so means we can have inconsistent state.
	// TODO: The completedPods will be labeled multiple times. This could be improved in the future.
	// Send succeeded or completed pods to the gcPods channel to delete them later, depending on the PodGCStrategy.
// Notice we do not need to label the pod if we will delete it later for GC. Otherwise, that may even result in
// errors if we label a pod that was deleted already.
if woc.execWf.Spec.PodGC != nil {
switch woc.execWf.Spec.PodGC.Strategy {
case wfv1.PodGCOnPodSuccess:
for podName := range woc.succeededPods {
woc.controller.gcPods <- fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Namespace, podName)
}
case wfv1.PodGCOnPodCompletion:
for podName := range woc.completedPods {
woc.controller.gcPods <- fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Namespace, podName)
}
}
} else {
// label pods which will not be deleted
for podName := range woc.completedPods {
woc.controller.completedPods <- fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Namespace, podName)
}
}
}
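// writeBackToInformer writes the updated workflow back into the informer's store so that
// subsequent operations see the latest resource version without waiting for a watch event.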
func (woc *wfOperationCtx) writeBackToInformer() error {
un, err := wfutil.ToUnstructured(woc.wf)
if err != nil {
return fmt.Errorf("failed to convert workflow to unstructured: %w", err)
}
err = woc.controller.wfInformer.GetStore().Update(un)
if err != nil {
return fmt.Errorf("failed to update informer store: %w", err)
}
return nil
}
// persistWorkflowSizeLimitErr will fail the workflow with an error when we hit the resource size limit
// See https://github.com/argoproj/argo/issues/913
func (woc *wfOperationCtx) persistWorkflowSizeLimitErr(wfClient v1alpha1.WorkflowInterface, err error) {
woc.wf = woc.orig.DeepCopy()
woc.markWorkflowError(err, true)
_, err = wfClient.Update(woc.wf)
if err != nil {
woc.log.Warnf("Error updating workflow with size error: %v", err)
}
}
// reapplyUpdate GETs the latest version of the workflow, re-applies the updates and
// retries the UPDATE multiple times. For reasoning behind this technique, see:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
func (woc *wfOperationCtx) reapplyUpdate(wfClient v1alpha1.WorkflowInterface, nodes wfv1.Nodes) (*wfv1.Workflow, error) {
// if this condition is true, then this func will always error
if woc.orig.ResourceVersion != woc.wf.ResourceVersion {
woc.log.Panic("cannot re-apply update with mismatched resource versions")
}
err := woc.controller.hydrator.Hydrate(woc.orig)
if err != nil {
return nil, err
}
// First generate the patch
oldData, err := json.Marshal(woc.orig)
if err != nil {
return nil, err
}
woc.controller.hydrator.HydrateWithNodes(woc.wf, nodes)
newData, err := json.Marshal(woc.wf)
if err != nil {
return nil, err
}
patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
if err != nil {
return nil, err
}
// Next get latest version of the workflow, apply the patch and retry the update
attempt := 1
for {
currWf, err := wfClient.Get(woc.wf.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
err = woc.controller.hydrator.Hydrate(currWf)
if err != nil {
return nil, err
}
currWfBytes, err := json.Marshal(currWf)
if err != nil {
return nil, err
}
newWfBytes, err := jsonpatch.MergePatch(currWfBytes, patchBytes)
if err != nil {
return nil, err
}
var newWf wfv1.Workflow
err = json.Unmarshal(newWfBytes, &newWf)
if err != nil {
return nil, err
}
err = woc.controller.hydrator.Dehydrate(&newWf)
if err != nil {
return nil, err
}
wf, err := wfClient.Update(&newWf)
if err == nil {
woc.log.Infof("Update retry attempt %d successful", attempt)
woc.controller.hydrator.HydrateWithNodes(wf, nodes)
return wf, nil
}
attempt++
woc.log.Warnf("Update retry attempt %d failed: %v", attempt, err)
if attempt > 5 {
return nil, err
}
}
}
// requeue this workflow onto the workqueue for later processing
func (woc *wfOperationCtx) requeue(afterDuration time.Duration) {
key, err := cache.MetaNamespaceKeyFunc(woc.wf)
if err != nil {
woc.log.Errorf("Failed to requeue workflow %s: %v", woc.wf.ObjectMeta.Name, err)
return
}
woc.controller.wfQueue.AddAfter(key, afterDuration)
}
// processNodeRetries updates the retry node state based on the child node state and the retry strategy and returns the node.
func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrategy wfv1.RetryStrategy, opts *executeTemplateOpts) (*wfv1.NodeStatus, bool, error) {
if node.Fulfilled() {
return node, true, nil
}
lastChildNode := getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
if lastChildNode == nil {
return node, true, nil
}
if !lastChildNode.Fulfilled() {
// last child node is still running.
return node, true, nil
}
if !lastChildNode.FailedOrError() {
node.Outputs = lastChildNode.Outputs.DeepCopy()
woc.wf.Status.Nodes[node.ID] = *node
return woc.markNodePhase(node.Name, wfv1.NodeSucceeded), true, nil
}
if woc.execWf.Spec.Shutdown != "" || (woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)) {
var message string
if woc.execWf.Spec.Shutdown != "" {
message = fmt.Sprintf("Stopped with strategy '%s'", woc.execWf.Spec.Shutdown)
} else {
message = fmt.Sprintf("retry exceeded workflow deadline %s", *woc.workflowDeadline)
}
woc.log.Infoln(message)
return woc.markNodePhase(node.Name, lastChildNode.Phase, message), true, nil
}
if retryStrategy.Backoff != nil {
maxDurationDeadline := time.Time{}
// Process max duration limit
if retryStrategy.Backoff.MaxDuration != "" && len(node.Children) > 0 {
maxDuration, err := parseStringToDuration(retryStrategy.Backoff.MaxDuration)
if err != nil {
return nil, false, err
}
firstChildNode := getChildNodeIndex(node, woc.wf.Status.Nodes, 0)
maxDurationDeadline = firstChildNode.StartedAt.Add(maxDuration)
if time.Now().After(maxDurationDeadline) {
woc.log.Infoln("Max duration limit exceeded. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "Max duration limit exceeded"), true, nil
}
}
// Max duration limit hasn't been exceeded, process back off
if retryStrategy.Backoff.Duration == "" {
return nil, false, fmt.Errorf("no base duration specified for retryStrategy")
}
baseDuration, err := parseStringToDuration(retryStrategy.Backoff.Duration)
if err != nil {
return nil, false, err
}
timeToWait := baseDuration
retryStrategyBackoffFactor, err := intstr.Int32(retryStrategy.Backoff.Factor)
if err != nil {
return nil, false, err
}
if retryStrategyBackoffFactor != nil && *retryStrategyBackoffFactor > 0 {
// Formula: timeToWait = duration * factor^retry_number
			// Note that timeToWait should equal the base duration for the first retry attempt.
timeToWait = baseDuration * time.Duration(math.Pow(float64(*retryStrategyBackoffFactor), float64(len(node.Children)-1)))
}
waitingDeadline := lastChildNode.FinishedAt.Add(timeToWait)
// If the waiting deadline is after the max duration deadline, then it's futile to wait until then. Stop early
if !maxDurationDeadline.IsZero() && waitingDeadline.After(maxDurationDeadline) {
woc.log.Infoln("Backoff would exceed max duration limit. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "Backoff would exceed max duration limit"), true, nil
}
// See if we have waited past the deadline
if time.Now().Before(waitingDeadline) {
woc.requeue(timeToWait)
retryMessage := fmt.Sprintf("Backoff for %s", humanize.Duration(timeToWait))
return woc.markNodePhase(node.Name, node.Phase, retryMessage), false, nil
}
woc.log.WithField("node", node.Name).Infof("node has maxDuration set, setting executionDeadline to: %s", humanize.Timestamp(maxDurationDeadline))
opts.executionDeadline = maxDurationDeadline
node = woc.markNodePhase(node.Name, node.Phase, "")
}
var retryOnFailed bool
var retryOnError bool
switch retryStrategy.RetryPolicy {
case wfv1.RetryPolicyAlways:
retryOnFailed = true
retryOnError = true
case wfv1.RetryPolicyOnError:
retryOnFailed = false
retryOnError = true
case wfv1.RetryPolicyOnFailure, "":
retryOnFailed = true
retryOnError = false
default:
return nil, false, fmt.Errorf("%s is not a valid RetryPolicy", retryStrategy.RetryPolicy)
}
if (lastChildNode.Phase == wfv1.NodeFailed && !retryOnFailed) || (lastChildNode.Phase == wfv1.NodeError && !retryOnError) {
woc.log.Infof("Node not set to be retried after status: %s", lastChildNode.Phase)
return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil
}
if !lastChildNode.CanRetry() {
woc.log.Infof("Node cannot be retried. Marking it failed")
return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil
}
limit, err := intstr.Int32(retryStrategy.Limit)
if err != nil {
return nil, false, err
}
if retryStrategy.Limit != nil && limit != nil && int32(len(node.Children)) > *limit {
woc.log.Infoln("No more retries left. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "No more retries left"), true, nil
}
woc.log.Infof("%d child nodes of %s failed. Trying again...", len(node.Children), node.Name)
return node, true, nil
}
// podReconciliation is the process by which a workflow will examine all its related
// pods and update the node state before continuing the evaluation of the workflow.
// Records all pods which were observed completed, which will be labeled completed=true
// after successful persist of the workflow.
func (woc *wfOperationCtx) podReconciliation() error {
podList, err := woc.getAllWorkflowPods()
if err != nil {
return err
}
seenPods := make(map[string]*apiv1.Pod)
seenPodLock := &sync.Mutex{}
wfNodesLock := &sync.RWMutex{}
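	// performAssessment maps a pod to its workflow node (via the node-name annotation), updates the
	// node status from the pod, saves memoized outputs if applicable, and records completed and
	// succeeded pods for later labeling or garbage collection.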
performAssessment := func(pod *apiv1.Pod) {
if pod == nil {
return
}
nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName]
nodeID := woc.wf.NodeID(nodeNameForPod)
seenPodLock.Lock()
seenPods[nodeID] = pod
seenPodLock.Unlock()
wfNodesLock.Lock()
defer wfNodesLock.Unlock()
if node, ok := woc.wf.Status.Nodes[nodeID]; ok {
if newState := woc.assessNodeStatus(pod, &node); newState != nil {
woc.wf.Status.Nodes[nodeID] = *newState
woc.addOutputsToGlobalScope(node.Outputs)
if node.MemoizationStatus != nil {
c := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, node.MemoizationStatus.CacheName)
err := c.Save(node.MemoizationStatus.Key, node.ID, node.Outputs)
if err != nil {
woc.log.WithFields(log.Fields{"nodeID": node.ID}).WithError(err).Error("Failed to save node outputs to cache")
node.Phase = wfv1.NodeError
}
}
woc.updated = true
}
node := woc.wf.Status.Nodes[pod.ObjectMeta.Name]
if node.Fulfilled() && !node.IsDaemoned() {
if tmpVal, tmpOk := pod.Labels[common.LabelKeyCompleted]; tmpOk {
if tmpVal == "true" {
return
}
}
woc.completedPods[pod.ObjectMeta.Name] = true
if woc.shouldPrintPodSpec(node) {
printPodSpecLog(pod, woc.wf.Name)
}
if !woc.orig.Status.Nodes[node.ID].Fulfilled() {
woc.onNodeComplete(&node)
}
}
if node.Succeeded() {
woc.succeededPods[pod.ObjectMeta.Name] = true
}
}
}
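	// Use a buffered channel as a semaphore to limit the number of pods assessed concurrently.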
parallelPodNum := make(chan string, 500)
var wg sync.WaitGroup
for _, pod := range podList {
parallelPodNum <- pod.Name
wg.Add(1)
go func(pod *apiv1.Pod) {
defer wg.Done()
performAssessment(pod)
err = woc.applyExecutionControl(pod, wfNodesLock)
if err != nil {
woc.log.Warnf("Failed to apply execution control to pod %s", pod.Name)
}
<-parallelPodNum
}(pod)
}
wg.Wait()
// Now check for deleted pods. Iterate our nodes. If any one of our nodes does not show up in
// the seen list it implies that the pod was deleted without the controller seeing the event.
	// It is now impossible to infer pod status. All we can do at this point is mark the node with Error, or
	// re-submit it.
for nodeID, node := range woc.wf.Status.Nodes {
if node.Type != wfv1.NodeTypePod || node.Fulfilled() || node.StartedAt.IsZero() {
// node is not a pod, it is already complete, or it can be re-run.
continue
}
if _, ok := seenPods[nodeID]; !ok {
// If the node is pending and the pod does not exist, it could be the case that we want to try to submit it
// again instead of marking it as an error. Check if that's the case.
if node.Pending() {
continue
}
node.Message = "pod deleted"
node.Phase = wfv1.NodeError
// FinishedAt must be set since retry strategy depends on it to determine the backoff duration.
// See processNodeRetries for more details.
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.wf.Status.Nodes[nodeID] = node
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("node", node.Name).Error("Pod for node deleted")
woc.updated = true
} else {
// At this point we are certain that the pod associated with our node is running or has been run;
// it is safe to extract the k8s-node information given this knowledge.
if node.HostNodeName != seenPods[nodeID].Spec.NodeName {
node.HostNodeName = seenPods[nodeID].Spec.NodeName
woc.wf.Status.Nodes[nodeID] = node
woc.updated = true
}
}
}
return nil
}
// shouldPrintPodSpec returns whether the given node's pod spec should be printed to the log
func (woc *wfOperationCtx) shouldPrintPodSpec(node wfv1.NodeStatus) bool {
return woc.controller.Config.PodSpecLogStrategy.AllPods ||
(woc.controller.Config.PodSpecLogStrategy.FailedPod && node.FailedOrError())
}
// failSuspendedAndPendingNodesAfterDeadlineOrShutdown fails any active suspend nodes when the workflow
// is shutting down or has exceeded its deadline, and any pending nodes once the deadline has passed
func (woc *wfOperationCtx) failSuspendedAndPendingNodesAfterDeadlineOrShutdown() error {
deadlineExceeded := woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)
if woc.execWf.Spec.Shutdown != "" || deadlineExceeded {
for _, node := range woc.wf.Status.Nodes {
if node.IsActiveSuspendNode() || (node.Phase == wfv1.NodePending && deadlineExceeded) {
var message string
if woc.execWf.Spec.Shutdown != "" {
message = fmt.Sprintf("Stopped with strategy '%s'", woc.execWf.Spec.Shutdown)
} else {
message = "Step exceeded its deadline"
}
woc.markNodePhase(node.Name, wfv1.NodeFailed, message)
}
}
}
return nil
}
// countActivePods counts the number of active (Pending/Running) pods.
// Optionally restricts it to a template invocation (boundaryID)
func (woc *wfOperationCtx) countActivePods(boundaryIDs ...string) int64 {
var boundaryID = ""
if len(boundaryIDs) > 0 {
boundaryID = boundaryIDs[0]
}
var activePods int64
// if we care about parallelism, count the active pods at the template level
for _, node := range woc.wf.Status.Nodes {
if node.Type != wfv1.NodeTypePod {
continue
}
if boundaryID != "" && node.BoundaryID != boundaryID {
continue
}
switch node.Phase {
case wfv1.NodePending, wfv1.NodeRunning:
if node.SynchronizationStatus != nil && node.SynchronizationStatus.Waiting != "" {
// Do not include pending nodes that are waiting for a lock
continue
}
activePods++
}
}
return activePods
}
// countActiveChildren counts the number of active (Pending/Running) children nodes of parent parentName
func (woc *wfOperationCtx) countActiveChildren(boundaryIDs ...string) int64 {
var boundaryID = ""
if len(boundaryIDs) > 0 {
boundaryID = boundaryIDs[0]
}
var activeChildren int64
// if we care about parallelism, count the active pods at the template level
for _, node := range woc.wf.Status.Nodes {
if boundaryID != "" && node.BoundaryID != boundaryID {
continue
}
switch node.Type {
case wfv1.NodeTypePod, wfv1.NodeTypeSteps, wfv1.NodeTypeDAG:
default:
continue
}
switch node.Phase {
case wfv1.NodePending, wfv1.NodeRunning:
activeChildren++
}
}
return activeChildren
}
// getAllWorkflowPods returns all pods related to the current workflow
func (woc *wfOperationCtx) getAllWorkflowPods() ([]*apiv1.Pod, error) {
objs, err := woc.controller.podInformer.GetIndexer().ByIndex(indexes.WorkflowIndex, indexes.WorkflowIndexValue(woc.wf.Namespace, woc.wf.Name))
if err != nil {
return nil, err
}
pods := make([]*apiv1.Pod, len(objs))
for i, obj := range objs {
pod, ok := obj.(*apiv1.Pod)
if !ok {
return nil, fmt.Errorf("expected \"*apiv1.Pod\", got \"%v\"", reflect.TypeOf(obj).String())
}
pods[i] = pod
}
return pods, nil
}
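// printPodSpecLog logs the full pod spec as JSON for debugging purposes.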
func printPodSpecLog(pod *apiv1.Pod, wfName string) {
podSpecByte, err := json.Marshal(pod)
if err != nil {
		log.WithField("workflow", wfName).WithField("nodename", pod.Name).WithField("namespace", pod.Namespace).Warnf("Unable to marshal pod spec. %v", err)
}
log.WithField("workflow", wfName).WithField("nodename", pod.Name).WithField("namespace", pod.Namespace).Infof("Pod Spec: %s", string(podSpecByte))
}
// assessNodeStatus compares the current state of a pod with its corresponding node
// and returns the new node status if something changed
func (woc *wfOperationCtx) assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus {
var newPhase wfv1.NodePhase
var newDaemonStatus *bool
var message string
updated := false
switch pod.Status.Phase {
case apiv1.PodPending:
newPhase = wfv1.NodePending
newDaemonStatus = pointer.BoolPtr(false)
message = getPendingReason(pod)
case apiv1.PodSucceeded:
newPhase = wfv1.NodeSucceeded
newDaemonStatus = pointer.BoolPtr(false)
case apiv1.PodFailed:
// ignore pod failure for daemoned steps
if node.IsDaemoned() {
newPhase = wfv1.NodeSucceeded
} else {
newPhase, message = inferFailedReason(pod)
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Infof("Pod failed")
}
newDaemonStatus = pointer.BoolPtr(false)
case apiv1.PodRunning:
if pod.DeletionTimestamp != nil {
// pod is being terminated
newPhase = wfv1.NodeError
message = "pod deleted during operation"
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Error(message)
} else {
newPhase = wfv1.NodeRunning
tmplStr, ok := pod.Annotations[common.AnnotationKeyTemplate]
if !ok {
log.WithField("pod", pod.ObjectMeta.Name).Warn("missing template annotation")
return nil
}
var tmpl wfv1.Template
err := json.Unmarshal([]byte(tmplStr), &tmpl)
if err != nil {
log.WithError(err).WithField("pod", pod.ObjectMeta.Name).Warn("template annotation unreadable")
return nil
}
if tmpl.Daemon != nil && *tmpl.Daemon {
// pod is running and template is marked daemon. check if everything is ready
for _, ctrStatus := range pod.Status.ContainerStatuses {
if !ctrStatus.Ready {
return nil
}
}
// proceed to mark node status as running (and daemoned)
newPhase = wfv1.NodeRunning
newDaemonStatus = pointer.BoolPtr(true)
log.Infof("Processing ready daemon pod: %v", pod.ObjectMeta.SelfLink)
}
}
default:
newPhase = wfv1.NodeError
message = fmt.Sprintf("Unexpected pod phase for %s: %s", pod.ObjectMeta.Name, pod.Status.Phase)
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Error(message)
}
if newDaemonStatus != nil {
if !*newDaemonStatus {
// if the daemon status switched to false, we prefer to just unset daemoned status field
// (as opposed to setting it to false)
newDaemonStatus = nil
}
if (newDaemonStatus != nil && node.Daemoned == nil) || (newDaemonStatus == nil && node.Daemoned != nil) {
log.Infof("Setting node %v daemoned: %v -> %v", node.ID, node.Daemoned, newDaemonStatus)
node.Daemoned = newDaemonStatus
updated = true
if pod.Status.PodIP != "" && pod.Status.PodIP != node.PodIP {
// only update Pod IP for daemoned nodes to reduce number of updates
log.Infof("Updating daemon node %s IP %s -> %s", node.ID, node.PodIP, pod.Status.PodIP)
node.PodIP = pod.Status.PodIP
}
}
}
outputStr, ok := pod.Annotations[common.AnnotationKeyOutputs]
if ok && node.Outputs == nil {
updated = true
log.Infof("Setting node %v outputs", node.ID)
var outputs wfv1.Outputs
err := json.Unmarshal([]byte(outputStr), &outputs)
if err != nil {
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Errorf("Failed to unmarshal %s outputs from pod annotation: %v", pod.Name, err)
node.Phase = wfv1.NodeError
} else {
node.Outputs = &outputs
}
}
if node.Phase != newPhase {
log.Infof("Updating node %s status %s -> %s", node.ID, node.Phase, newPhase)
// if we are transitioning from Pending to a different state, clear out pending message
if node.Phase == wfv1.NodePending {
node.Message = ""
}
updated = true
node.Phase = newPhase
}
if message != "" && node.Message != message {
log.Infof("Updating node %s message: %s", node.ID, message)
updated = true
node.Message = message
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
updated = true
if !node.IsDaemoned() {
node.FinishedAt = getLatestFinishedAt(pod)
}
if node.FinishedAt.IsZero() {
// If we get here, the container is daemoned so the
// finishedAt might not have been set.
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
}
node.ResourcesDuration = resource.DurationForPod(pod)
}
if updated {
return node
}
return nil
}
// getLatestFinishedAt returns the latest finishedAt timestamp from all the
// containers of this pod.
func getLatestFinishedAt(pod *apiv1.Pod) metav1.Time {
var latest metav1.Time
for _, ctr := range pod.Status.InitContainerStatuses {
if ctr.State.Terminated != nil && ctr.State.Terminated.FinishedAt.After(latest.Time) {
latest = ctr.State.Terminated.FinishedAt
}
}
for _, ctr := range pod.Status.ContainerStatuses {
if ctr.State.Terminated != nil && ctr.State.Terminated.FinishedAt.After(latest.Time) {
latest = ctr.State.Terminated.FinishedAt
}
}
return latest
}
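// getPendingReason returns a human-readable reason describing why a pod is still Pending,
// based on waiting container states or an Unschedulable pod condition.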
func getPendingReason(pod *apiv1.Pod) string {
for _, ctrStatus := range pod.Status.ContainerStatuses {
if ctrStatus.State.Waiting != nil {
if ctrStatus.State.Waiting.Message != "" {
return fmt.Sprintf("%s: %s", ctrStatus.State.Waiting.Reason, ctrStatus.State.Waiting.Message)
}
return ctrStatus.State.Waiting.Reason
}
}
// Example:
// - lastProbeTime: null
// lastTransitionTime: 2018-08-29T06:38:36Z
// message: '0/3 nodes are available: 2 Insufficient cpu, 3 MatchNodeSelector.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
for _, cond := range pod.Status.Conditions {
if cond.Reason == apiv1.PodReasonUnschedulable {
if cond.Message != "" {
return fmt.Sprintf("%s: %s", cond.Reason, cond.Message)
}
return cond.Reason
}
}
return ""
}
// inferFailedReason returns metadata about a Failed pod to be used in its NodeStatus
// Returns a tuple of the new phase and message
func inferFailedReason(pod *apiv1.Pod) (wfv1.NodePhase, string) {
if pod.Status.Message != "" {
// Pod has a nice error message. Use that.
return wfv1.NodeFailed, pod.Status.Message
}
annotatedMsg := pod.Annotations[common.AnnotationKeyNodeMessage]
// We only get one message to set for the overall node status.
// If multiple containers failed, in order of preference:
// init, main (annotated), main (exit code), wait, sidecars
for _, ctr := range pod.Status.InitContainerStatuses {
// Virtual Kubelet environment will not set the terminate on waiting container
// https://github.com/argoproj/argo/issues/3879
// https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208
if ctr.State.Waiting != nil {
return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name)
}
if ctr.State.Terminated == nil {
// We should never get here
log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.ObjectMeta.Name, ctr.Name)
continue
}
if ctr.State.Terminated.ExitCode == 0 {
continue
}
errMsg := "failed to load artifacts"
for _, msg := range []string{annotatedMsg, ctr.State.Terminated.Message} {
if msg != "" {
errMsg += ": " + msg
break
}
}
// NOTE: we consider artifact load issues as Error instead of Failed
return wfv1.NodeError, errMsg
}
failMessages := make(map[string]string)
for _, ctr := range pod.Status.ContainerStatuses {
// Virtual Kubelet environment will not set the terminate on waiting container
// https://github.com/argoproj/argo/issues/3879
// https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208
if ctr.State.Waiting != nil {
return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name)
}
if ctr.State.Terminated == nil {
// We should never get here
log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.ObjectMeta.Name, ctr.Name)
continue
}
if ctr.State.Terminated.ExitCode == 0 {
continue
}
if ctr.Name == common.WaitContainerName {
errDetails := ""
for _, msg := range []string{annotatedMsg, ctr.State.Terminated.Message} {
if msg != "" {
errDetails = msg
break
}
}
if errDetails == "" {
// executor is expected to annotate a message to the pod upon any errors.
// If we failed to see the annotated message, it is likely the pod ran with
// insufficient privileges. Give a hint to that effect.
errDetails = fmt.Sprintf("verify serviceaccount %s:%s has necessary privileges", pod.ObjectMeta.Namespace, pod.Spec.ServiceAccountName)
}
errMsg := fmt.Sprintf("failed to save outputs: %s", errDetails)
failMessages[ctr.Name] = errMsg
continue
}
if ctr.State.Terminated.Message != "" {
errMsg := ctr.State.Terminated.Message
if ctr.Name != common.MainContainerName {
errMsg = fmt.Sprintf("sidecar '%s' %s", ctr.Name, errMsg)
}
failMessages[ctr.Name] = errMsg
continue
}
if ctr.State.Terminated.Reason == "OOMKilled" {
failMessages[ctr.Name] = ctr.State.Terminated.Reason
continue
}
errMsg := fmt.Sprintf("failed with exit code %d", ctr.State.Terminated.ExitCode)
if ctr.Name != common.MainContainerName {
if ctr.State.Terminated.ExitCode == 137 || ctr.State.Terminated.ExitCode == 143 {
// if the sidecar was SIGKILL'd (exit code 137) assume it was because argoexec
				// forcibly killed the container, in which case we ignore the error.
// Java code 143 is a normal exit 128 + 15 https://github.com/elastic/elasticsearch/issues/31847
log.Infof("Ignoring %d exit code of sidecar '%s'", ctr.State.Terminated.ExitCode, ctr.Name)
continue
}
errMsg = fmt.Sprintf("sidecar '%s' %s", ctr.Name, errMsg)
}
failMessages[ctr.Name] = errMsg
}
if failMsg, ok := failMessages[common.MainContainerName]; ok {
_, ok = failMessages[common.WaitContainerName]
isResourceTemplate := !ok
if isResourceTemplate && annotatedMsg != "" {
// For resource templates, we prefer the annotated message
// over the vanilla exit code 1 error
return wfv1.NodeFailed, annotatedMsg
}
return wfv1.NodeFailed, failMsg
}
if failMsg, ok := failMessages[common.WaitContainerName]; ok {
return wfv1.NodeError, failMsg
}
// If we get here, both the main and wait container succeeded. Iterate the fail messages to
// identify the sidecar which failed and return the message.
for _, failMsg := range failMessages {
return wfv1.NodeFailed, failMsg
}
// If we get here, we have detected that the main/wait containers succeed but the sidecar(s)
// were SIGKILL'd. The executor may have had to forcefully terminate the sidecar (kill -9),
// resulting in a 137 exit code (which we had ignored earlier). If failMessages is empty, it
// indicates that this is the case and we return Success instead of Failure.
return wfv1.NodeSucceeded, ""
}
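// createPVCs creates the PVCs declared in the workflow's volumeClaimTemplates (if not already
// created) and records the resulting volumes in the workflow status.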
func (woc *wfOperationCtx) createPVCs() error {
if !(woc.wf.Status.Phase == wfv1.NodePending || woc.wf.Status.Phase == wfv1.NodeRunning) {
// Only attempt to create PVCs if workflow is in Pending or Running state
// (e.g. passed validation, or didn't already complete)
return nil
}
if len(woc.execWf.Spec.VolumeClaimTemplates) == len(woc.wf.Status.PersistentVolumeClaims) {
// If we have already created the PVCs, then there is nothing to do.
// This will also handle the case where workflow has no volumeClaimTemplates.
return nil
}
pvcClient := woc.controller.kubeclientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
for i, pvcTmpl := range woc.execWf.Spec.VolumeClaimTemplates {
if pvcTmpl.ObjectMeta.Name == "" {
return errors.Errorf(errors.CodeBadRequest, "volumeClaimTemplates[%d].metadata.name is required", i)
}
pvcTmpl = *pvcTmpl.DeepCopy()
// PVC name will be <workflowname>-<volumeclaimtemplatename>
refName := pvcTmpl.ObjectMeta.Name
pvcName := fmt.Sprintf("%s-%s", woc.wf.ObjectMeta.Name, pvcTmpl.ObjectMeta.Name)
woc.log.Infof("Creating pvc %s", pvcName)
pvcTmpl.ObjectMeta.Name = pvcName
if pvcTmpl.ObjectMeta.Labels == nil {
pvcTmpl.ObjectMeta.Labels = make(map[string]string)
}
pvcTmpl.ObjectMeta.Labels[common.LabelKeyWorkflow] = woc.wf.ObjectMeta.Name
pvcTmpl.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
}
pvc, err := pvcClient.Create(&pvcTmpl)
if err != nil && apierr.IsAlreadyExists(err) {
woc.log.WithField("pvc", pvcTmpl.Name).Info("pvc already exists. Workflow is re-using it")
pvc, err = pvcClient.Get(pvcTmpl.Name, metav1.GetOptions{})
if err != nil {
return err
}
hasOwnerReference := false
for i := range pvc.OwnerReferences {
ownerRef := pvc.OwnerReferences[i]
if ownerRef.UID == woc.wf.UID {
hasOwnerReference = true
break
}
}
if !hasOwnerReference {
return errors.Errorf(errors.CodeForbidden, "%s pvc already exists with different ownerreference", pvcTmpl.Name)
}
}
if err != nil {
return err
}
vol := apiv1.Volume{
Name: refName,
VolumeSource: apiv1.VolumeSource{
PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.ObjectMeta.Name,
},
},
}
woc.wf.Status.PersistentVolumeClaims = append(woc.wf.Status.PersistentVolumeClaims, vol)
woc.updated = true
}
return nil
}
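// deletePVCs deletes the PVCs created from volumeClaimTemplates according to the configured
// volumeClaimGC strategy, returning the first error encountered.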
func (woc *wfOperationCtx) deletePVCs() error {
gcStrategy := woc.wf.Spec.GetVolumeClaimGC().GetStrategy()
switch gcStrategy {
case wfv1.VolumeClaimGCOnSuccess:
if woc.wf.Status.Phase == wfv1.NodeError || woc.wf.Status.Phase == wfv1.NodeFailed {
// Skip deleting PVCs to reuse them for retried failed/error workflows.
			// PVCs are automatically deleted when the corresponding owner workflows get deleted.
return nil
}
case wfv1.VolumeClaimGCOnCompletion:
default:
return fmt.Errorf("unknown volume gc strategy: %s", gcStrategy)
}
totalPVCs := len(woc.wf.Status.PersistentVolumeClaims)
if totalPVCs == 0 {
// PVC list already empty. nothing to do
return nil
}
pvcClient := woc.controller.kubeclientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
newPVClist := make([]apiv1.Volume, 0)
// Attempt to delete all PVCs. Record first error encountered
var firstErr error
for _, pvc := range woc.wf.Status.PersistentVolumeClaims {
woc.log.Infof("Deleting PVC %s", pvc.PersistentVolumeClaim.ClaimName)
err := pvcClient.Delete(pvc.PersistentVolumeClaim.ClaimName, nil)
if err != nil {
if !apierr.IsNotFound(err) {
woc.log.Errorf("Failed to delete pvc %s: %v", pvc.PersistentVolumeClaim.ClaimName, err)
newPVClist = append(newPVClist, pvc)
if firstErr == nil {
firstErr = err
}
}
}
}
if len(newPVClist) != totalPVCs {
		// we were successful in deleting one or more PVCs
woc.log.Infof("Deleted %d/%d PVCs", totalPVCs-len(newPVClist), totalPVCs)
woc.wf.Status.PersistentVolumeClaims = newPVClist
woc.updated = true
}
return firstErr
}
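// getChildNodeIndex returns the child node at the given index. A negative index counts from the
// end of the children list (e.g. -1 returns the last child).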
func getChildNodeIndex(node *wfv1.NodeStatus, nodes wfv1.Nodes, index int) *wfv1.NodeStatus {
if len(node.Children) <= 0 {
return nil
}
nodeIndex := index
if index < 0 {
nodeIndex = len(node.Children) + index // This actually subtracts, since index is negative
if nodeIndex < 0 {
panic(fmt.Sprintf("child index '%d' out of bounds", index))
}
}
lastChildNodeName := node.Children[nodeIndex]
lastChildNode, ok := nodes[lastChildNodeName]
if !ok {
panic("could not find child node")
}
return &lastChildNode
}
type executeTemplateOpts struct {
// boundaryID is an ID for node grouping
boundaryID string
// onExitTemplate signifies that executeTemplate was called as part of an onExit handler.
// Necessary for graceful shutdowns
onExitTemplate bool
	// executionDeadline is a deadline to set on any pods executed. This is necessary for pods to inherit backoff.maxDuration
executionDeadline time.Time
}
// executeTemplate executes the template with the given arguments and returns the created NodeStatus
// for the created node (if created). Nodes may not be created if parallelism or deadline exceeded.
// nodeName is the name to be used as the name of the node, and boundaryID indicates which template
// boundary this node belongs to.
func (woc *wfOperationCtx) executeTemplate(nodeName string, orgTmpl wfv1.TemplateReferenceHolder, tmplCtx *templateresolution.Context, args wfv1.Arguments, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
woc.log.Debugf("Evaluating node %s: template: %s, boundaryID: %s", nodeName, common.GetTemplateHolderString(orgTmpl), opts.boundaryID)
node := woc.wf.GetNodeByName(nodeName)
// Set templateScope from which the template resolution starts.
templateScope := tmplCtx.GetTemplateScope()
newTmplCtx, resolvedTmpl, templateStored, err := tmplCtx.ResolveTemplate(orgTmpl)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
localParams := make(map[string]string)
// Inject the pod name. If the pod has a retry strategy, the pod name will be changed and will be injected when it
// is determined
if resolvedTmpl.IsPodType() && woc.retryStrategy(resolvedTmpl) == nil {
localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName)
}
// Inputs has been processed with arguments already, so pass empty arguments.
processedTmpl, err := common.ProcessArgs(resolvedTmpl, &args, woc.globalParams, localParams, false)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// If memoization is on, check if node output exists in cache
if node == nil && processedTmpl.Memoize != nil {
memoizationCache := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, processedTmpl.Memoize.Cache.ConfigMap.Name)
if memoizationCache == nil {
err := fmt.Errorf("cache could not be found or created")
			woc.log.WithFields(log.Fields{"cacheName": processedTmpl.Memoize.Cache.ConfigMap.Name}).WithError(err).Error("Failed to get memoization cache")
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
entry, err := memoizationCache.Load(processedTmpl.Memoize.Key)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
hit := entry.Hit()
var outputs *wfv1.Outputs
if processedTmpl.Memoize.MaxAge != "" {
maxAge, err := time.ParseDuration(processedTmpl.Memoize.MaxAge)
if err != nil {
err := fmt.Errorf("invalid maxAge: %s", err)
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
maxAgeOutputs, ok := entry.GetOutputsWithMaxAge(maxAge)
if !ok {
// The outputs are expired, so this cache entry is not hit
hit = false
}
outputs = maxAgeOutputs
} else {
outputs = entry.GetOutputs()
}
memoizationStatus := &wfv1.MemoizationStatus{
Hit: hit,
Key: processedTmpl.Memoize.Key,
CacheName: processedTmpl.Memoize.Cache.ConfigMap.Name,
}
if hit {
node = woc.initializeCacheHitNode(nodeName, processedTmpl, templateScope, orgTmpl, opts.boundaryID, outputs, memoizationStatus)
} else {
node = woc.initializeCacheNode(nodeName, processedTmpl, templateScope, orgTmpl, opts.boundaryID, memoizationStatus)
}
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
}
if node != nil {
if node.Fulfilled() {
if processedTmpl.Synchronization != nil {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
}
woc.log.Debugf("Node %s already completed", nodeName)
if processedTmpl.Metrics != nil {
				// Check if this node completed between operations: if it existed before this operation and
				// was not yet fulfilled, it must have completed during the current operation, so emit its
				// metrics here. Nodes that complete within the same operation have their metrics emitted below.
if prevNodeStatus, ok := woc.preExecutionNodePhases[node.ID]; ok && !prevNodeStatus.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
return node, nil
}
woc.log.Debugf("Executing node %s of %s is %s", nodeName, node.Type, node.Phase)
// Memoized nodes don't have StartedAt.
if node.StartedAt.IsZero() {
node.StartedAt = metav1.Time{Time: time.Now().UTC()}
node.EstimatedDuration = woc.estimateNodeDuration(node.Name)
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
}
}
// Check if we took too long operating on this workflow and immediately return if we did
if time.Now().UTC().After(woc.deadline) {
woc.log.Warnf("Deadline exceeded")
woc.requeue(defaultRequeueTime)
return node, ErrDeadlineExceeded
}
// Check the template deadline for Pending nodes
	// This check covers the resource-forbidden and synchronization scenarios,
	// in which only the node is created, in a pending state.
_, err = woc.checkTemplateTimeout(processedTmpl, node)
if err != nil {
woc.log.Warnf("Template %s exceeded its deadline", processedTmpl.Name)
return woc.markNodePhase(nodeName, wfv1.NodeFailed, err.Error()), err
}
// Check if we exceeded template or workflow parallelism and immediately return if we did
if err := woc.checkParallelism(processedTmpl, node, opts.boundaryID); err != nil {
return node, err
}
if processedTmpl.Synchronization != nil {
lockAcquired, wfUpdated, msg, err := woc.controller.syncManager.TryAcquire(woc.wf, woc.wf.NodeID(nodeName), processedTmpl.Synchronization)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
if !lockAcquired {
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfutil.GetNodeType(processedTmpl), templateScope, processedTmpl, orgTmpl, opts.boundaryID, wfv1.NodePending, msg)
}
lockName, err := argosync.GetLockName(processedTmpl.Synchronization, woc.wf.Namespace)
if err != nil {
// If an error were to be returned here, it would have been caught by TryAcquire. If it didn't, then it is
// unexpected behavior and is a bug.
panic("bug: GetLockName should not return an error after a call to TryAcquire")
}
return woc.markNodeWaitingForLock(node.Name, lockName.EncodeName()), nil
} else {
woc.log.Infof("Node %s acquired synchronization lock", nodeName)
if node != nil {
node.Message = ""
node = woc.markNodeWaitingForLock(node.Name, "")
}
}
woc.updated = wfUpdated
}
// If the user has specified retries, node becomes a special retry node.
// This node acts as a parent of all retries that will be done for
// the container. The status of this node should be "Success" if any
// of the retries succeed. Otherwise, it is "Failed".
retryNodeName := ""
if woc.retryStrategy(processedTmpl) != nil {
retryNodeName = nodeName
retryParentNode := node
if retryParentNode == nil {
woc.log.Debugf("Inject a retry node for node %s", retryNodeName)
retryParentNode = woc.initializeExecutableNode(retryNodeName, wfv1.NodeTypeRetry, templateScope, processedTmpl, orgTmpl, opts.boundaryID, wfv1.NodeRunning)
}
processedRetryParentNode, continueExecution, err := woc.processNodeRetries(retryParentNode, *woc.retryStrategy(processedTmpl), opts)
if err != nil {
return woc.markNodeError(retryNodeName, err), err
} else if !continueExecution {
// We are still waiting for a retry delay to finish
return retryParentNode, nil
}
retryParentNode = processedRetryParentNode
// The retry node might have completed by now.
if retryParentNode.Fulfilled() {
if processedTmpl.Metrics != nil {
// In this check, a completed node may or may not have existed prior to this execution. If it did exist, ensure that it wasn't
// completed before this execution. If it did not exist prior, then we can infer that it was completed during this execution.
// The statement "(!ok || !prevNodeStatus.Fulfilled())" checks for this behavior and represents the material conditional
// "ok -> !prevNodeStatus.Fulfilled()" (https://en.wikipedia.org/wiki/Material_conditional)
if prevNodeStatus, ok := woc.preExecutionNodePhases[retryParentNode.ID]; (!ok || !prevNodeStatus.Fulfilled()) && retryParentNode.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
return retryParentNode, nil
}
lastChildNode := getChildNodeIndex(retryParentNode, woc.wf.Status.Nodes, -1)
if lastChildNode != nil && !lastChildNode.Fulfilled() {
// Last child node is still running.
nodeName = lastChildNode.Name
node = lastChildNode
} else {
// Create a new child node and append it to the retry node.
nodeName = fmt.Sprintf("%s(%d)", retryNodeName, len(retryParentNode.Children))
woc.addChildNode(retryNodeName, nodeName)
node = nil
localParams := make(map[string]string)
// Change the `pod.name` variable to the new retry node name
if processedTmpl.IsPodType() {
localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName)
}
// Inject the retryAttempt number
localParams[common.LocalVarRetries] = strconv.Itoa(len(retryParentNode.Children))
processedTmpl, err = common.SubstituteParams(processedTmpl, map[string]string{}, localParams)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
}
}
switch processedTmpl.GetType() {
case wfv1.TemplateTypeContainer:
node, err = woc.executeContainer(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeSteps:
node, err = woc.executeSteps(nodeName, newTmplCtx, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeScript:
node, err = woc.executeScript(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeResource:
node, err = woc.executeResource(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeDAG:
node, err = woc.executeDAG(nodeName, newTmplCtx, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeSuspend:
node, err = woc.executeSuspend(nodeName, templateScope, processedTmpl, orgTmpl, opts)
default:
err = errors.Errorf(errors.CodeBadRequest, "Template '%s' missing specification", processedTmpl.Name)
return woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, templateScope, orgTmpl, opts.boundaryID, wfv1.NodeError, err.Error()), err
}
if err != nil {
node = woc.markNodeError(nodeName, err)
if processedTmpl.Synchronization != nil {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
}
		// If no retry strategy is set, or the retry policy is neither Always nor OnError, we won't attempt
		// to retry an errored container and we return instead.
retryStrategy := woc.retryStrategy(processedTmpl)
if retryStrategy == nil ||
(retryStrategy.RetryPolicy != wfv1.RetryPolicyAlways &&
retryStrategy.RetryPolicy != wfv1.RetryPolicyOnError) {
return node, err
}
}
if processedTmpl.Metrics != nil {
		// Check if the node was just created: if it did not previously exist, we can infer that it was
		// created during the current operation, so emit realtime metrics.
if _, ok := woc.preExecutionNodePhases[node.ID]; !ok {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, true)
}
// Check if the node completed during this execution, if it did emit metrics
//
// This check is necessary because sometimes a node will be marked completed during the current execution and will
// not be considered again. The best example of this is the entrypoint steps/dag template (once completed, the
		// workflow ends and it's not reconsidered). This check makes sure that its metrics also get emitted.
//
// In this check, a completed node may or may not have existed prior to this execution. If it did exist, ensure that it wasn't
// completed before this execution. If it did not exist prior, then we can infer that it was completed during this execution.
// The statement "(!ok || !prevNodeStatus.Fulfilled())" checks for this behavior and represents the material conditional
// "ok -> !prevNodeStatus.Fulfilled()" (https://en.wikipedia.org/wiki/Material_conditional)
if prevNodeStatus, ok := woc.preExecutionNodePhases[node.ID]; (!ok || !prevNodeStatus.Fulfilled()) && node.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
node = woc.wf.GetNodeByName(node.Name)
// Swap the node back to retry node
if retryNodeName != "" {
retryNode := woc.wf.GetNodeByName(retryNodeName)
		if !retryNode.Fulfilled() && node.Fulfilled() { // if the retry child has completed, we need to update ourselves
node, err = woc.executeTemplate(retryNodeName, orgTmpl, tmplCtx, args, opts)
if err != nil {
return woc.markNodeError(node.Name, err), err
}
}
node = retryNode
}
return node, nil
}
// Checks if the template has exceeded its deadline
func (woc *wfOperationCtx) checkTemplateTimeout(tmpl *wfv1.Template, node *wfv1.NodeStatus) (*time.Time, error) {
if node == nil {
return nil, nil
}
if tmpl.Timeout != "" {
tmplTimeout, err := time.ParseDuration(tmpl.Timeout)
if err != nil {
return nil, fmt.Errorf("invalid timeout format. %v", err)
}
deadline := node.StartedAt.Add(tmplTimeout)
if node.Phase == wfv1.NodePending && time.Now().After(deadline) {
return nil, ErrTimeout
}
return &deadline, nil
}
return nil, nil
}
// markWorkflowPhase is a convenience method to set the phase of the workflow with optional message
// optionally marks the workflow completed, which sets the finishedAt timestamp and completed label
func (woc *wfOperationCtx) markWorkflowPhase(phase wfv1.NodePhase, markCompleted bool, message ...string) {
if woc.wf.Status.Phase != phase {
if woc.wf.Status.Phase.Fulfilled() {
woc.log.WithFields(log.Fields{"fromPhase": woc.wf.Status.Phase, "toPhase": phase}).
Panic("workflow is already fulfilled")
}
woc.log.Infof("Updated phase %s -> %s", woc.wf.Status.Phase, phase)
woc.updated = true
woc.wf.Status.Phase = phase
if woc.wf.ObjectMeta.Labels == nil {
woc.wf.ObjectMeta.Labels = make(map[string]string)
}
woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(phase)
}
if woc.wf.Status.StartedAt.IsZero() {
woc.updated = true
woc.wf.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
woc.wf.Status.EstimatedDuration = woc.estimateWorkflowDuration()
}
if len(message) > 0 && woc.wf.Status.Message != message[0] {
woc.log.Infof("Updated message %s -> %s", woc.wf.Status.Message, message[0])
woc.updated = true
woc.wf.Status.Message = message[0]
}
if phase == wfv1.NodeError {
entryNode, ok := woc.wf.Status.Nodes[woc.wf.ObjectMeta.Name]
if ok && entryNode.Phase == wfv1.NodeRunning {
entryNode.Phase = wfv1.NodeError
entryNode.Message = "Workflow operation error"
woc.wf.Status.Nodes[woc.wf.ObjectMeta.Name] = entryNode
woc.updated = true
}
}
switch phase {
case wfv1.NodeSucceeded, wfv1.NodeFailed, wfv1.NodeError:
// wait for all daemon nodes to get terminated before marking workflow completed
if markCompleted && !woc.hasDaemonNodes() {
woc.log.Infof("Marking workflow completed")
woc.wf.Status.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.globalParams[common.GlobalVarWorkflowDuration] = fmt.Sprintf("%f", woc.wf.Status.FinishedAt.Sub(woc.wf.Status.StartedAt.Time).Seconds())
if woc.wf.ObjectMeta.Labels == nil {
woc.wf.ObjectMeta.Labels = make(map[string]string)
}
woc.wf.ObjectMeta.Labels[common.LabelKeyCompleted] = "true"
woc.wf.Status.Conditions.UpsertCondition(wfv1.Condition{Status: metav1.ConditionTrue, Type: wfv1.ConditionTypeCompleted})
err := woc.deletePDBResource()
if err != nil {
woc.wf.Status.Phase = wfv1.NodeError
woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(wfv1.NodeError)
woc.updated = true
woc.wf.Status.Message = err.Error()
}
if woc.controller.wfArchive.IsEnabled() {
if woc.controller.isArchivable(woc.wf) {
woc.log.Infof("Marking workflow as pending archiving")
woc.wf.Labels[common.LabelKeyWorkflowArchivingStatus] = "Pending"
} else {
woc.log.Infof("Doesn't match with archive label selector. Skipping Archive")
}
}
woc.updated = true
}
}
}
// getEstimator returns an estimator; this may be a null implementation in the rare case of an error
func (woc *wfOperationCtx) getEstimator() estimation.Estimator {
if woc.estimator == nil {
woc.estimator, _ = woc.controller.estimatorFactory.NewEstimator(woc.wf)
}
return woc.estimator
}
func (woc *wfOperationCtx) estimateWorkflowDuration() wfv1.EstimatedDuration {
return woc.getEstimator().EstimateWorkflowDuration()
}
func (woc *wfOperationCtx) estimateNodeDuration(nodeName string) wfv1.EstimatedDuration {
return woc.getEstimator().EstimateNodeDuration(nodeName)
}
func (woc *wfOperationCtx) hasDaemonNodes() bool {
for _, node := range woc.wf.Status.Nodes {
if node.IsDaemoned() {
return true
}
}
return false
}
func (woc *wfOperationCtx) markWorkflowRunning() {
woc.markWorkflowPhase(wfv1.NodeRunning, false, "")
}
func (woc *wfOperationCtx) markWorkflowSuccess() {
woc.markWorkflowPhase(wfv1.NodeSucceeded, true)
}
func (woc *wfOperationCtx) markWorkflowFailed(message string) {
woc.markWorkflowPhase(wfv1.NodeFailed, true, message)
}
func (woc *wfOperationCtx) markWorkflowError(err error, markCompleted bool) {
woc.markWorkflowPhase(wfv1.NodeError, markCompleted, err.Error())
}
// stepsOrDagSeparator identifies if a node name starts with our naming convention separator from
// DAG or steps templates. Will match strings with a prefix like: [0]. or .
var stepsOrDagSeparator = regexp.MustCompile(`^(\[\d+\])?\.`)
// initializeExecutableNode initializes a node and stores the template.
func (woc *wfOperationCtx) initializeExecutableNode(nodeName string, nodeType wfv1.NodeType, templateScope string, executeTmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, phase wfv1.NodePhase, messages ...string) *wfv1.NodeStatus {
node := woc.initializeNode(nodeName, nodeType, templateScope, orgTmpl, boundaryID, phase)
// Set the input values to the node.
if executeTmpl.Inputs.HasInputs() {
node.Inputs = executeTmpl.Inputs.DeepCopy()
}
if nodeType == wfv1.NodeTypeSuspend {
node = addRawOutputFields(node, executeTmpl)
}
if len(messages) > 0 {
node.Message = messages[0]
}
// Update the node
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
return node
}
// initializeNodeOrMarkError initializes an error node, or marks the node with an error if it already exists.
func (woc *wfOperationCtx) initializeNodeOrMarkError(node *wfv1.NodeStatus, nodeName string, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, err error) *wfv1.NodeStatus {
if node != nil {
return woc.markNodeError(nodeName, err)
}
return woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, templateScope, orgTmpl, boundaryID, wfv1.NodeError, err.Error())
}
// initializeCacheNode creates a node status that is or will be cached
func (woc *wfOperationCtx) initializeCacheNode(nodeName string, resolvedTmpl *wfv1.Template, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, memStat *wfv1.MemoizationStatus, messages ...string) *wfv1.NodeStatus {
if resolvedTmpl.Memoize == nil {
err := fmt.Errorf("cannot initialize a cached node from a non-memoized template")
		woc.log.WithFields(log.Fields{"namespace": woc.wf.Namespace, "wfName": woc.wf.Name}).WithError(err).Error("Cannot initialize a cached node from a non-memoized template")
panic(err)
}
woc.log.Debug("Initializing cached node ", nodeName, common.GetTemplateHolderString(orgTmpl), boundaryID)
node := woc.initializeExecutableNode(nodeName, wfutil.GetNodeType(resolvedTmpl), templateScope, resolvedTmpl, orgTmpl, boundaryID, wfv1.NodePending, messages...)
node.MemoizationStatus = memStat
return node
}
// Creates a node status that has been cached, completely initialized, and marked as finished
func (woc *wfOperationCtx) initializeCacheHitNode(nodeName string, resolvedTmpl *wfv1.Template, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, outputs *wfv1.Outputs, memStat *wfv1.MemoizationStatus, messages ...string) *wfv1.NodeStatus {
node := woc.initializeCacheNode(nodeName, resolvedTmpl, templateScope, orgTmpl, boundaryID, memStat, messages...)
node.Phase = wfv1.NodeSucceeded
node.Outputs = outputs
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
return node
}
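// initializeNode creates a new node status and stores it in the workflow; it panics if the node
// has already been initialized.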
func (woc *wfOperationCtx) initializeNode(nodeName string, nodeType wfv1.NodeType, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, phase wfv1.NodePhase, messages ...string) *wfv1.NodeStatus {
woc.log.Debugf("Initializing node %s: template: %s, boundaryID: %s", nodeName, common.GetTemplateHolderString(orgTmpl), boundaryID)
nodeID := woc.wf.NodeID(nodeName)
_, ok := woc.wf.Status.Nodes[nodeID]
if ok {
panic(fmt.Sprintf("node %s already initialized", nodeName))
}
node := wfv1.NodeStatus{
ID: nodeID,
Name: nodeName,
TemplateName: orgTmpl.GetTemplateName(),
TemplateRef: orgTmpl.GetTemplateRef(),
TemplateScope: templateScope,
Type: nodeType,
BoundaryID: boundaryID,
Phase: phase,
StartedAt: metav1.Time{Time: time.Now().UTC()},
EstimatedDuration: woc.estimateNodeDuration(nodeName),
}
if boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]; ok {
node.DisplayName = strings.TrimPrefix(node.Name, boundaryNode.Name)
if stepsOrDagSeparator.MatchString(node.DisplayName) {
node.DisplayName = stepsOrDagSeparator.ReplaceAllString(node.DisplayName, "")
}
} else {
node.DisplayName = nodeName
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
node.FinishedAt = node.StartedAt
}
var message string
if len(messages) > 0 {
message = fmt.Sprintf(" (message: %s)", messages[0])
node.Message = messages[0]
}
woc.wf.Status.Nodes[nodeID] = node
woc.log.Infof("%s node %v initialized %s%s", node.Type, node.ID, node.Phase, message)
woc.updated = true
return &node
}
// markNodePhase marks a node with the given phase, creating the node if necessary and handles timestamps
func (woc *wfOperationCtx) markNodePhase(nodeName string, phase wfv1.NodePhase, message ...string) *wfv1.NodeStatus {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
panic(fmt.Sprintf("workflow '%s' node '%s' uninitialized when marking as %v: %s", woc.wf.Name, nodeName, phase, message))
}
if node.Phase != phase {
if node.Phase.Fulfilled() {
woc.log.WithFields(log.Fields{"nodeName": node.Name, "fromPhase": node.Phase, "toPhase": phase}).
Error("node is already fulfilled")
}
woc.log.Infof("node %s phase %s -> %s", node.ID, node.Phase, phase)
node.Phase = phase
woc.updated = true
}
if len(message) > 0 {
if message[0] != node.Message {
woc.log.Infof("node %s message: %s", node.ID, message[0])
node.Message = message[0]
woc.updated = true
}
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.log.Infof("node %s finished: %s", node.ID, node.FinishedAt)
woc.updated = true
}
if !woc.orig.Status.Nodes[node.ID].Fulfilled() && node.Fulfilled() {
woc.onNodeComplete(node)
}
woc.wf.Status.Nodes[node.ID] = *node
return node
}
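// onNodeComplete emits a Kubernetes event for a node that has just been fulfilled, provided node events are enabled in the controller configuration.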
func (woc *wfOperationCtx) onNodeComplete(node *wfv1.NodeStatus) {
if !woc.controller.Config.NodeEvents.IsEnabled() {
return
}
message := fmt.Sprintf("%v node %s", node.Phase, node.Name)
if node.Message != "" {
message = message + ": " + node.Message
}
eventType := apiv1.EventTypeWarning
if node.Phase == wfv1.NodeSucceeded {
eventType = apiv1.EventTypeNormal
}
woc.eventRecorder.AnnotatedEventf(
woc.wf,
map[string]string{
common.AnnotationKeyNodeType: string(node.Type),
common.AnnotationKeyNodeName: node.Name,
},
eventType,
fmt.Sprintf("WorkflowNode%s", node.Phase),
message,
)
}
// markNodeError is a convenience method to mark a node with an error and set the message from the error
func (woc *wfOperationCtx) markNodeError(nodeName string, err error) *wfv1.NodeStatus {
woc.log.WithError(err).WithField("nodeName", nodeName).Error("Mark error node")
return woc.markNodePhase(nodeName, wfv1.NodeError, err.Error())
}
// markNodePending is a convenience method to mark a node and set the message from the error
func (woc *wfOperationCtx) markNodePending(nodeName string, err error) *wfv1.NodeStatus {
woc.log.Infof("Mark node %s as Pending, due to: %+v", nodeName, err)
node := woc.wf.GetNodeByName(nodeName)
return woc.markNodePhase(nodeName, wfv1.NodePending, fmt.Sprintf("Pending %s", time.Since(node.StartedAt.Time)))
}
// markNodeWaitingForLock is a convenience method to mark that a node is waiting for a lock
func (woc *wfOperationCtx) markNodeWaitingForLock(nodeName string, lockName string) *wfv1.NodeStatus {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
return node
}
if node.SynchronizationStatus == nil {
node.SynchronizationStatus = &wfv1.NodeSynchronizationStatus{}
}
if lockName == "" {
// If we are no longer waiting for a lock, nil out the sync status
node.SynchronizationStatus = nil
} else {
node.SynchronizationStatus.Waiting = lockName
}
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
return node
}
// checkParallelism checks if the given template is able to be executed, considering the current active pods and workflow/template parallelism
func (woc *wfOperationCtx) checkParallelism(tmpl *wfv1.Template, node *wfv1.NodeStatus, boundaryID string) error {
if woc.execWf.Spec.Parallelism != nil && woc.activePods >= *woc.execWf.Spec.Parallelism {
woc.log.Infof("workflow active pod spec parallelism reached %d/%d", woc.activePods, *woc.execWf.Spec.Parallelism)
return ErrParallelismReached
}
// TODO: repeated calls to countActivePods is not optimal
switch tmpl.GetType() {
case wfv1.TemplateTypeDAG, wfv1.TemplateTypeSteps:
// if we are about to execute a DAG/Steps template, make sure we haven't already reached our limit
if tmpl.Parallelism != nil && node != nil {
templateActivePods := woc.countActivePods(node.ID)
if templateActivePods >= *tmpl.Parallelism {
woc.log.Infof("template (node %s) active pod parallelism reached %d/%d", node.ID, templateActivePods, *tmpl.Parallelism)
return ErrParallelismReached
}
}
fallthrough
default:
// if we are about to execute a pod, make sure our parent hasn't reached its limit
if boundaryID != "" && (node == nil || (node.Phase != wfv1.NodePending && node.Phase != wfv1.NodeRunning)) {
boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]
if !ok {
return errors.InternalError("boundaryNode not found")
}
tmplCtx, err := woc.createTemplateContext(boundaryNode.GetTemplateScope())
if err != nil {
return err
}
_, boundaryTemplate, templateStored, err := tmplCtx.ResolveTemplate(&boundaryNode)
if err != nil {
return err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
if boundaryTemplate != nil && boundaryTemplate.Parallelism != nil {
activeSiblings := woc.countActiveChildren(boundaryID)
woc.log.Debugf("counted %d/%d active children in boundary %s", activeSiblings, *boundaryTemplate.Parallelism, boundaryID)
if activeSiblings >= *boundaryTemplate.Parallelism {
woc.log.Infof("template (node %s) active children parallelism reached %d/%d", boundaryID, activeSiblings, *boundaryTemplate.Parallelism)
return ErrParallelismReached
}
}
}
}
return nil
}
func (woc *wfOperationCtx) executeContainer(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
}
// Check if the output of this container is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
includeScriptOutput, err := woc.includeScriptOutput(nodeName, opts.boundaryID)
if err != nil {
return node, err
}
woc.log.Debugf("Executing node %s with container template: %v\n", nodeName, tmpl)
_, err = woc.createWorkflowPod(nodeName, *tmpl.Container, tmpl, &createWorkflowPodOpts{
includeScriptOutput: includeScriptOutput,
onExitPod: opts.onExitTemplate,
executionDeadline: opts.executionDeadline,
})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
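// getOutboundNodes returns the IDs of the outbound (leaf) nodes for the given node: pods, skipped and suspend nodes return themselves, task groups recurse into their children, retry nodes return their last child, and other nodes recurse through their OutboundNodes.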
func (woc *wfOperationCtx) getOutboundNodes(nodeID string) []string {
node := woc.wf.Status.Nodes[nodeID]
switch node.Type {
case wfv1.NodeTypePod, wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend:
return []string{node.ID}
case wfv1.NodeTypeTaskGroup:
if len(node.Children) == 0 {
return []string{node.ID}
}
outboundNodes := make([]string, 0)
for _, child := range node.Children {
outboundNodes = append(outboundNodes, woc.getOutboundNodes(child)...)
}
return outboundNodes
case wfv1.NodeTypeRetry:
numChildren := len(node.Children)
if numChildren > 0 {
return []string{node.Children[numChildren-1]}
}
}
outbound := make([]string, 0)
for _, outboundNodeID := range node.OutboundNodes {
outNode := woc.wf.Status.Nodes[outboundNodeID]
if outNode.Type == wfv1.NodeTypePod {
outbound = append(outbound, outboundNodeID)
} else {
subOutIDs := woc.getOutboundNodes(outboundNodeID)
outbound = append(outbound, subOutIDs...)
}
}
return outbound
}
// getTemplateOutputsFromScope resolves a template's outputs from the scope of the template
func getTemplateOutputsFromScope(tmpl *wfv1.Template, scope *wfScope) (*wfv1.Outputs, error) {
if !tmpl.Outputs.HasOutputs() {
return nil, nil
}
var outputs wfv1.Outputs
if len(tmpl.Outputs.Parameters) > 0 {
outputs.Parameters = make([]wfv1.Parameter, 0)
for _, param := range tmpl.Outputs.Parameters {
if param.ValueFrom == nil {
return nil, fmt.Errorf("output parameters must have a valueFrom specified")
}
val, err := scope.resolveParameter(param.ValueFrom.Parameter)
if err != nil {
// We have a default value to use instead of returning an error
if param.ValueFrom.Default != nil {
val = param.ValueFrom.Default.String()
} else {
return nil, err
}
}
param.Value = wfv1.Int64OrStringPtr(val)
param.ValueFrom = nil
outputs.Parameters = append(outputs.Parameters, param)
}
}
if len(tmpl.Outputs.Artifacts) > 0 {
outputs.Artifacts = make([]wfv1.Artifact, 0)
for _, art := range tmpl.Outputs.Artifacts {
resolvedArt, err := scope.resolveArtifact(art.From, art.SubPath)
if err != nil {
// If the artifact was not found and is optional, don't mark an error
if strings.Contains(err.Error(), "Unable to resolve") && art.Optional {
log.Warnf("Optional artifact '%s' was not found; it won't be available as an output", art.Name)
continue
}
return nil, fmt.Errorf("unable to resolve outputs from scope: %s", err)
}
resolvedArt.Name = art.Name
outputs.Artifacts = append(outputs.Artifacts, *resolvedArt)
}
}
return &outputs, nil
}
// hasOutputResultRef checks whether the output result of the named step/task is referenced in the parent template
func hasOutputResultRef(name string, parentTmpl *wfv1.Template) bool {
var variableRefName string
if parentTmpl.DAG != nil {
variableRefName = "{{tasks." + name + ".outputs.result}}"
} else if parentTmpl.Steps != nil {
variableRefName = "{{steps." + name + ".outputs.result}}"
}
jsonValue, err := json.Marshal(parentTmpl)
if err != nil {
log.Warnf("Unable to marshal the template. %v, %v", parentTmpl, err)
}
return strings.Contains(string(jsonValue), variableRefName)
}
// getStepOrDAGTaskName extracts the step or DAG task name from a NodeStatus name
func getStepOrDAGTaskName(nodeName string) string {
if strings.Contains(nodeName, ".") {
name := nodeName[strings.LastIndex(nodeName, ".")+1:]
// Retry, withItems and withParam scenario
if indx := strings.Index(name, "("); indx > 0 {
return name[0:indx]
}
return name
}
return nodeName
}
func (woc *wfOperationCtx) executeScript(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
// Check if the output of this script is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
includeScriptOutput, err := woc.includeScriptOutput(nodeName, opts.boundaryID)
if err != nil {
return node, err
}
mainCtr := tmpl.Script.Container
mainCtr.Args = append(mainCtr.Args, common.ExecutorScriptSourcePath)
_, err = woc.createWorkflowPod(nodeName, mainCtr, tmpl, &createWorkflowPodOpts{
includeScriptOutput: includeScriptOutput,
onExitPod: opts.onExitTemplate,
executionDeadline: opts.executionDeadline,
})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) requeueIfTransientErr(err error, nodeName string) (*wfv1.NodeStatus, error) {
if errorsutil.IsTransientErr(err) {
// Our error was most likely caused by a lack of resources.
woc.requeue(defaultRequeueTime)
return woc.markNodePending(nodeName, err), nil
}
return nil, err
}
// buildLocalScope adds all of a node's outputs to the local scope with the given prefix, as well
// as the global scope, if specified with a globalName
func (woc *wfOperationCtx) buildLocalScope(scope *wfScope, prefix string, node *wfv1.NodeStatus) {
// It may be that the node is a retry node, in which case we want to get the outputs of the last node
// in the retry group instead of the retry node itself.
if node.Type == wfv1.NodeTypeRetry {
node = getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
}
if node.ID != "" {
key := fmt.Sprintf("%s.id", prefix)
scope.addParamToScope(key, node.ID)
}
if !node.StartedAt.Time.IsZero() {
key := fmt.Sprintf("%s.startedAt", prefix)
scope.addParamToScope(key, node.StartedAt.Time.Format(time.RFC3339))
}
if !node.FinishedAt.Time.IsZero() {
key := fmt.Sprintf("%s.finishedAt", prefix)
scope.addParamToScope(key, node.FinishedAt.Time.Format(time.RFC3339))
}
if node.PodIP != "" {
key := fmt.Sprintf("%s.ip", prefix)
scope.addParamToScope(key, node.PodIP)
}
if node.Phase != "" {
key := fmt.Sprintf("%s.status", prefix)
scope.addParamToScope(key, string(node.Phase))
}
woc.addOutputsToLocalScope(prefix, node.Outputs, scope)
}
func (woc *wfOperationCtx) addOutputsToLocalScope(prefix string, outputs *wfv1.Outputs, scope *wfScope) {
if outputs == nil || scope == nil {
return
}
if prefix != "workflow" && outputs.Result != nil {
scope.addParamToScope(fmt.Sprintf("%s.outputs.result", prefix), *outputs.Result)
}
if prefix != "workflow" && outputs.ExitCode != nil {
scope.addParamToScope(fmt.Sprintf("%s.exitCode", prefix), *outputs.ExitCode)
}
for _, param := range outputs.Parameters {
if param.Value != nil {
scope.addParamToScope(fmt.Sprintf("%s.outputs.parameters.%s", prefix, param.Name), param.Value.String())
}
}
for _, art := range outputs.Artifacts {
scope.addArtifactToScope(fmt.Sprintf("%s.outputs.artifacts.%s", prefix, art.Name), art)
}
}
func (woc *wfOperationCtx) addOutputsToGlobalScope(outputs *wfv1.Outputs) {
if outputs == nil {
return
}
for _, param := range outputs.Parameters {
woc.addParamToGlobalScope(param)
}
for _, art := range outputs.Artifacts {
woc.addArtifactToGlobalScope(art, nil)
}
}
// loopNodes is a node list which supports sorting by loop index
type loopNodes []wfv1.NodeStatus
func (n loopNodes) Len() int {
return len(n)
}
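// parseLoopIndex extracts the numeric loop index from a display name of the form 'name(index:value)'; it panics if the index cannot be parsed.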
func parseLoopIndex(s string) int {
s = strings.SplitN(s, "(", 2)[1]
s = strings.SplitN(s, ":", 2)[0]
val, err := strconv.Atoi(s)
if err != nil {
panic(fmt.Sprintf("failed to parse '%s' as int: %v", s, err))
}
return val
}
func (n loopNodes) Less(i, j int) bool {
left := parseLoopIndex(n[i].DisplayName)
right := parseLoopIndex(n[j].DisplayName)
return left < right
}
func (n loopNodes) Swap(i, j int) {
n[i], n[j] = n[j], n[i]
}
// processAggregateNodeOutputs adds the aggregated outputs of a withItems/withParam template as a
// parameter in the form of a JSON list
func (woc *wfOperationCtx) processAggregateNodeOutputs(tmpl *wfv1.Template, scope *wfScope, prefix string, childNodes []wfv1.NodeStatus) error {
if len(childNodes) == 0 {
return nil
}
// need to sort the child node list so that the order of outputs are preserved
sort.Sort(loopNodes(childNodes))
paramList := make([]map[string]string, 0)
resultsList := make([]wfv1.Item, 0)
for _, node := range childNodes {
if node.Outputs == nil {
continue
}
if len(node.Outputs.Parameters) > 0 {
param := make(map[string]string)
for _, p := range node.Outputs.Parameters {
param[p.Name] = p.Value.String()
}
paramList = append(paramList, param)
}
if node.Outputs.Result != nil {
// Support the case where item may be a map
var item wfv1.Item
err := json.Unmarshal([]byte(*node.Outputs.Result), &item)
if err != nil {
return err
}
resultsList = append(resultsList, item)
}
}
if tmpl.GetType() == wfv1.TemplateTypeScript {
resultsJSON, err := json.Marshal(resultsList)
if err != nil {
return err
}
key := fmt.Sprintf("%s.outputs.result", prefix)
scope.addParamToScope(key, string(resultsJSON))
}
outputsJSON, err := json.Marshal(paramList)
if err != nil {
return err
}
key := fmt.Sprintf("%s.outputs.parameters", prefix)
scope.addParamToScope(key, string(outputsJSON))
return nil
}
// addParamToGlobalScope exports any desired node outputs to the global scope, and adds it to the global outputs.
func (woc *wfOperationCtx) addParamToGlobalScope(param wfv1.Parameter) {
if param.GlobalName == "" {
return
}
index := -1
if woc.wf.Status.Outputs != nil {
for i, gParam := range woc.wf.Status.Outputs.Parameters {
if gParam.Name == param.GlobalName {
index = i
break
}
}
} else {
woc.wf.Status.Outputs = &wfv1.Outputs{}
}
paramName := fmt.Sprintf("workflow.outputs.parameters.%s", param.GlobalName)
woc.globalParams[paramName] = param.Value.String()
if index == -1 {
woc.log.Infof("setting %s: '%s'", paramName, param.Value)
gParam := wfv1.Parameter{Name: param.GlobalName, Value: param.Value}
woc.wf.Status.Outputs.Parameters = append(woc.wf.Status.Outputs.Parameters, gParam)
woc.updated = true
} else {
prevVal := *woc.wf.Status.Outputs.Parameters[index].Value
if prevVal != *param.Value {
woc.log.Infof("overwriting %s: '%s' -> '%s'", paramName, woc.wf.Status.Outputs.Parameters[index].Value, param.Value)
woc.wf.Status.Outputs.Parameters[index].Value = param.Value
woc.updated = true
}
}
}
// addArtifactToGlobalScope exports any desired node outputs to the global scope
// Optionally adds to a local scope if supplied
func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact, scope *wfScope) {
if art.GlobalName == "" {
return
}
globalArtName := fmt.Sprintf("workflow.outputs.artifacts.%s", art.GlobalName)
if woc.wf.Status.Outputs != nil {
for i, gArt := range woc.wf.Status.Outputs.Artifacts {
if gArt.Name == art.GlobalName {
// global output already exists. overwrite the value if different
art.Name = art.GlobalName
art.GlobalName = ""
art.Path = ""
if !reflect.DeepEqual(woc.wf.Status.Outputs.Artifacts[i], art) {
woc.wf.Status.Outputs.Artifacts[i] = art
if scope != nil {
scope.addArtifactToScope(globalArtName, art)
}
woc.log.Infof("overwriting %s: %v", globalArtName, art)
woc.updated = true
}
return
}
}
} else {
woc.wf.Status.Outputs = &wfv1.Outputs{}
}
// global output does not yet exist
art.Name = art.GlobalName
art.GlobalName = ""
art.Path = ""
woc.log.Infof("setting %s: %v", globalArtName, art)
woc.wf.Status.Outputs.Artifacts = append(woc.wf.Status.Outputs.Artifacts, art)
if scope != nil {
scope.addArtifactToScope(globalArtName, art)
}
woc.updated = true
}
// addChildNode adds a nodeID as a child to a parent
// parent and child are both node names
func (woc *wfOperationCtx) addChildNode(parent string, child string) {
parentID := woc.wf.NodeID(parent)
childID := woc.wf.NodeID(child)
node, ok := woc.wf.Status.Nodes[parentID]
if !ok {
panic(fmt.Sprintf("parent node %s not initialized", parent))
}
for _, nodeID := range node.Children {
if childID == nodeID {
// already exists
return
}
}
node.Children = append(node.Children, childID)
woc.wf.Status.Nodes[parentID] = node
woc.updated = true
}
// executeResource runs a kubectl command against a manifest
func (woc *wfOperationCtx) executeResource(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
tmpl = tmpl.DeepCopy()
// Try to unmarshal the given manifest.
obj := unstructured.Unstructured{}
err := yaml.Unmarshal([]byte(tmpl.Resource.Manifest), &obj)
if err != nil {
return node, err
}
if tmpl.Resource.SetOwnerReference {
ownerReferences := obj.GetOwnerReferences()
obj.SetOwnerReferences(append(ownerReferences, *metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind))))
bytes, err := yaml.Marshal(obj.Object)
if err != nil {
return node, err
}
tmpl.Resource.Manifest = string(bytes)
}
mainCtr := woc.newExecContainer(common.MainContainerName, tmpl)
mainCtr.Command = []string{"argoexec", "resource", tmpl.Resource.Action}
_, err = woc.createWorkflowPod(nodeName, *mainCtr, tmpl, &createWorkflowPodOpts{onExitPod: opts.onExitTemplate, executionDeadline: opts.executionDeadline})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) executeSuspend(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypeSuspend, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
}
woc.log.Infof("node %s suspended", nodeName)
// If there is either an active workflow deadline, or if this node is suspended with a duration, then the workflow
// will need to be requeued after a certain amount of time
var requeueTime *time.Time
if tmpl.Suspend.Duration != "" {
node := woc.wf.GetNodeByName(nodeName)
suspendDuration, err := parseStringToDuration(tmpl.Suspend.Duration)
if err != nil {
return node, err
}
suspendDeadline := node.StartedAt.Add(suspendDuration)
requeueTime = &suspendDeadline
if time.Now().UTC().After(suspendDeadline) {
// Suspension is expired, node can be resumed
woc.log.Infof("auto resuming node %s", nodeName)
_ = woc.markNodePhase(nodeName, wfv1.NodeSucceeded)
return node, nil
}
}
// workflowDeadline is the time when the workflow will be timed out, if any
if workflowDeadline := woc.getWorkflowDeadline(); workflowDeadline != nil {
// There is an active workflow deadline. If this node is suspended with a duration, choose the earlier time
// between the two, otherwise choose the deadline time.
if requeueTime == nil || workflowDeadline.Before(*requeueTime) {
requeueTime = workflowDeadline
}
}
if requeueTime != nil {
woc.requeue(time.Until(*requeueTime))
}
_ = woc.markNodePhase(nodeName, wfv1.NodeRunning)
return node, nil
}
func addRawOutputFields(node *wfv1.NodeStatus, tmpl *wfv1.Template) *wfv1.NodeStatus {
if tmpl.GetType() != wfv1.TemplateTypeSuspend || node.Type != wfv1.NodeTypeSuspend {
panic("addRawOutputFields should only be used for nodes and templates of type suspend")
}
for _, param := range tmpl.Outputs.Parameters {
if param.ValueFrom.Supplied != nil {
if node.Outputs == nil {
node.Outputs = &wfv1.Outputs{Parameters: []wfv1.Parameter{}}
}
node.Outputs.Parameters = append(node.Outputs.Parameters, param)
}
}
return node
}
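// parseStringToDuration converts a duration string into a time.Duration. A bare integer is treated as seconds, so for example "30" yields 30 seconds while "2m" yields two minutes.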
func parseStringToDuration(durationString string) (time.Duration, error) {
var suspendDuration time.Duration
// If no units are attached, treat as seconds
if val, err := strconv.Atoi(durationString); err == nil {
suspendDuration = time.Duration(val) * time.Second
} else if duration, err := time.ParseDuration(durationString); err == nil {
suspendDuration = duration
} else {
return 0, fmt.Errorf("unable to parse %s as a duration", durationString)
}
return suspendDuration, nil
}
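// processItem substitutes a single withItems/withParam item into the serialized step or task template held by fstTmpl, unmarshals the result into obj, and returns the generated child node name.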
func processItem(fstTmpl *fasttemplate.Template, name string, index int, item wfv1.Item, obj interface{}) (string, error) {
replaceMap := make(map[string]string)
var newName string
switch item.GetType() {
case wfv1.String, wfv1.Number, wfv1.Bool:
replaceMap["item"] = fmt.Sprintf("%v", item)
newName = generateNodeName(name, index, item)
case wfv1.Map:
// Handle the case when withItems is a list of maps.
// vals holds stringified versions of the map items which are incorporated as part of the step name.
// For example if the item is: {"name": "jesse","group":"developer"}
// the vals would be: ["name:jesse", "group:developer"]
// This would eventually be part of the step name (group:developer,name:jesse)
vals := make([]string, 0)
mapVal := item.GetMapVal()
for itemKey, itemVal := range mapVal {
replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal)
vals = append(vals, fmt.Sprintf("%s:%v", itemKey, itemVal))
}
jsonByteVal, err := json.Marshal(mapVal)
if err != nil {
return "", errors.InternalWrapError(err)
}
replaceMap["item"] = string(jsonByteVal)
// sort the values so that the name is deterministic
sort.Strings(vals)
newName = generateNodeName(name, index, strings.Join(vals, ","))
case wfv1.List:
listVal := item.GetListVal()
byteVal, err := json.Marshal(listVal)
if err != nil {
return "", errors.InternalWrapError(err)
}
replaceMap["item"] = string(byteVal)
newName = generateNodeName(name, index, listVal)
default:
return "", errors.Errorf(errors.CodeBadRequest, "withItems[%d] expected string, number, list, or map. received: %v", index, item)
}
newStepStr, err := common.Replace(fstTmpl, replaceMap, false)
if err != nil {
return "", err
}
err = json.Unmarshal([]byte(newStepStr), &obj)
if err != nil {
return "", errors.InternalWrapError(err)
}
return newName, nil
}
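// generateNodeName builds a child node name of the form 'name(index:desc)' and panics if the index cannot be recovered from the generated name.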
func generateNodeName(name string, index int, desc interface{}) string {
newName := fmt.Sprintf("%s(%d:%v)", name, index, desc)
if out := util.RecoverIndexFromNodeName(newName); out != index {
panic(fmt.Sprintf("unrecoverable digit in generateName; wanted '%d' and got '%d'", index, out))
}
return newName
}
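// expandSequence expands a withSequence into a list of items; for example start=1 with count=3 yields "1", "2", "3", and when start is greater than end the sequence counts down.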
func expandSequence(seq *wfv1.Sequence) ([]wfv1.Item, error) {
var start, end int
var err error
if seq.Start != nil {
start, err = strconv.Atoi(seq.Start.String())
if err != nil {
return nil, err
}
}
if seq.End != nil {
end, err = strconv.Atoi(seq.End.String())
if err != nil {
return nil, err
}
} else if seq.Count != nil {
count, err := strconv.Atoi(seq.Count.String())
if err != nil {
return nil, err
}
if count == 0 {
return []wfv1.Item{}, nil
}
end = start + count - 1
} else {
return nil, errors.InternalError("neither end nor count was specified in withSequence")
}
items := make([]wfv1.Item, 0)
format := "%d"
if seq.Format != "" {
format = seq.Format
}
if start <= end {
for i := start; i <= end; i++ {
item, err := wfv1.ParseItem(`"` + fmt.Sprintf(format, i) + `"`)
if err != nil {
return nil, err
}
items = append(items, item)
}
} else {
for i := start; i >= end; i-- {
item, err := wfv1.ParseItem(`"` + fmt.Sprintf(format, i) + `"`)
if err != nil {
return nil, err
}
items = append(items, item)
}
}
return items, nil
}
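// substituteParamsInVolumes performs parameter substitution over the workflow's volumes by round-tripping them through JSON.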
func (woc *wfOperationCtx) substituteParamsInVolumes(params map[string]string) error {
if woc.volumes == nil {
return nil
}
volumes := woc.volumes
volumesBytes, err := json.Marshal(volumes)
if err != nil {
return errors.InternalWrapError(err)
}
fstTmpl, err := fasttemplate.NewTemplate(string(volumesBytes), "{{", "}}")
if err != nil {
return fmt.Errorf("unable to parse argo varaible: %w", err)
}
newVolumesStr, err := common.Replace(fstTmpl, params, true)
if err != nil {
return err
}
var newVolumes []apiv1.Volume
err = json.Unmarshal([]byte(newVolumesStr), &newVolumes)
if err != nil {
return errors.InternalWrapError(err)
}
woc.volumes = newVolumes
return nil
}
// createTemplateContext creates a new template context.
func (woc *wfOperationCtx) createTemplateContext(scope wfv1.ResourceScope, resourceName string) (*templateresolution.Context, error) {
var clusterWorkflowTemplateGetter templateresolution.ClusterWorkflowTemplateGetter
if woc.controller.cwftmplInformer != nil {
clusterWorkflowTemplateGetter = woc.controller.cwftmplInformer.Lister()
} else {
clusterWorkflowTemplateGetter = &templateresolution.NullClusterWorkflowTemplateGetter{}
}
ctx := templateresolution.NewContext(woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace), clusterWorkflowTemplateGetter, woc.execWf, woc.wf)
switch scope {
case wfv1.ResourceScopeNamespaced:
return ctx.WithWorkflowTemplate(resourceName)
case wfv1.ResourceScopeCluster:
return ctx.WithClusterWorkflowTemplate(resourceName)
default:
return ctx, nil
}
}
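// runOnExitNode executes an onExit handler template (when one is set and the workflow shutdown strategy allows it) as a child of the parent node, returning whether a handler was started.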
func (woc *wfOperationCtx) runOnExitNode(templateRef, parentDisplayName, parentNodeName, boundaryID string, tmplCtx *templateresolution.Context) (bool, *wfv1.NodeStatus, error) {
if templateRef != "" && woc.wf.Spec.Shutdown.ShouldExecute(true) {
woc.log.Infof("Running OnExit handler: %s", templateRef)
onExitNodeName := common.GenerateOnExitNodeName(parentDisplayName)
onExitNode, err := woc.executeTemplate(onExitNodeName, &wfv1.WorkflowStep{Template: templateRef}, tmplCtx, woc.execWf.Spec.Arguments, &executeTemplateOpts{
boundaryID: boundaryID,
onExitTemplate: true,
})
woc.addChildNode(parentNodeName, onExitNodeName)
return true, onExitNode, err
}
return false, nil, nil
}
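// computeMetrics emits the given Prometheus metric templates, substituting parameters from the local scope and wiring realtime metrics to the supplied value functions; when realTimeOnly is set, non-realtime metrics are skipped.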
func (woc *wfOperationCtx) computeMetrics(metricList []*wfv1.Prometheus, localScope map[string]string, realTimeScope map[string]func() float64, realTimeOnly bool) {
for _, metricTmpl := range metricList {
// Don't process real time metrics after execution
if realTimeOnly && !metricTmpl.IsRealtime() {
continue
}
if metricTmpl.Help == "" {
woc.reportMetricEmissionError(fmt.Sprintf("metric '%s' must contain a help string under 'help: ' field", metricTmpl.Name))
continue
}
// Substitute parameters in non-value fields of the template to support variables in places such as labels,
// name, and help. We do not substitute value fields (i.e. gauge, histogram, counter) here because they
// might be realtime ({{workflow.duration}} will not be substituted the same way if it's realtime or if it isn't).
metricTmplBytes, err := json.Marshal(metricTmpl)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s' (marshal): %s", metricTmpl.Name, err))
continue
}
fstTmpl, err := fasttemplate.NewTemplate(string(metricTmplBytes), "{{", "}}")
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo varaible for metric '%s': %s", metricTmpl.Name, err))
continue
}
replacedValue, err := common.Replace(fstTmpl, localScope, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s': %s", metricTmpl.Name, err))
continue
}
var metricTmplSubstituted wfv1.Prometheus
err = json.Unmarshal([]byte(replacedValue), &metricTmplSubstituted)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s' (unmarshal): %s", metricTmpl.Name, err))
continue
}
// Only substitute non-value fields here. Value field substitution happens below
metricTmpl.Name = metricTmplSubstituted.Name
metricTmpl.Help = metricTmplSubstituted.Help
metricTmpl.Labels = metricTmplSubstituted.Labels
metricTmpl.When = metricTmplSubstituted.When
proceed, err := shouldExecute(metricTmpl.When)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to compute 'when' clause for metric '%s': %s", woc.wf.ObjectMeta.Name, err))
continue
}
if !proceed {
continue
}
if metricTmpl.IsRealtime() {
// Finally substitute value parameters
value := metricTmpl.Gauge.Value
if !(strings.HasPrefix(value, "{{") && strings.HasSuffix(value, "}}")) {
woc.reportMetricEmissionError("real time metrics can only be used with metric variables")
continue
}
value = strings.TrimSuffix(strings.TrimPrefix(value, "{{"), "}}")
valueFunc, ok := realTimeScope[value]
if !ok {
woc.reportMetricEmissionError(fmt.Sprintf("'%s' is not available as a real time metric", value))
continue
}
updatedMetric, err := metrics.ConstructRealTimeGaugeMetric(metricTmpl, valueFunc)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricTmpl.Name, err))
continue
}
err = woc.controller.metrics.UpsertCustomMetric(metricTmpl.GetDesc(), string(woc.wf.UID), updatedMetric, true)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricTmpl.Name, err))
continue
}
continue
} else {
metricSpec := metricTmpl.DeepCopy()
// Finally substitute value parameters
fstTmpl, err = fasttemplate.NewTemplate(metricSpec.GetValueString(), "{{", "}}")
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo varaible for metric '%s': %s", metricTmpl.Name, err))
continue
}
replacedValue, err := common.Replace(fstTmpl, localScope, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s': %s", metricSpec.Name, err))
continue
}
metricSpec.SetValueString(replacedValue)
metric := woc.controller.metrics.GetCustomMetric(metricSpec.GetDesc())
// It is valid to pass a nil metric to ConstructOrUpdateMetric, in that case the metric will be created for us
updatedMetric, err := metrics.ConstructOrUpdateMetric(metric, metricSpec)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricSpec.Name, err))
continue
}
err = woc.controller.metrics.UpsertCustomMetric(metricSpec.GetDesc(), string(woc.wf.UID), updatedMetric, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricSpec.Name, err))
continue
}
continue
}
}
}
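// reportMetricEmissionError records a metric emission failure as a workflow condition and logs it.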
func (woc *wfOperationCtx) reportMetricEmissionError(errorString string) {
woc.wf.Status.Conditions.UpsertConditionMessage(
wfv1.Condition{
Status: metav1.ConditionTrue,
Type: wfv1.ConditionTypeMetricsError,
Message: errorString,
})
woc.updated = true
woc.log.Error(errorString)
}
func (woc *wfOperationCtx) createPDBResource() error {
if woc.execWf.Spec.PodDisruptionBudget == nil {
return nil
}
pdb, err := woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Get(woc.wf.Name, metav1.GetOptions{})
if err != nil && !apierr.IsNotFound(err) {
return err
}
if pdb != nil && pdb.Name != "" {
return nil
}
pdbSpec := *woc.execWf.Spec.PodDisruptionBudget
if pdbSpec.Selector == nil {
pdbSpec.Selector = &metav1.LabelSelector{
MatchLabels: map[string]string{common.LabelKeyWorkflow: woc.wf.Name},
}
}
newPDB := policyv1beta.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: woc.wf.Name,
Labels: map[string]string{common.LabelKeyWorkflow: woc.wf.Name},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
},
},
Spec: pdbSpec,
}
_, err = woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Create(&newPDB)
if err != nil {
return err
}
woc.log.Infof("Created PDB resource for workflow.")
woc.updated = true
return nil
}
func (woc *wfOperationCtx) deletePDBResource() error {
if woc.execWf.Spec.PodDisruptionBudget == nil {
return nil
}
err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
err := woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Delete(woc.wf.Name, &metav1.DeleteOptions{})
if err != nil && !apierr.IsNotFound(err) {
woc.log.WithField("err", err).Warn("Failed to delete PDB.")
if !errorsutil.IsTransientErr(err) {
return false, err
}
return false, nil
}
return true, nil
})
if err != nil {
woc.log.WithField("err", err).Error("Unable to delete PDB resource for workflow.")
return err
}
woc.log.Infof("Deleted PDB resource for workflow.")
return nil
}
// Check if the output of this node is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
func (woc *wfOperationCtx) includeScriptOutput(nodeName, boundaryID string) (bool, error) {
if boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]; ok {
tmplCtx, err := woc.createTemplateContext(boundaryNode.GetTemplateScope())
if err != nil {
return false, err
}
_, parentTemplate, templateStored, err := tmplCtx.ResolveTemplate(&boundaryNode)
if err != nil {
return false, err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
name := getStepOrDAGTaskName(nodeName)
return hasOutputResultRef(name, parentTemplate), nil
}
return false, nil
}
func (woc *wfOperationCtx) getArtifactRepositoryByRef(arRef *wfv1.ArtifactRepositoryRef) (*config.ArtifactRepository, error) {
namespaces := []string{woc.wf.ObjectMeta.Namespace, woc.controller.namespace}
for _, namespace := range namespaces {
cm, err := woc.controller.kubeclientset.CoreV1().ConfigMaps(namespace).Get(arRef.GetConfigMap(), metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
continue
}
return nil, err
}
value, ok := cm.Data[arRef.Key]
if !ok {
continue
}
woc.log.WithFields(log.Fields{"namespace": namespace, "name": cm.Name}).Debug("Found artifact repository by ref")
ar := &config.ArtifactRepository{}
err = yaml.Unmarshal([]byte(value), ar)
if err != nil {
return nil, err
}
return ar, nil
}
return nil, fmt.Errorf("failed to find artifactory ref {%s}/%s#%s", strings.Join(namespaces, ","), arRef.GetConfigMap(), arRef.Key)
}
func (woc *wfOperationCtx) fetchWorkflowSpec() (*wfv1.WorkflowSpec, error) {
if woc.wf.Spec.WorkflowTemplateRef == nil {
return nil, fmt.Errorf("cannot fetch workflow spec without workflowTemplateRef")
}
var specHolder wfv1.WorkflowSpecHolder
var err error
// Resolve the spec from the referenced WorkflowTemplate or ClusterWorkflowTemplate
if woc.wf.Spec.WorkflowTemplateRef.ClusterScope {
specHolder, err = woc.controller.cwftmplInformer.Lister().Get(woc.wf.Spec.WorkflowTemplateRef.Name)
} else {
specHolder, err = woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace).Get(woc.wf.Spec.WorkflowTemplateRef.Name)
}
if err != nil {
return nil, err
}
return specHolder.GetWorkflowSpec(), nil
}
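// loadExecutionSpec resolves the spec that will actually be executed: either the workflow's own spec, or the referenced workflow template spec (stored on first use) merged with the workflow spec and controller defaults. It returns the entrypoint template reference and the execution arguments.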
func (woc *wfOperationCtx) loadExecutionSpec() (wfv1.TemplateReferenceHolder, wfv1.Arguments, error) {
executionParameters := woc.wf.Spec.Arguments
if woc.wf.Spec.WorkflowTemplateRef == nil {
if woc.controller.Config.WorkflowRestrictions.MustUseReference() {
return nil, executionParameters, fmt.Errorf("workflows must use workflowTemplateRef to be executed when the controller is in reference mode")
}
// Set the WorkflowDefaults from Configmap
err := woc.controller.setWorkflowDefaults(woc.wf)
if err != nil {
log.WithFields(log.Fields{"key": woc.wf.Name, "error": err}).Warn("Failed to apply default workflow values")
return nil, executionParameters, err
}
tmplRef := &wfv1.WorkflowStep{Template: woc.wf.Spec.Entrypoint}
return tmplRef, executionParameters, nil
}
if woc.wf.Status.StoredWorkflowSpec == nil {
wftSpec, err := woc.fetchWorkflowSpec()
if err != nil {
return nil, executionParameters, err
}
woc.wf.Status.StoredWorkflowSpec = wftSpec
woc.updated = true
} else if woc.controller.Config.WorkflowRestrictions.MustNotChangeSpec() {
// If the controller is in reference mode, ensure that the stored spec is identical to the reference spec at every operation
wftSpec, err := woc.fetchWorkflowSpec()
if err != nil {
return nil, executionParameters, err
}
if woc.wf.Status.StoredWorkflowSpec.String() != wftSpec.String() {
return nil, executionParameters, fmt.Errorf("workflowTemplateRef reference may not change during execution when the controller is in reference mode")
}
}
// In the WorkflowTemplateRef scenario, we need to merge the Workflow spec, the StoredWorkflowSpec and the workflow defaults into execWf.
// Overlay
targetWf := wfv1.Workflow{Spec: *woc.wf.Spec.DeepCopy()}
err := wfutil.MergeTo(&wfv1.Workflow{Spec: *woc.wf.Status.StoredWorkflowSpec}, &targetWf)
if err != nil {
return nil, executionParameters, err
}
err = woc.controller.setWorkflowDefaults(&targetWf)
if err != nil {
log.WithFields(log.Fields{"key": woc.wf.Name, "error": err}).Warn("Failed to apply default workflow values")
return nil, executionParameters, err
}
// Set the merged workflow as the executable workflow
woc.execWf = &targetWf
woc.volumes = woc.execWf.Spec.DeepCopy().Volumes
tmplRef := &wfv1.WorkflowStep{TemplateRef: woc.wf.Spec.WorkflowTemplateRef.ToTemplateRef(woc.execWf.Spec.Entrypoint)}
if len(woc.execWf.Spec.Arguments.Parameters) > 0 {
executionParameters.Parameters = util.MergeParameters(executionParameters.Parameters, woc.execWf.Spec.Arguments.Parameters)
}
executionParameters.Artifacts = util.MergeArtifacts(executionParameters.Artifacts, woc.execWf.Spec.Arguments.Artifacts)
return tmplRef, executionParameters, nil
}
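// retryStrategy returns the template-level retry strategy when one is set, otherwise the workflow-level default.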
func (woc *wfOperationCtx) retryStrategy(tmpl *wfv1.Template) *wfv1.RetryStrategy {
if tmpl != nil && tmpl.RetryStrategy != nil {
return tmpl.RetryStrategy
}
return woc.execWf.Spec.RetryStrategy
}
|
[
"\"INFORMER_WRITE_BACK\""
] |
[] |
[
"INFORMER_WRITE_BACK"
] |
[]
|
["INFORMER_WRITE_BACK"]
|
go
| 1 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/scrivstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *scriv_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("scriv-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[] |
[] |
[
"XGETTEXT"
] |
[]
|
["XGETTEXT"]
|
python
| 1 | 0 | |
example/idtoken/app.go
|
/*
This is an example application to demonstrate parsing an ID Token.
*/
package main
import (
"encoding/json"
"log"
"net/http"
"os"
oidc "github.com/peterzandbergen/go-oidc"
"golang.org/x/net/context"
"golang.org/x/oauth2"
)
var (
clientID = os.Getenv("GOOGLE_OAUTH2_CLIENT_ID")
clientSecret = os.Getenv("GOOGLE_OAUTH2_CLIENT_SECRET")
)
func main() {
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, "https://accounts.google.com")
if err != nil {
log.Fatal(err)
}
oidcConfig := &oidc.Config{
ClientID: clientID,
}
verifier := provider.Verifier(oidcConfig)
config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
Endpoint: provider.Endpoint(),
RedirectURL: "http://127.0.0.1:5556/auth/google/callback",
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
state := "foobar" // Don't do this in production.
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, config.AuthCodeURL(state), http.StatusFound)
})
http.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
if r.URL.Query().Get("state") != state {
http.Error(w, "state did not match", http.StatusBadRequest)
return
}
oauth2Token, err := config.Exchange(ctx, r.URL.Query().Get("code"))
if err != nil {
http.Error(w, "Failed to exchange token: "+err.Error(), http.StatusInternalServerError)
return
}
rawIDToken, ok := oauth2Token.Extra("id_token").(string)
if !ok {
http.Error(w, "No id_token field in oauth2 token.", http.StatusInternalServerError)
return
}
idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
http.Error(w, "Failed to verify ID Token: "+err.Error(), http.StatusInternalServerError)
return
}
oauth2Token.AccessToken = "*REDACTED*"
resp := struct {
OAuth2Token *oauth2.Token
IDTokenClaims *json.RawMessage // ID Token payload is just JSON.
}{oauth2Token, new(json.RawMessage)}
if err := idToken.Claims(&resp.IDTokenClaims); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data, err := json.MarshalIndent(resp, "", " ")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Write(data)
})
log.Printf("listening on http://%s/", "127.0.0.1:5556")
log.Fatal(http.ListenAndServe("127.0.0.1:5556", nil))
}
|
[
"\"GOOGLE_OAUTH2_CLIENT_ID\"",
"\"GOOGLE_OAUTH2_CLIENT_SECRET\""
] |
[] |
[
"GOOGLE_OAUTH2_CLIENT_ID",
"GOOGLE_OAUTH2_CLIENT_SECRET"
] |
[]
|
["GOOGLE_OAUTH2_CLIENT_ID", "GOOGLE_OAUTH2_CLIENT_SECRET"]
|
go
| 2 | 0 | |
0_Companies/ATHENA2.py
|
# Athena Health Preliminary Test - II
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'moves' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY arr as parameter.
#
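# Approach: e counts the even numbers in arr; ee then counts the evens found at index >= e,
# which (presumably) is the minimum number of moves needed to bring all evens to the front.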
def moves(arr):
ee=e=0
for i in range(0,len(arr)):
if(arr[i]%2==0): e+=1
for i in range(0,len(arr)):
if(arr[i]%2==0):
if(i>=e): ee+=1
return ee
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = []
for _ in range(arr_count):
arr_item = int(input().strip())
arr.append(arr_item)
result = moves(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
example/object/put_with_transport.go
|
package main
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"os"
"time"
"github.com/tencentyun/cos-go-sdk-v5"
)
func log_status(err error) {
if err == nil {
return
}
if cos.IsNotFoundError(err) {
// WARN
fmt.Println("WARN: Resource is not existed")
} else if e, ok := cos.IsCOSError(err); ok {
fmt.Printf("ERROR: Code: %v\n", e.Code)
fmt.Printf("ERROR: Message: %v\n", e.Message)
fmt.Printf("ERROR: Resource: %v\n", e.Resource)
fmt.Printf("ERROR: RequestId: %v\n", e.RequestID)
// ERROR
} else {
fmt.Printf("ERROR: %v\n", err)
// ERROR
}
}
func main() {
u, _ := url.Parse("https://test-1259654469.cos.ap-guangzhou.myqcloud.com")
b := &cos.BaseURL{BucketURL: u}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
// base on http.DefaultTransport
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// ResponseHeaderTimeout: 1 * time.Second,
// MaxIdleConnsPerHost: 100,
// MaxIdleConns: 100,
},
},
})
// Case 1: upload an object
name := "test/example"
// Case 3: upload an object from a local file
_, err := c.Object.PutFromFile(context.Background(), name, "./test", nil) // The request timeout is min{context timeout, HTTP timeout}
log_status(err)
}
|
[
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
] |
[] |
[
"COS_SECRETKEY",
"COS_SECRETID"
] |
[]
|
["COS_SECRETKEY", "COS_SECRETID"]
|
go
| 2 | 0 | |
logger.go
|
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"fmt"
"io"
"net/http"
"os"
"time"
"github.com/mattn/go-isatty"
)
type consoleColorModeValue int
const (
autoColor consoleColorModeValue = iota
disableColor
forceColor
)
const (
green = "\033[97;42m"
white = "\033[90;47m"
yellow = "\033[90;43m"
red = "\033[97;41m"
blue = "\033[97;44m"
magenta = "\033[97;45m"
cyan = "\033[97;46m"
reset = "\033[0m"
)
var consoleColorMode = autoColor
// LoggerConfig defines the config for Logger middleware.
type LoggerConfig struct {
// Optional. Default value is gin.defaultLogFormatter
Formatter LogFormatter
// Output is a writer where logs are written.
// Optional. Default value is gin.DefaultWriter.
Output io.Writer
// SkipPaths is a url path array which logs are not written.
// Optional.
SkipPaths []string
}
// LogFormatter gives the signature of the formatter function passed to LoggerWithFormatter
type LogFormatter func(params LogFormatterParams) string
// LogFormatterParams is the structure any formatter will be handed when time to log comes
type LogFormatterParams struct {
Request *http.Request
// TimeStamp shows the time after the server returns a response.
TimeStamp time.Time
// StatusCode is HTTP response code.
StatusCode int
// Latency is how much time the server cost to process a certain request.
Latency time.Duration
// ClientIP equals Context's ClientIP method.
ClientIP string
// Method is the HTTP method given to the request.
Method string
// Path is a path the client requests.
Path string
// ErrorMessage is set if error has occurred in processing the request.
ErrorMessage string
// isTerm shows whether gin's output descriptor refers to a terminal.
isTerm bool
// BodySize is the size of the Response Body
BodySize int
// Keys are the keys set on the request's context.
Keys map[string]interface{}
}
// StatusCodeColor is the ANSI color for appropriately logging http status code to a terminal.
func (p *LogFormatterParams) StatusCodeColor() string {
code := p.StatusCode
switch {
case code >= http.StatusOK && code < http.StatusMultipleChoices:
return green
case code >= http.StatusMultipleChoices && code < http.StatusBadRequest:
return white
case code >= http.StatusBadRequest && code < http.StatusInternalServerError:
return yellow
default:
return red
}
}
// MethodColor is the ANSI color for appropriately logging http method to a terminal.
func (p *LogFormatterParams) MethodColor() string {
method := p.Method
switch method {
case http.MethodGet:
return blue
case http.MethodPost:
return cyan
case http.MethodPut:
return yellow
case http.MethodDelete:
return red
case http.MethodPatch:
return green
case http.MethodHead:
return magenta
case http.MethodOptions:
return white
default:
return reset
}
}
// ResetColor resets all escape attributes.
func (p *LogFormatterParams) ResetColor() string {
return reset
}
// IsOutputColor indicates whether colors can be output to the log.
func (p *LogFormatterParams) IsOutputColor() bool {
return consoleColorMode == forceColor || (consoleColorMode == autoColor && p.isTerm)
}
// defaultLogFormatter is the default log format function Logger middleware uses.
var defaultLogFormatter = func(param LogFormatterParams) string {
var statusColor, methodColor, resetColor string
if param.IsOutputColor() {
statusColor = param.StatusCodeColor()
methodColor = param.MethodColor()
resetColor = param.ResetColor()
}
if param.Latency > time.Minute {
// Truncate in a golang < 1.8 safe way
param.Latency = param.Latency - param.Latency%time.Second
}
return fmt.Sprintf("[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %s\n%s",
param.TimeStamp.Format("2006/01/02 - 15:04:05"),
statusColor, param.StatusCode, resetColor,
param.Latency,
param.ClientIP,
methodColor, param.Method, resetColor,
param.Path,
param.ErrorMessage,
)
}
// DisableConsoleColor disables color output in the console.
func DisableConsoleColor() {
consoleColorMode = disableColor
}
// ForceConsoleColor forces color output in the console.
func ForceConsoleColor() {
consoleColorMode = forceColor
}
// ErrorLogger returns a handlerfunc for any error type.
func ErrorLogger() HandlerFunc {
return ErrorLoggerT(ErrorTypeAny)
}
// ErrorLoggerT returns a handlerfunc for a given error type.
func ErrorLoggerT(typ ErrorType) HandlerFunc {
return func(c *Context) {
c.Next()
errors := c.Errors.ByType(typ)
if len(errors) > 0 {
c.JSON(-1, errors)
}
}
}
// Logger instantiates a Logger middleware that will write the logs to gin.DefaultWriter.
// By default gin.DefaultWriter = os.Stdout.
func Logger() HandlerFunc {
return LoggerWithConfig(LoggerConfig{})
}
// LoggerWithFormatter instantiates a Logger middleware with the specified log format function.
func LoggerWithFormatter(f LogFormatter) HandlerFunc {
return LoggerWithConfig(LoggerConfig{
Formatter: f,
})
}
// LoggerWithWriter instantiates a Logger middleware with the specified writer buffer.
// Example: os.Stdout, a file opened in write mode, a socket...
func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
return LoggerWithConfig(LoggerConfig{
Output: out,
SkipPaths: notlogged,
})
}
// LoggerWithConfig instantiates a Logger middleware with config.
func LoggerWithConfig(conf LoggerConfig) HandlerFunc {
formatter := conf.Formatter
if formatter == nil {
formatter = defaultLogFormatter
}
out := conf.Output
if out == nil {
out = DefaultWriter
}
notlogged := conf.SkipPaths
isTerm := true
if w, ok := out.(*os.File); !ok || os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd())) {
isTerm = false
}
var skip map[string]struct{}
if length := len(notlogged); length > 0 {
skip = make(map[string]struct{}, length)
for _, path := range notlogged {
skip[path] = struct{}{}
}
}
return func(c *Context) {
// Start timer
start := time.Now()
path := c.Request.URL.Path
raw := c.Request.URL.RawQuery
// Process request
c.Next()
// Log only when path is not being skipped
if _, ok := skip[path]; !ok {
param := LogFormatterParams{
Request: c.Request,
isTerm: isTerm,
Keys: c.Keys,
}
// Stop timer
param.TimeStamp = time.Now()
param.Latency = param.TimeStamp.Sub(start)
param.ClientIP = c.ClientIP()
param.Method = c.Request.Method
param.StatusCode = c.Writer.Status()
param.ErrorMessage = c.Errors.ByType(ErrorTypePrivate).String()
param.BodySize = c.Writer.Size()
if raw != "" {
path = path + "?" + raw
}
param.Path = path
fmt.Fprint(out, formatter(param))
}
}
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
cmd/edith/main.go
|
// This is the cli client for edithd (the server daemon)
package main
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/yeboahnanaosei/edith"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
// showUsage prints to stdout details of how to use this tool
func showUsage() {
usage := `
USAGE: edith ACTION RECIPIENT ITEM PAYLOAD
Send a one line text to Nana
eg: edith send nana text "The text you want to send"
Send a file to nana
eg: edith send nana file /path/to/file/you/want/to/send.txt
Send a multi-line text to Kwakye
eg: edith send kwakye text
Type or paste your multiline
text here. The lines can be as
many as you want
Ctrl+D (Linux/Mac) or Ctrl+Z(Windows) to send
Send some credentials to Danny
eg: edith send danny text
Username: username
Password: password
Ctrl+D (Linux/Mac) or Ctrl+Z(Windows) to send
DETAILS:
ACTION
ACTION is what you want edith to do
value: send or get
eg: edith send RECIPIENT ITEM PAYLOAD
RECIPIENT
RECIPIENT is the person you are sending to
value: name of a colleague eg. kwakye, nana etc.
eg: edith ACTION nana ITEM PAYLOAD
ITEM
ITEM is the thing you want to send either a text or file
value: text or file
eg: edith ACTION RECIPIENT text PAYLOAD
eg: edith ACTION RECIPIENT file PAYLOAD
PAYLOAD
PAYLOAD is the actual thing you are sending
value:
if ITEM is "file", then value for PAYLOAD is the path to the file
eg. edith send nana file /path/to/the/file.ext
if ITEM is "text", then value for PAYLOAD is the text you want to send
eg. edith send nana text "The text I want to send"
If you want to send a multiline text, don't supply payload. Just press
enter and type your text. When you are done just hit the ENTER key
followed by Ctrl+D on Linux/Mac or Ctrl+Z on Windows
Notice no payload after 'text'
eg. edith send nana text
Type or paste your multi-line text here.
You are free to type as many lines as you want
Ctrl+D or Ctrl+Z // To finish your text
`
fmt.Fprint(os.Stdout, usage)
}
// performFirstRun takes the user's name and saves it in the config file
func performFirstRun(configFile io.Writer) (string, error) {
fmt.Println("\nThis appears to be your first run. Let's get you setup")
fmt.Print("Please enter your name: ")
var name string
fmt.Scanln(&name)
if len(name) == 0 {
return "", errors.New("no name supplied")
}
_, err := configFile.Write([]byte(fmt.Sprintf("name: %s", name)))
if err != nil {
return "", fmt.Errorf("could not write name to config file: %v", err)
}
fmt.Println()
return name, nil
}
// getUserName gets the name of the user calling this tool.
// We need the name of the user to prepare a request to be sent to the server.
// If it is the first time the program is being called, performFirstRun() will be
// called
func getUserName() (string, error) {
configFilePath := filepath.Join(os.Getenv("HOME"), ".edithrc")
configFile, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
if os.IsNotExist(err) {
return performFirstRun(configFile)
}
defer configFile.Close()
finfo, err := configFile.Stat()
if err != nil {
return "", fmt.Errorf("could not stat config file: %v", err)
}
if finfo.Size() < 1 {
return performFirstRun(configFile)
}
b, err := ioutil.ReadAll(configFile)
if err != nil {
return "", fmt.Errorf("could not read from config file: %v", err)
}
return string(bytes.Split(b, []byte(": "))[1]), nil
}
func getTextInput() ([]byte, error) {
var input string
fmt.Print("You can type or paste the text you want to send below:\n(Ctrl+D to send | Ctrl+C to cancel)\n\n")
s := bufio.NewScanner(os.Stdin)
for s.Scan() {
input += s.Text() + "\n"
}
if len(input) == 0 {
return nil, errors.New("cannot continue with empty text. you need to supply some text")
}
return []byte(input), nil
}
// prepareRequest parses the args from the command line and prepares an appropriate
// request object to send to the server
func prepareRequest(args []string) (*edith.Request, error) {
if len(args) < 3 {
return nil, errors.New("not enough arguments supplied. at least three args must be provided")
}
// actions := []string{"send", "get"} // actions are the actions that edith can perform. edith can send or get items
// items := []string{"text", "file"} // items are the things that can be sent by edith. edith can send text or files
sender, err := getUserName()
if err != nil {
return nil, fmt.Errorf("failed to get sender name: %v", err)
}
action := strings.ToLower(args[0])
recipient := strings.ToLower(args[1])
item := strings.ToLower(args[2])
if action != "send" && action != "get" {
return nil, fmt.Errorf("unknown action '%s', expected one of [send, get]", action)
}
if recipient == "" {
return nil, errors.New("you did not supply the name of your recipient. recipient cannot be empty")
}
if item != "file" && item != "text" {
return nil, fmt.Errorf("unknown item '%s'. expected one of [file, text]", item)
}
req := &edith.Request{}
switch action {
case "send":
// Handle the case where the item type is text
if item == "text" && len(args) > 3 {
req.Body = []byte(strings.Join(args[3:], " "))
if len(req.Body) == 0 {
return nil, errors.New("cannot continue with empty text. you need to supply some text")
}
req.Type = "text"
} else if item == "text" && len(args) == 3 {
req.Body, err = getTextInput()
if err != nil {
return nil, err
}
if len(req.Body) == 0 {
return nil, errors.New("cannot continue with empty text. you need to supply some text")
}
req.Type = "text"
}
// Handle if user wants to send a file
if item == "file" && len(args) == 3 {
return nil, errors.New("you are attempting to send a file but you did not supply the path to the file")
} else if item == "file" && len(args) > 3 {
abs, err := filepath.Abs(args[3])
if err != nil {
return nil, fmt.Errorf("could not determine the absolute path to the file you supplied %s: %v", args[3], err)
}
req.Body, err = ioutil.ReadFile(abs)
if err != nil {
return nil, fmt.Errorf("could not get the contents of the file you supplied %s: %v", args[3], err)
}
if len(req.Body) == 0 {
return nil, errors.New("file appears to be empty")
}
req.Filename = args[3]
req.Type = "file"
}
req.Sender, req.Recipient = sender, recipient
case "get":
req.Sender, req.Recipient, req.Type = recipient, sender, item
}
return req, nil
}
// makeRequest makes the request to the server
func makeRequest(ctx context.Context, action string, client edith.EdithClient, request *edith.Request) (interface{}, error) {
action = strings.ToLower(action)
if action == "send" {
switch request.Type {
case "file":
return client.SendFile(ctx, request)
case "text":
return client.SendText(ctx, request)
default:
return nil, errors.New("unknown item type: " + request.Type)
}
}
if action == "get" {
switch request.Type {
case "file":
return client.GetFile(ctx, request)
case "text":
return client.GetText(ctx, request)
default:
return nil, errors.New("unknown item type: " + request.Type)
}
}
return nil, errors.New("unknown action type: " + action)
}
// This can be changed using ldflags when building for release
var serverAddr = "localhost:54920"
// createClient creates the edith grpc client for communication with the server
func createClient() (edith.EdithClient, error) {
con, err := grpc.Dial(serverAddr, grpc.WithInsecure())
if err != nil {
return nil, err
}
return edith.NewEdithClient(con), nil
}
func main() {
if len(os.Args) < 4 {
fmt.Fprintf(os.Stderr, "edith: not enough arguments supplied\n")
showUsage()
os.Exit(1)
}
request, err := prepareRequest(os.Args[1:])
if err != nil {
fmt.Fprintf(os.Stderr, "edith: error preparing request: %v\n", err)
os.Exit(1)
}
client, err := createClient()
if err != nil {
fmt.Fprintf(os.Stderr, "edith: error creating edith client: %v\n", err)
os.Exit(1)
}
res, err := makeRequest(context.Background(), os.Args[1], client, request)
if err != nil {
	if st, ok := status.FromError(err); ok {
		fmt.Fprintf(os.Stderr, "edith: %v\n", st.Message())
	} else {
		fmt.Fprintf(os.Stderr, "edith: %v\n", err)
	}
	os.Exit(1)
}
switch r := res.(type) {
case *edith.RequestItems:
itemsLen := len(r.Texts)
if itemsLen == 0 {
fmt.Fprintf(os.Stdout, "\nYou have no items from %s\n", strings.Title(os.Args[2]))
os.Exit(0)
}
fmt.Fprintf(os.Stdout, "\nLast 5 texts from %s:\n\n", strings.Title(os.Args[2]))
for x := itemsLen; x > 0; x-- {
fmt.Fprintf(os.Stdout, "%d.\n%s\n\n", x, r.Texts[x-1].Body)
}
os.Exit(0)
case *edith.Response:
fmt.Fprintf(os.Stdout, "edith: %v\n", r.Msg)
os.Exit(0)
default:
fmt.Fprintf(os.Stderr, "edith: unknown type of res: %T", res)
os.Exit(1)
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cmd/jmc/main.go
|
// Copyright 2021 zs. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"os"
"github.com/zs5460/jmc"
"github.com/zs5460/my"
)
var (
m string
i string
p string
)
func main() {
flag.StringVar(&m, "m", "encode", "mode:encode or decode")
flag.StringVar(&i, "i", "config.json", "input file name")
flag.Parse()
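	// JMC_K, when set to a value of exactly 16, 24 or 32 characters, overrides the package key jmc.K.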
p = os.Getenv("JMC_K")
if len(p) == 16 || len(p) == 24 || len(p) == 32 {
jmc.K = p
}
cnt, err := my.ReadText(i)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
switch m {
case "encode":
cnt = jmc.Encode(cnt)
case "decode":
cnt, err = jmc.Decode(cnt)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
default:
fmt.Println("invalid mode,only support encode and decode.")
os.Exit(1)
}
err = my.WriteText(i, cnt)
if err != nil {
fmt.Println(err)
}
}
|
[
"\"JMC_K\""
] |
[] |
[
"JMC_K"
] |
[]
|
["JMC_K"]
|
go
| 1 | 0 | |
services/softlayer_network_storage_test.go
|
package services_test
import (
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"errors"
slclientfakes "github.com/maximilien/softlayer-go/client/fakes"
datatypes "github.com/maximilien/softlayer-go/data_types"
"github.com/maximilien/softlayer-go/softlayer"
fakeServices "github.com/maximilien/softlayer-go/softlayer/fakes"
testhelpers "github.com/maximilien/softlayer-go/test_helpers"
)
var _ = Describe("SoftLayer_Network_Storage", func() {
var (
username, apiKey string
fakeClient *slclientfakes.FakeSoftLayerClient
volume datatypes.SoftLayer_Network_Storage
billingItem datatypes.SoftLayer_Billing_Item
networkStorageService softlayer.SoftLayer_Network_Storage_Service
err error
fake_softlayer_product_package_service fakeServices.FakeSoftLayer_Product_Package_Service
)
BeforeEach(func() {
username = os.Getenv("SL_USERNAME")
Expect(username).ToNot(Equal(""))
apiKey = os.Getenv("SL_API_KEY")
Expect(apiKey).ToNot(Equal(""))
fakeClient = slclientfakes.NewFakeSoftLayerClient(username, apiKey)
Expect(fakeClient).ToNot(BeNil())
networkStorageService, err = fakeClient.GetSoftLayer_Network_Storage_Service()
Expect(err).ToNot(HaveOccurred())
Expect(networkStorageService).ToNot(BeNil())
volume = datatypes.SoftLayer_Network_Storage{}
})
Context("#GetName", func() {
It("returns the name for the service", func() {
name := networkStorageService.GetName()
Expect(name).To(Equal("SoftLayer_Network_Storage"))
})
})
Context("#CreateIscsiVolume", func() {
BeforeEach(func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Network_Storage_Service_getIscsiVolume.json")
Expect(err).ToNot(HaveOccurred())
})
It("fails with error if the volume size is negative", func() {
volume, err = networkStorageService.CreateNetworkStorage(-1, 1000, "fake-location", true)
Expect(err).To(HaveOccurred())
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err = networkStorageService.CreateNetworkStorage(-1, 1000, "fake-location", true)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err = networkStorageService.CreateNetworkStorage(-1, 1000, "fake-location", true)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#GetIscsiVolume", func() {
It("returns the iSCSI volume object based on volume id", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Network_Storage_Service_getIscsiVolume.json")
Expect(err).ToNot(HaveOccurred())
volume, err = networkStorageService.GetNetworkStorage(1)
Expect(err).ToNot(HaveOccurred())
Expect(volume.Id).To(Equal(1))
Expect(volume.Username).To(Equal("test_username"))
Expect(volume.Password).To(Equal("test_password"))
Expect(volume.CapacityGb).To(Equal(20))
Expect(volume.ServiceResourceBackendIpAddress).To(Equal("1.1.1.1"))
})
Context("when HTTP client returns error codes 40x or 50x", func() {
BeforeEach(func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Network_Storage_Service_getIscsiVolume.json")
Expect(err).ToNot(HaveOccurred())
})
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err = networkStorageService.GetNetworkStorage(1)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err = networkStorageService.GetNetworkStorage(1)
Expect(err).To(HaveOccurred())
}
})
})
Context("when SL API endpoint is stable, no need to retry", func() {
BeforeEach(func() {
fileNames := []string{
"SoftLayer_Product_Package_getItemPrices.json",
"SoftLayer_Product_Package_getItemPricesBySizeAndIops.json",
"SoftLayer_Product_Package_getItems.json",
"SoftLayer_Product_Order_PlaceContainerOrderNetworkPerformanceStorageIscsi.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
}
testhelpers.SetTestFixturesForFakeSoftLayerClient(fakeClient, fileNames)
})
It("orders an iSCSI volume successfully", func() {
volume, err = networkStorageService.CreateNetworkStorage(20, 1000, "fake-location", true)
Expect(err).ToNot(HaveOccurred())
})
})
Context("when SL API endpoint is unstable, timeout after several times of retries", func() {
BeforeEach(func() {
fileNames := []string{
"SoftLayer_Product_Package_getItemPrices.json",
"SoftLayer_Product_Package_getItemPricesBySizeAndIops.json",
"SoftLayer_Product_Package_getItems.json",
"SoftLayer_Product_Order_PlaceContainerOrderNetworkPerformanceStorageIscsi.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
}
testhelpers.SetTestFixturesForFakeSoftLayerClient(fakeClient, fileNames)
fakeClient.FakeHttpClient.DoRawHttpRequestError = errors.New("Timeout due to unstable SoftLayer endpoint")
os.Setenv("SL_CREATE_ISCSI_VOLUME_TIMEOUT", "3")
os.Setenv("SL_CREATE_ISCSI_VOLUME_POLLING_INTERVAL", "1")
})
It("fails to order an iSCSI volume", func() {
volume, err = networkStorageService.CreateNetworkStorage(20, 1000, "fake-location", true)
Expect(err).To(HaveOccurred())
})
})
Context("when ordering diffrent size of iSCSI disks without specifying IOPS", func() {
var (
itemPrices []datatypes.SoftLayer_Product_Item_Price
)
BeforeEach(func() {
fakeClient.SoftLayerServices["SoftLayer_Product_Package"] = &fake_softlayer_product_package_service
itemPrices = []datatypes.SoftLayer_Product_Item_Price{
datatypes.SoftLayer_Product_Item_Price{
Id: 1233,
LocationGroupId: 0,
Categories: []datatypes.Category{
datatypes.Category{
Id: 1233,
CategoryCode: "1",
},
},
Item: &datatypes.Item{
Id: 5298,
Capacity: "800",
},
},
}
fake_softlayer_product_package_service.GetItemPricesReturns(itemPrices, nil)
fileNames := []string{
"SoftLayer_Product_Order_PlaceContainerOrderNetworkPerformanceStorageIscsi.json",
"SoftLayer_Account_Service_getIscsiNetworkStorage.json",
}
testhelpers.SetTestFixturesForFakeSoftLayerClient(fakeClient, fileNames)
})
It("retrieve IOPS of 100G disk", func() {
volume, err = networkStorageService.CreateNetworkStorage(100, 0, "fake-location", true)
Expect(err).ToNot(HaveOccurred())
Expect(fake_softlayer_product_package_service.GetItemPricesCallCount()).To(Equal(2))
})
It("retrieve IOPS of 250G disk", func() {
volume, err = networkStorageService.CreateNetworkStorage(250, 0, "fake-location", true)
Expect(err).ToNot(HaveOccurred())
})
It("retrieve IOPS of 2000G disk", func() {
volume, err = networkStorageService.CreateNetworkStorage(2000, 0, "fake-location", true)
Expect(err).ToNot(HaveOccurred())
})
})
})
Context("#GetBillingItem", func() {
BeforeEach(func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Network_Storage_Service_getBillingItem.json")
Expect(err).ToNot(HaveOccurred())
})
It("returns the billing item object based on volume id", func() {
billingItem, err = networkStorageService.GetBillingItem(1)
Expect(err).ToNot(HaveOccurred())
Expect(billingItem.Id).To(Equal(12345678))
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err = networkStorageService.GetBillingItem(1)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err = networkStorageService.GetBillingItem(1)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#HasAllowedVirtualGuest", func() {
It("virtual guest allows to access volume", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Network_Storage_Service_getAllowedVirtualGuests.json")
Expect(err).ToNot(HaveOccurred())
_, err := networkStorageService.HasAllowedVirtualGuest(123, 456)
Expect(err).ToNot(HaveOccurred())
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err := networkStorageService.HasAllowedVirtualGuest(123, 456)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err := networkStorageService.HasAllowedVirtualGuest(123, 456)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#HasAllowedHardware", func() {
It("hardware allows to access volume", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Network_Storage_Service_getAllowedHardware.json")
Expect(err).ToNot(HaveOccurred())
_, err := networkStorageService.HasAllowedHardware(123, 456)
Expect(err).ToNot(HaveOccurred())
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err := networkStorageService.HasAllowedHardware(123, 456)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
_, err := networkStorageService.HasAllowedHardware(123, 456)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#AttachNetworkStorageToVirtualGuest", func() {
var virtualGuest datatypes.SoftLayer_Virtual_Guest
BeforeEach(func() {
virtualGuest = datatypes.SoftLayer_Virtual_Guest{
AccountId: 123456,
DedicatedAccountHostOnlyFlag: false,
Domain: "softlayer.com",
FullyQualifiedDomainName: "fake.softlayer.com",
Hostname: "fake-hostname",
Id: 1234567,
MaxCpu: 2,
MaxCpuUnits: "CORE",
MaxMemory: 1024,
StartCpus: 2,
StatusId: 1001,
Uuid: "fake-uuid",
GlobalIdentifier: "fake-globalIdentifier",
PrimaryBackendIpAddress: "fake-primary-backend-ip",
PrimaryIpAddress: "fake-primary-ip",
}
})
It("Allow access to storage from virutal guest", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
resp, err := networkStorageService.AttachNetworkStorageToVirtualGuest(virtualGuest, 123)
Expect(err).ToNot(HaveOccurred())
Expect(resp).To(Equal(true))
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
_, err := networkStorageService.AttachNetworkStorageToVirtualGuest(virtualGuest, 123)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
_, err := networkStorageService.AttachNetworkStorageToVirtualGuest(virtualGuest, 123)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#AttachNetworkStorageToHardware", func() {
var hardware datatypes.SoftLayer_Hardware
BeforeEach(func() {
hardware = datatypes.SoftLayer_Hardware{
Domain: "softlayer.com",
FullyQualifiedDomainName: "fake.softlayer.com",
Hostname: "fake-hostname",
Id: 1234567,
GlobalIdentifier: "fake-globalIdentifier",
PrimaryBackendIpAddress: "fake-primary-backend-ip",
PrimaryIpAddress: "fake-primary-ip",
}
})
It("Allow access to storage from hardware", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
resp, err := networkStorageService.AttachNetworkStorageToHardware(hardware, 123)
Expect(err).ToNot(HaveOccurred())
Expect(resp).To(Equal(true))
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
_, err := networkStorageService.AttachNetworkStorageToHardware(hardware, 123)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
_, err := networkStorageService.AttachNetworkStorageToHardware(hardware, 123)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#DetachNetworkStorageFromVirtualGuest", func() {
var virtualGuest datatypes.SoftLayer_Virtual_Guest
BeforeEach(func() {
virtualGuest = datatypes.SoftLayer_Virtual_Guest{
AccountId: 123456,
DedicatedAccountHostOnlyFlag: false,
Domain: "softlayer.com",
FullyQualifiedDomainName: "fake.softlayer.com",
Hostname: "fake-hostname",
Id: 1234567,
MaxCpu: 2,
MaxCpuUnits: "CORE",
MaxMemory: 1024,
StartCpus: 2,
StatusId: 1001,
Uuid: "fake-uuid",
GlobalIdentifier: "fake-globalIdentifier",
PrimaryBackendIpAddress: "fake-primary-backend-ip",
PrimaryIpAddress: "fake-primary-ip",
}
})
It("Revoke access to storage from virtual guest", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
err = networkStorageService.DetachNetworkStorageFromVirtualGuest(virtualGuest, 1234567)
Expect(err).ToNot(HaveOccurred())
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
err = networkStorageService.DetachNetworkStorageFromVirtualGuest(virtualGuest, 1234567)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
err = networkStorageService.DetachNetworkStorageFromVirtualGuest(virtualGuest, 1234567)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#DetachNetworkStorageFromHardware", func() {
var hardware datatypes.SoftLayer_Hardware
BeforeEach(func() {
hardware = datatypes.SoftLayer_Hardware{
Domain: "softlayer.com",
FullyQualifiedDomainName: "fake.softlayer.com",
Hostname: "fake-hostname",
Id: 1234567,
GlobalIdentifier: "fake-globalIdentifier",
PrimaryBackendIpAddress: "fake-primary-backend-ip",
PrimaryIpAddress: "fake-primary-ip",
}
})
It("Revoke access to storage from virtual guest", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
err = networkStorageService.DetachNetworkStorageFromHardware(hardware, 1234567)
Expect(err).ToNot(HaveOccurred())
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
err = networkStorageService.DetachNetworkStorageFromHardware(hardware, 1234567)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
err = networkStorageService.DetachNetworkStorageFromHardware(hardware, 1234567)
Expect(err).To(HaveOccurred())
}
})
})
})
Context("#DeleteObject", func() {
BeforeEach(func() {
volume.Id = 1234567
})
It("sucessfully deletes the SoftLayer_Network_Storage volume", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("true")
deleted, err := networkStorageService.DeleteObject(volume.Id)
Expect(err).ToNot(HaveOccurred())
Expect(deleted).To(BeTrue())
})
It("fails to delete the SoftLayer_Network_Storage volume", func() {
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("false")
deleted, err := networkStorageService.DeleteObject(volume.Id)
Expect(err).To(HaveOccurred())
Expect(deleted).To(BeFalse())
})
Context("when HTTP client returns error codes 40x or 50x", func() {
It("fails for error code 40x", func() {
errorCodes := []int{400, 401, 499}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("false")
_, err := networkStorageService.DeleteObject(volume.Id)
Expect(err).To(HaveOccurred())
}
})
It("fails for error code 50x", func() {
errorCodes := []int{500, 501, 599}
for _, errorCode := range errorCodes {
fakeClient.FakeHttpClient.DoRawHttpRequestInt = errorCode
fakeClient.FakeHttpClient.DoRawHttpRequestResponse = []byte("false")
_, err := networkStorageService.DeleteObject(volume.Id)
Expect(err).To(HaveOccurred())
}
})
})
})
})
|
[
"\"SL_USERNAME\"",
"\"SL_API_KEY\""
] |
[] |
[
"SL_API_KEY",
"SL_USERNAME"
] |
[]
|
["SL_API_KEY", "SL_USERNAME"]
|
go
| 2 | 0 | |
xoodyak/xoodyakTb.py
|
import random
from random import randint
import sys
import os
import inspect
import itertools
import cocotb
from cocotb.handle import SimHandleBase
from functools import partial
from pprint import pprint
pprint = partial(pprint, sort_dicts=False)
script_dir = os.path.realpath(os.path.dirname(
inspect.getfile(inspect.currentframe())))
seed = random.randrange(sys.maxsize)
random = random.Random(seed)
print("Python random seed is:", seed)
try:
from .pyxoodyak import Xoodyak, XoodyakCref
except ImportError:
if script_dir not in sys.path:
sys.path.append(script_dir)
from pyxoodyak import Xoodyak, XoodyakCref
try:
from .cocolight import *
except ImportError:
cocolight_dir = os.path.dirname(script_dir)
if cocolight_dir not in sys.path:
sys.path.append(cocolight_dir)
from cocolight import *
class XoodyakRefCheckerTb(LwcRefCheckerTb):
def __init__(self, dut: SimHandleBase, debug, max_in_stalls, max_out_stalls) -> None:
# ref = Xoodyak(debug)
ref = XoodyakCref()
super().__init__(dut, ref, debug=debug, max_in_stalls=max_in_stalls,
max_out_stalls=max_out_stalls)
@cocotb.test()
async def blanket_test_simple(dut: SimHandleBase):
debug = os.environ.get('XOODYAK_DEBUG', False)
try:
debug = bool(int(debug))
except:
debug = bool(debug)
print(f'XOODYAK_DEBUG={debug}')
tb = XoodyakRefCheckerTb(
dut, debug=debug, max_in_stalls=10, max_out_stalls=10)
short_size = [0, 1, 15, 16, 43, 61, 64, 179] + \
[randint(2, 180) for _ in range(20)]
await tb.start()
for ad_size, pt_size in itertools.product(short_size, short_size):
await tb.xdec_test(ad_size=ad_size, ct_size=pt_size)
await tb.xhash_test(pt_size)
for ad_size, pt_size in itertools.product(short_size, short_size):
await tb.xenc_test(ad_size=ad_size, pt_size=pt_size)
await tb.xdec_test(ad_size=1536, ct_size=0)
await tb.xenc_test(ad_size=1536, pt_size=0)
await tb.xdec_test(ad_size=0, ct_size=1536)
await tb.xenc_test(ad_size=0, pt_size=1536)
await tb.xdec_test(ad_size=0, ct_size=1535)
await tb.launch_monitors()
await tb.launch_drivers()
await tb.join_drivers()
await tb.join_monitors()
@cocotb.test()
async def randomized_tests(dut: SimHandleBase):
debug = os.environ.get('XOODYAK_DEBUG', False)
# debug = True
try:
debug = bool(int(debug))
except:
debug = bool(debug)
print(f'XOODYAK_DEBUG={debug}')
tb = XoodyakRefCheckerTb(
dut, debug=debug, max_in_stalls=5, max_out_stalls=5)
sizes = [0, 1, 15, 16, 17, 23, 24, 25, 31, 32, 33, 43, 44, 45, 47, 48, 49, 61, 64, 65, 67] + \
[randint(2, 300) for _ in range(30)]
sizes = list(set(sizes)) # unique
random.shuffle(sizes)
sizes2 = sizes[:]
random.shuffle(sizes2)
await tb.start()
for size1, size2 in itertools.product(sizes, sizes2):
op = randint(0, 2)
if (op == 0):
await tb.xenc_test(ad_size=size1, pt_size=size2)
elif (op == 1):
await tb.xdec_test(ad_size=size1, ct_size=size2)
else:
await tb.xhash_test(hm_size=size1)
await tb.launch_monitors()
await tb.launch_drivers()
await tb.join_drivers()
await tb.join_monitors()
@cocotb.test()
async def debug_enc(dut: SimHandleBase):
debug = True
max_stalls = 0
tb = LwcRefCheckerTb(
dut, ref=Xoodyak(debug=debug), debug=debug, max_in_stalls=max_stalls, max_out_stalls=max_stalls)
await tb.start()
await tb.xenc_test(ad_size=4, pt_size=4)
await tb.xenc_test(ad_size=4, pt_size=24)
await tb.xenc_test(ad_size=44, pt_size=32)
await tb.xenc_test(ad_size=45, pt_size=0)
await tb.xenc_test(ad_size=44, pt_size=0)
await tb.xenc_test(ad_size=0, pt_size=45)
await tb.xenc_test(ad_size=65, pt_size=65)
await tb.launch_monitors()
await tb.launch_drivers()
await tb.join_drivers(10000)
await tb.join_monitors(10000)
@cocotb.test()
async def debug_dec(dut: SimHandleBase):
debug = True
max_stalls = 0
tb = XoodyakRefCheckerTb(
dut, debug=debug, max_in_stalls=max_stalls, max_out_stalls=max_stalls)
await tb.start()
await tb.xdec_test(ad_size=45, ct_size=0)
await tb.xdec_test(ad_size=44, ct_size=0)
await tb.xdec_test(ad_size=0, ct_size=45)
await tb.xdec_test(ad_size=65, ct_size=65)
await tb.launch_monitors()
await tb.launch_drivers()
await tb.join_drivers(10000)
await tb.join_monitors(10000)
@cocotb.test()
async def debug_hash(dut: SimHandleBase):
debug = True
max_stalls = 0
tb = LwcRefCheckerTb(
dut, ref=Xoodyak(debug), debug=debug, max_in_stalls=max_stalls, max_out_stalls=max_stalls)
await tb.start()
# await tb.xhash_test(15)
# await tb.xhash_test(16)
await tb.xhash_test(32)
# await tb.xhash_test(99)
await tb.launch_monitors()
await tb.launch_drivers()
await tb.join_drivers(10000)
await tb.join_monitors(10000)
@cocotb.test()
async def measure_timings(dut: SimHandleBase):
debug = os.environ.get('XOODYAK_DEBUG', False)
# debug = True
try:
debug = bool(int(debug))
except:
debug = bool(debug)
print(f'XOODYAK_DEBUG={debug}')
max_stalls = 0 # for timing measurements
tb = XoodyakRefCheckerTb(
dut, debug=debug, max_in_stalls=max_stalls, max_out_stalls=max_stalls)
await tb.start()
all_results = {}
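    # per-input-type block sizes in bytes; the 4x/5x "BS" measurements below use
    # multiples of these so their difference estimates the cost of one extra block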
block_sizes = {'AD': 352 // 8, 'PT/CT': 192 // 8, 'HM': 128 // 8}
sizes = [16, 64, 1536]
for op in ['enc', 'dec']:
results = {}
bt = 'AD'
for sz in sizes:
cycles = await tb.measure_op(dict(op=op, ad_size=sz, xt_size=0))
results[f'{bt} {sz}'] = cycles
for x in [4, 5]:
cycles = await tb.measure_op(dict(op=op, ad_size=x*block_sizes[bt], xt_size=0))
results[f'{bt} {x}BS'] = cycles
results[f'{bt} Long'] = results[f'{bt} 5BS'] - results[f'{bt} 4BS']
bt = 'PT/CT'
for sz in sizes:
cycles = await tb.measure_op(dict(op=op, ad_size=0, xt_size=sz))
results[f'{bt} {sz}'] = cycles
for x in [4, 5]:
cycles = await tb.measure_op(dict(op=op, ad_size=0, xt_size=x*block_sizes[bt]))
results[f'{bt} {x}BS'] = cycles
results[f'{bt} Long'] = results[f'{bt} 5BS'] - results[f'{bt} 4BS']
bt = 'AD+PT/CT'
for sz in sizes:
cycles = await tb.measure_op(dict(op=op, ad_size=sz, xt_size=sz))
# print(f'{op} PT={sz} AD=0: {cycles}')
results[f'{bt} {sz}'] = cycles
for x in [4, 5]:
cycles = await tb.measure_op(dict(op=op, ad_size=x*block_sizes['AD'], xt_size=x*block_sizes['PT/CT']))
# print(f'{op} PT={sz} AD=0: {cycles}')
results[f'{bt} {x}BS'] = cycles
results[f'{bt} Long'] = results[f'{bt} 5BS'] - results[f'{bt} 4BS']
all_results[op] = results
results = {}
op = 'hash'
bt = 'HM'
for sz in sizes:
cycles = await tb.measure_op(dict(op=op, hm_size=sz))
# print(f'hash HM={sz}: {cycles}')
results[f'{bt} {sz}'] = cycles
for x in [4, 5]:
cycles = await tb.measure_op(dict(op=op, hm_size=x*block_sizes[bt]))
results[f'{bt} {x}BS'] = cycles
results[f'{bt} Long'] = results[f'{bt} 5BS'] - results[f'{bt} 4BS']
all_results[op] = results
pprint(all_results)
if __name__ == "__main__":
print("should be run as a cocotb module")
|
[] |
[] |
[
"XOODYAK_DEBUG"
] |
[]
|
["XOODYAK_DEBUG"]
|
python
| 1 | 0 | |
src/robotide/context/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import wx
from . import logger
from ..robotapi import ROBOT_LOGGER
from ..version import VERSION
APP = None
LOG = logger.Logger()
ROBOT_LOGGER.unregister_console_logger()
ROBOT_LOGGER.register_logger(LOG)
IS_WINDOWS = os.sep == '\\'
IS_MAC = sys.platform == 'darwin'
IS_LINUX = sys.platform == 'linux'
WX_VERSION = wx.VERSION_STRING
if IS_WINDOWS:
SETTINGS_DIRECTORY = os.path.join(
os.environ['APPDATA'], 'RobotFramework', 'ride')
else:
SETTINGS_DIRECTORY = os.path.join(
os.path.expanduser('~/.robotframework'), 'ride')
LIBRARY_XML_DIRECTORY = os.path.join(SETTINGS_DIRECTORY, 'library_xmls')
if not os.path.isdir(LIBRARY_XML_DIRECTORY):
os.makedirs(LIBRARY_XML_DIRECTORY)
SETTING_EDITOR_WIDTH = 450
SETTING_LABEL_WIDTH = 150
SETTING_ROW_HEIGHT = 25
# TODO: Make this colour configurable
POPUP_BACKGROUND = (240, 242, 80) # (255, 255, 187)
POPUP_FOREGROUND = (40, 40, 0) # (255, 255, 187)
pyversion = '.'.join(str(v) for v in sys.version_info[:3])
SYSTEM_INFO = "Started RIDE %s using python version %s with wx version %s in %s." % \
(VERSION, pyversion, WX_VERSION, sys.platform)
ABOUT_RIDE = '''<h3>RIDE -- Robot Framework Test Data Editor</h3>
<p>RIDE %s running on Python %s.</p>
<p>RIDE is a test data editor for <a href="http://robotframework.org">Robot Framework</a>.
For more information, see project pages at
<a href="https://github.com/robotframework/RIDE">https://github.com/robotframework/RIDE</a>.</p>
<p>Some of the icons are from <a href="http://www.famfamfam.com/lab/icons/silk/">Silk Icons</a>.</p>
<p><br/><br/><a href="https://github.com/HelioGuilherme66">Hélio Guilherme</a> the maintainer of the project thanks the
original authors and all users and collaborators.<br/>
A very special thanks to <b><a href="https://github.com/Nyral">Nyral</a></b> and <b><a href="https://github.com/jnhyperi
on">Johnny.H</a></b> the most commited in helping RIDE development and maintenance.</p>
''' % (VERSION, pyversion)
def ctrl_or_cmd():
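    """Return the platform accelerator modifier: Cmd on macOS, Ctrl elsewhere."""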
if IS_MAC:
return wx.ACCEL_CMD
return wx.ACCEL_CTRL
def bind_keys_to_evt_menu(target, actions):
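    """Bind each (accelerator, keycode, handler) action to a menu event on target
    and install the resulting accelerator table."""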
    accelerators = []
    for accel, keycode, handler in actions:
        _id = wx.NewIdRef()
        target.Bind(wx.EVT_MENU, handler, id=_id)
        accelerators.append((accel, keycode, _id))
    target.SetAcceleratorTable(wx.AcceleratorTable(accelerators))
SHORTCUT_KEYS = '''\
<h2>Shortcut keys in RIDE</h2>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>CtrlCmd-S</td>
<td>Save</td>
</tr>
<tr>
<td>CtrlCmd-Shift-S</td>
<td>Save all</td>
</tr>
<tr>
<td>CtrlCmd-O</td>
<td>Open</td>
</tr>
<tr>
<td>CtrlCmd-Shift-O</td>
<td>Open directory</td>
</tr>
<tr>
<td>CtrlCmd-R</td>
<td>Open resource</td>
</tr>
<tr>
<td>Shift-CtrlCmd-R</td>
<td>Refresh directory</td>
</tr>
<tr>
<td>CtrlCmd-N</td>
<td>New project</td>
</tr>
<tr>
<td>Shift-CtrlCmd-N</td>
<td>New resource</td>
</tr>
<tr>
<td>CtrlCmd-Q</td>
<td>Quit RIDE</td>
</tr>
<tr>
<td>Alt-X</td>
<td>Go Forward</td>
</tr>
<tr>
<td>Alt-Z</td>
<td>Go Back</td>
</tr>
<tr>
<td>F6</td>
<td>Open preview</td>
</tr>
<tr>
<td>F5</td>
<td>Open search keywords dialog</td>
</tr>
<tr>
<td>F3</td>
<td>Open search tests dialog</td>
</tr>
<tr>
<td>F8</td>
<td>Run test suite</td>
</tr>
<tr>
<td>CtrlCmd-F8</td>
<td>Stop running test suite</td>
</tr>
</table>
<h3>Grid</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>Ctrl-Space or Alt-Space</td>
<td>Suggestions and auto completion</td>
</tr>
<tr>
<td>CtrlCmd</td>
<td>Help for cell content</td>
</tr>
<tr>
<td>CtrlCmd-Shift-J</td>
<td>Pop-up JSON Editor</td>
</tr>
<tr>
<td>CtrlCmd-I</td>
<td>Insert row(s)</td>
</tr>
<tr>
<td>CtrlCmd-D</td>
<td>Remove row(s)</td>
</tr>
<tr>
<td>Shift-CtrlCmd-I</td>
<td>Insert cell(s)</td>
</tr>
<tr>
<td>Shift-CtrlCmd-D</td>
<td>Remove cell(s)</td>
</tr>
<tr>
<td>CtrlCmd-Z</td>
<td>Undo</td>
</tr>
<tr>
<td>CtrlCmd-Y</td>
<td>Redo</td>
</tr>
<tr>
<td>CtrlCmd-1</td>
<td>Make scalar variable body</td>
</tr>
<tr>
<td>CtrlCmd-2</td>
<td>Make list variable body</td>
</tr>
<tr>
<td>CtrlCmd-3</td>
<td>Comment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-4</td>
<td>Uncomment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-5</td>
<td>Make dictionary variable body</td>
</tr>
<tr>
<td>Alt-Up</td>
<td>Move row(s) up</td>
</tr>
<tr>
<td>Alt-Down</td>
<td>Move row(s) down</td>
</tr>
<tr>
<td>Alt-Enter</td>
<td>Move cursor down</td>
</tr>
<tr>
<td>CtrlCmd-A</td>
<td>Select all</td>
</tr>
<tr>
<td>CtrlCmd-X</td>
<td>Cut (does not remove cells or rows)</td>
</tr>
<tr>
<td>CtrlCmd-C</td>
<td>Copy</td>
</tr>
<tr>
<td>CtrlCmd-V</td>
<td>Paste (does not move cells or rows)</td>
</tr>
<tr>
<td>Shift-CtrlCmd-V</td>
<td>Insert (adds empty rows and pastes data)</td>
</tr>
<tr>
<td>Delete</td>
<td>Remove cell content</td>
</tr>
</table>
<h3>Tree view</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>Shift-CtrlCmd-T</td>
<td>Add new test case</td>
</tr>
<tr>
<td>Shift-CtrlCmd-K</td>
<td>Add new keyword</td>
</tr>
<tr>
<td>Shift-CtrlCmd-V</td>
<td>Add new scalar variable</td>
</tr>
<tr>
<td>Shift-CtrlCmd-L</td>
<td>Add new list variable</td>
</tr>
<tr>
<td>F2</td>
<td>Rename</td>
</tr>
<tr>
<td>Shift-CtrlCmd-C</td>
<td>Clone/Copy selected keyword/test case</td>
</tr>
<tr>
<td>CtrlCmd-Up</td>
<td>Move item up</td>
</tr>
<tr>
<td>CtrlCmd-Down</td>
<td>Move item down</td>
</tr>
</table>
<h3>Text editor</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>Ctrl-Space or Alt-Space</td>
<td>Suggestions and auto completion</td>
</tr>
<tr>
<td>CtrlCmd-T</td>
<td>Swap current row up</td>
</tr>
<tr>
<td>Tab</td>
<td>Inserts the defined number of spaces</td>
</tr>
<tr>
<td>Shift-Tab</td>
<td>Moves cursor to the left the defined number of spaces</td>
</tr>
<tr>
<td>Ctrl-MouseWheel Roll</td>
<td>Increases or Decreases font size (Zoom +/-)</td>
</tr>
<tr>
<td>CtrlCmd-F</td>
<td>Find in text</td>
</tr>
<tr>
<td>CtrlCmd-G</td>
<td>Find next search result</td>
</tr>
<tr>
<td>Shift-CtrlCmd-G</td>
<td>Find previous search result</td>
</tr>
<tr>
<td>CtrlCmd-1</td>
<td>Make scalar variable body</td>
</tr>
<tr>
<td>CtrlCmd-2</td>
<td>Make list variable body</td>
</tr>
<tr>
<td>CtrlCmd-3</td>
<td>Comment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-4</td>
<td>Uncomment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-5</td>
<td>Make dictionary variable body</td>
</tr>
<tr>
<td>Enter</td>
<td>When focus is in the search field, find next search result</td>
</tr>
<tr>
<td>Shift-Enter</td>
<td>When focus is in the search field, find previous search result</td>
</tr>
</table>
<h3>Run tab</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>CtrlCmd-C</td>
<td>Copy from text output when text selected</td>
</tr>
<tr>
<td>CtrlCmd-L</td>
<td>Open HTML log</td>
</tr>
<tr>
<td>CtrlCmd-R</td>
<td>Show HTML report</td>
</tr>
<tr>
<td>Ctrl-MouseWheel Roll</td>
<td>Increases or Decreases font size (Zoom +/-)</td>
</tr>
</table>
'''
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
test/e2e/framework/framework.go
|
// Copyright 2016 The etcd-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package framework
import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"path"
"strconv"
"time"
"github.com/coreos/etcd-operator/pkg/client"
"github.com/coreos/etcd-operator/pkg/generated/clientset/versioned"
"github.com/coreos/etcd-operator/pkg/util/constants"
"github.com/coreos/etcd-operator/pkg/util/k8sutil"
"github.com/coreos/etcd-operator/pkg/util/probe"
"github.com/coreos/etcd-operator/pkg/util/retryutil"
"github.com/coreos/etcd-operator/test/e2e/e2eutil"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
v1beta1storage "k8s.io/api/storage/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
var Global *Framework
const (
etcdBackupOperatorName = "etcd-backup-operator"
etcdRestoreOperatorName = "etcd-restore-operator"
etcdRestoreOperatorServiceName = "etcd-restore-operator"
etcdRestoreServicePort = 19999
)
type Framework struct {
opImage string
KubeClient kubernetes.Interface
CRClient versioned.Interface
Namespace string
S3Cli *s3.S3
S3Bucket string
StorageClassName string
Provisioner string
}
// Setup setups a test framework and points "Global" to it.
func Setup() error {
kubeconfig := flag.String("kubeconfig", "", "kube config path, e.g. $HOME/.kube/config")
opImage := flag.String("operator-image", "", "operator image, e.g. gcr.io/coreos-k8s-scale-testing/etcd-operator")
pvProvisioner := flag.String("pv-provisioner", "kubernetes.io/gce-pd", "persistent volume provisioner type: the default is kubernetes.io/gce-pd. This should be set according to where the tests are running")
ns := flag.String("namespace", "default", "e2e test namespace")
flag.Parse()
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
return err
}
cli, err := kubernetes.NewForConfig(config)
if err != nil {
return err
}
Global = &Framework{
KubeClient: cli,
CRClient: client.MustNew(config),
Namespace: *ns,
opImage: *opImage,
S3Bucket: os.Getenv("TEST_S3_BUCKET"),
StorageClassName: "e2e-" + path.Base(*pvProvisioner),
Provisioner: *pvProvisioner,
}
return Global.setup()
}
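// Teardown deletes the operator pods and the restore-operator service created by Setup.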
func Teardown() error {
if err := Global.deleteEtcdOperator(); err != nil {
return err
}
err := Global.KubeClient.CoreV1().Pods(Global.Namespace).Delete(etcdBackupOperatorName, metav1.NewDeleteOptions(1))
if err != nil {
return fmt.Errorf("failed to delete etcd backup operator: %v", err)
}
err = Global.KubeClient.CoreV1().Pods(Global.Namespace).Delete(etcdRestoreOperatorName, metav1.NewDeleteOptions(1))
if err != nil {
return fmt.Errorf("failed to delete etcd restore operator pod: %v", err)
}
err = Global.KubeClient.CoreV1().Services(Global.Namespace).Delete(etcdRestoreOperatorServiceName, metav1.NewDeleteOptions(1))
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to delete etcd restore operator service: %v", err)
}
// TODO: check all deleted and wait
Global = nil
logrus.Info("e2e teardown successfully")
return nil
}
func (f *Framework) setup() error {
if err := f.setupStorageClass(); err != nil {
return fmt.Errorf("failed to setup storageclass(%v): %v", f.StorageClassName, err)
}
if err := f.SetupEtcdOperator(); err != nil {
return fmt.Errorf("failed to setup etcd operator: %v", err)
}
logrus.Info("etcd operator created successfully")
if os.Getenv("AWS_TEST_ENABLED") == "true" {
if err := f.setupAWS(); err != nil {
return fmt.Errorf("fail to setup aws: %v", err)
}
}
err := f.SetupEtcdBackupOperator()
if err != nil {
return fmt.Errorf("failed to create etcd backup operator: %v", err)
}
logrus.Info("etcd backup operator created successfully")
err = f.SetupEtcdRestoreOperatorAndService()
if err != nil {
return err
}
logrus.Info("etcd restore operator pod and service created successfully")
logrus.Info("e2e setup successfully")
return nil
}
func (f *Framework) SetupEtcdOperator() error {
// TODO: unify this and the yaml file in example/
cmd := []string{"/usr/local/bin/etcd-operator"}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-operator",
Labels: map[string]string{"name": "etcd-operator"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "etcd-operator",
Image: f.opImage,
ImagePullPolicy: v1.PullAlways,
Command: cmd,
Env: []v1.EnvVar{
{
Name: constants.EnvOperatorPodNamespace,
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
},
{
Name: constants.EnvOperatorPodName,
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
},
},
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: probe.HTTPReadyzEndpoint,
Port: intstr.IntOrString{Type: intstr.Int, IntVal: 8080},
},
},
InitialDelaySeconds: 3,
PeriodSeconds: 3,
FailureThreshold: 3,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
p, err := k8sutil.CreateAndWaitPod(f.KubeClient, f.Namespace, pod, 60*time.Second)
if err != nil {
describePod(f.Namespace, "etcd-operator")
return err
}
logrus.Infof("etcd operator pod is running on node (%s)", p.Spec.NodeName)
return e2eutil.WaitUntilOperatorReady(f.KubeClient, f.Namespace, "etcd-operator")
}
func describePod(ns, name string) {
// assuming `kubectl` installed on $PATH
cmd := exec.Command("kubectl", "-n", ns, "describe", "pod", name)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Run() // Just ignore the error...
logrus.Infof("describing %s pod: %s", name, out.String())
}
func (f *Framework) DeleteEtcdOperatorCompletely() error {
err := f.deleteEtcdOperator()
if err != nil {
return err
}
// On k8s 1.6.1, grace period isn't accurate. It took ~10s for operator pod to completely disappear.
// We work around by increasing the wait time. Revisit this later.
err = retryutil.Retry(5*time.Second, 6, func() (bool, error) {
_, err := f.KubeClient.CoreV1().Pods(f.Namespace).Get("etcd-operator", metav1.GetOptions{})
if err == nil {
return false, nil
}
if k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
return false, err
})
if err != nil {
return fmt.Errorf("fail to wait etcd operator pod gone from API: %v", err)
}
return nil
}
// SetupEtcdBackupOperator creates a etcd backup operator pod with name as "etcd-backup-operator".
func (f *Framework) SetupEtcdBackupOperator() error {
cmd := []string{"/usr/local/bin/etcd-backup-operator"}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: etcdBackupOperatorName,
Labels: map[string]string{"name": etcdBackupOperatorName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: etcdBackupOperatorName,
Image: f.opImage,
ImagePullPolicy: v1.PullAlways,
Command: cmd,
Env: []v1.EnvVar{
{
Name: constants.EnvOperatorPodNamespace,
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
},
{
Name: constants.EnvOperatorPodName,
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
p, err := k8sutil.CreateAndWaitPod(f.KubeClient, f.Namespace, pod, 60*time.Second)
if err != nil {
describePod(f.Namespace, etcdBackupOperatorName)
return err
}
logrus.Infof("etcd backup operator pod is running on node (%s)", p.Spec.NodeName)
return nil
}
// SetupEtcdRestoreOperatorAndService creates an etcd restore operator pod and the restore operator service.
func (f *Framework) SetupEtcdRestoreOperatorAndService() error {
cmd := []string{"/usr/local/bin/etcd-restore-operator"}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: etcdRestoreOperatorName,
Labels: map[string]string{"name": etcdRestoreOperatorName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: etcdRestoreOperatorName,
Image: f.opImage,
ImagePullPolicy: v1.PullAlways,
Command: cmd,
Env: []v1.EnvVar{
{
Name: constants.EnvOperatorPodNamespace,
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
},
{
Name: constants.EnvOperatorPodName,
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
},
{
Name: constants.EnvRestoreOperatorServiceName,
Value: etcdRestoreOperatorServiceName + ":" + strconv.Itoa(etcdRestoreServicePort),
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
p, err := k8sutil.CreateAndWaitPod(f.KubeClient, f.Namespace, pod, 60*time.Second)
if err != nil {
describePod(f.Namespace, etcdRestoreOperatorName)
return fmt.Errorf("create restore-operator pod failed: %v", err)
}
logrus.Infof("restore-operator pod is running on node (%s)", p.Spec.NodeName)
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: etcdRestoreOperatorServiceName,
Labels: map[string]string{"name": etcdRestoreOperatorServiceName},
},
Spec: v1.ServiceSpec{
Selector: map[string]string{"name": etcdRestoreOperatorName},
Ports: []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: etcdRestoreServicePort,
}},
},
}
_, err = f.KubeClient.CoreV1().Services(f.Namespace).Create(svc)
if err != nil {
return fmt.Errorf("create restore-operator service failed: %v", err)
}
return nil
}
func (f *Framework) deleteEtcdOperator() error {
return f.KubeClient.CoreV1().Pods(f.Namespace).Delete("etcd-operator", metav1.NewDeleteOptions(1))
}
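// setupAWS points the AWS SDK at the shared credential and config files named by the
// AWS_CREDENTIAL and AWS_CONFIG environment variables, then creates an S3 client.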
func (f *Framework) setupAWS() error {
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", os.Getenv("AWS_CREDENTIAL")); err != nil {
return err
}
if err := os.Setenv("AWS_CONFIG_FILE", os.Getenv("AWS_CONFIG")); err != nil {
return err
}
sess, err := session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
})
if err != nil {
return err
}
f.S3Cli = s3.New(sess)
return nil
}
func (f *Framework) setupStorageClass() error {
class := &v1beta1storage.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: f.StorageClassName,
},
Provisioner: f.Provisioner,
}
_, err := f.KubeClient.StorageV1beta1().StorageClasses().Create(class)
if err != nil && !k8sutil.IsKubernetesResourceAlreadyExistError(err) {
return fmt.Errorf("fail to create storage class: %v", err)
}
return nil
}
|
[
"\"TEST_S3_BUCKET\"",
"\"AWS_TEST_ENABLED\"",
"\"AWS_CREDENTIAL\"",
"\"AWS_CONFIG\""
] |
[] |
[
"TEST_S3_BUCKET",
"AWS_CONFIG",
"AWS_CREDENTIAL",
"AWS_TEST_ENABLED"
] |
[]
|
["TEST_S3_BUCKET", "AWS_CONFIG", "AWS_CREDENTIAL", "AWS_TEST_ENABLED"]
|
go
| 4 | 0 | |
context/context.go
|
package context
import (
"errors"
"net/http"
_ "net/http/pprof"
"os"
"github.com/0xAX/notificator"
"github.com/erroneousboat/termui"
termbox "github.com/nsf/termbox-go"
"github.com/erroneousboat/slack-term/config"
"github.com/erroneousboat/slack-term/service"
"github.com/erroneousboat/slack-term/views"
)
const (
CommandMode = "command"
InsertMode = "insert"
SearchMode = "search"
)
type AppContext struct {
Version string
Usage string
EventQueue chan termbox.Event
Service *service.SlackService
Body *termui.Grid
View *views.View
Config *config.Config
Debug bool
Mode string
Notify *notificator.Notificator
}
// CreateAppContext creates an application context which can be passed
// and referenced throughout the application
func CreateAppContext(flgConfig string, flgToken string, flgDebug bool, version string, usage string) (*AppContext, error) {
if flgDebug {
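		// expose the net/http/pprof endpoints on :6060 while running in debug mode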
go func() {
http.ListenAndServe(":6060", nil)
}()
}
// Loading screen
views.Loading()
// Load config
config, err := config.NewConfig(flgConfig)
if err != nil {
return nil, err
}
// When slack token isn't set in the config file, we'll check
// the command-line flag or the environment variable
if config.SlackToken == "" {
if flgToken != "" {
config.SlackToken = flgToken
} else {
config.SlackToken = os.Getenv("SLACK_TOKEN")
}
}
// Create desktop notifier
var notify *notificator.Notificator
if config.Notify != "" {
notify = notificator.New(notificator.Options{AppName: "slack-term"})
if notify == nil {
return nil, errors.New(
"desktop notifications are not supported for your OS",
)
}
}
// Create Service
svc, err := service.NewSlackService(config)
if err != nil {
return nil, err
}
// Create the main view
view, err := views.CreateView(config, svc)
if err != nil {
return nil, err
}
// Setup the interface
if flgDebug {
termui.Body.AddRows(
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Channels),
termui.NewCol(config.MainWidth-5, 0, view.Chat),
termui.NewCol(config.MainWidth-6, 0, view.Debug),
),
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Mode),
termui.NewCol(config.MainWidth, 0, view.Input),
),
)
} else {
termui.Body.AddRows(
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Channels),
termui.NewCol(config.MainWidth, 0, view.Chat),
),
termui.NewRow(
termui.NewCol(config.SidebarWidth, 0, view.Mode),
termui.NewCol(config.MainWidth, 0, view.Input),
),
)
}
termui.Body.Align()
termui.Render(termui.Body)
return &AppContext{
Version: version,
Usage: usage,
EventQueue: make(chan termbox.Event, 20),
Service: svc,
Body: termui.Body,
View: view,
Config: config,
Debug: flgDebug,
Mode: CommandMode,
Notify: notify,
}, nil
}
|
[
"\"SLACK_TOKEN\""
] |
[] |
[
"SLACK_TOKEN"
] |
[]
|
["SLACK_TOKEN"]
|
go
| 1 | 0 | |
clients/google-api-services-mybusinesslodging/v1/1.31.0/com/google/api/services/mybusinesslodging/v1/MyBusinessLodging.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.mybusinesslodging.v1;
/**
* Service definition for MyBusinessLodging (v1).
*
* <p>
* The My Business Lodging API enables managing lodging business information on Google.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/my-business/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link MyBusinessLodgingRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class MyBusinessLodging extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the My Business Lodging API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://mybusinesslodging.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://mybusinesslodging.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public MyBusinessLodging(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
MyBusinessLodging(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Locations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code MyBusinessLodging mybusinesslodging = new MyBusinessLodging(...);}
* {@code MyBusinessLodging.Locations.List request = mybusinesslodging.locations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Locations locations() {
return new Locations();
}
/**
* The "locations" collection of methods.
*/
public class Locations {
/**
* Returns the Lodging of a specific location.
*
* Create a request for the method "locations.getLodging".
*
* This request holds the parameters needed by the mybusinesslodging server. After setting any
* optional parameters, call the {@link GetLodging#execute()} method to invoke the remote operation.
*
* @param name Required. Google identifier for this location in the form: `locations/{location_id}/lodging`
* @return the request
*/
public GetLodging getLodging(java.lang.String name) throws java.io.IOException {
GetLodging result = new GetLodging(name);
initialize(result);
return result;
}
public class GetLodging extends MyBusinessLodgingRequest<com.google.api.services.mybusinesslodging.v1.model.Lodging> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^locations/[^/]+/lodging$");
/**
* Returns the Lodging of a specific location.
*
* Create a request for the method "locations.getLodging".
*
   * This request holds the parameters needed by the mybusinesslodging server.  After setting
* any optional parameters, call the {@link GetLodging#execute()} method to invoke the remote
* operation. <p> {@link
* GetLodging#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Google identifier for this location in the form: `locations/{location_id}/lodging`
* @since 1.13
*/
protected GetLodging(java.lang.String name) {
super(MyBusinessLodging.this, "GET", REST_PATH, null, com.google.api.services.mybusinesslodging.v1.model.Lodging.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^locations/[^/]+/lodging$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetLodging set$Xgafv(java.lang.String $Xgafv) {
return (GetLodging) super.set$Xgafv($Xgafv);
}
@Override
public GetLodging setAccessToken(java.lang.String accessToken) {
return (GetLodging) super.setAccessToken(accessToken);
}
@Override
public GetLodging setAlt(java.lang.String alt) {
return (GetLodging) super.setAlt(alt);
}
@Override
public GetLodging setCallback(java.lang.String callback) {
return (GetLodging) super.setCallback(callback);
}
@Override
public GetLodging setFields(java.lang.String fields) {
return (GetLodging) super.setFields(fields);
}
@Override
public GetLodging setKey(java.lang.String key) {
return (GetLodging) super.setKey(key);
}
@Override
public GetLodging setOauthToken(java.lang.String oauthToken) {
return (GetLodging) super.setOauthToken(oauthToken);
}
@Override
public GetLodging setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetLodging) super.setPrettyPrint(prettyPrint);
}
@Override
public GetLodging setQuotaUser(java.lang.String quotaUser) {
return (GetLodging) super.setQuotaUser(quotaUser);
}
@Override
public GetLodging setUploadType(java.lang.String uploadType) {
return (GetLodging) super.setUploadType(uploadType);
}
@Override
public GetLodging setUploadProtocol(java.lang.String uploadProtocol) {
return (GetLodging) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Google identifier for this location in the form:
* `locations/{location_id}/lodging`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Google identifier for this location in the form: `locations/{location_id}/lodging`
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Google identifier for this location in the form:
* `locations/{location_id}/lodging`
*/
public GetLodging setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^locations/[^/]+/lodging$");
}
this.name = name;
return this;
}
/**
* Required. The specific fields to return. Use "*" to include all fields. Repeated field
* items cannot be individually specified.
*/
@com.google.api.client.util.Key
private String readMask;
/** Required. The specific fields to return. Use "*" to include all fields. Repeated field items cannot
be individually specified.
*/
public String getReadMask() {
return readMask;
}
/**
* Required. The specific fields to return. Use "*" to include all fields. Repeated field
* items cannot be individually specified.
*/
public GetLodging setReadMask(String readMask) {
this.readMask = readMask;
return this;
}
@Override
public GetLodging set(String parameterName, Object value) {
return (GetLodging) super.set(parameterName, value);
}
}
/**
* Updates the Lodging of a specific location.
*
* Create a request for the method "locations.updateLodging".
*
* This request holds the parameters needed by the mybusinesslodging server. After setting any
* optional parameters, call the {@link UpdateLodging#execute()} method to invoke the remote
* operation.
*
* @param name Required. Google identifier for this location in the form: `locations/{location_id}/lodging`
* @param content the {@link com.google.api.services.mybusinesslodging.v1.model.Lodging}
* @return the request
*/
public UpdateLodging updateLodging(java.lang.String name, com.google.api.services.mybusinesslodging.v1.model.Lodging content) throws java.io.IOException {
UpdateLodging result = new UpdateLodging(name, content);
initialize(result);
return result;
}
public class UpdateLodging extends MyBusinessLodgingRequest<com.google.api.services.mybusinesslodging.v1.model.Lodging> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^locations/[^/]+/lodging$");
/**
* Updates the Lodging of a specific location.
*
* Create a request for the method "locations.updateLodging".
*
       * This request holds the parameters needed by the mybusinesslodging server. After setting
* any optional parameters, call the {@link UpdateLodging#execute()} method to invoke the remote
* operation. <p> {@link UpdateLodging#initialize(com.google.api.client.googleapis.services.Abstra
* ctGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param name Required. Google identifier for this location in the form: `locations/{location_id}/lodging`
* @param content the {@link com.google.api.services.mybusinesslodging.v1.model.Lodging}
* @since 1.13
*/
protected UpdateLodging(java.lang.String name, com.google.api.services.mybusinesslodging.v1.model.Lodging content) {
super(MyBusinessLodging.this, "PATCH", REST_PATH, content, com.google.api.services.mybusinesslodging.v1.model.Lodging.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^locations/[^/]+/lodging$");
}
}
@Override
public UpdateLodging set$Xgafv(java.lang.String $Xgafv) {
return (UpdateLodging) super.set$Xgafv($Xgafv);
}
@Override
public UpdateLodging setAccessToken(java.lang.String accessToken) {
return (UpdateLodging) super.setAccessToken(accessToken);
}
@Override
public UpdateLodging setAlt(java.lang.String alt) {
return (UpdateLodging) super.setAlt(alt);
}
@Override
public UpdateLodging setCallback(java.lang.String callback) {
return (UpdateLodging) super.setCallback(callback);
}
@Override
public UpdateLodging setFields(java.lang.String fields) {
return (UpdateLodging) super.setFields(fields);
}
@Override
public UpdateLodging setKey(java.lang.String key) {
return (UpdateLodging) super.setKey(key);
}
@Override
public UpdateLodging setOauthToken(java.lang.String oauthToken) {
return (UpdateLodging) super.setOauthToken(oauthToken);
}
@Override
public UpdateLodging setPrettyPrint(java.lang.Boolean prettyPrint) {
return (UpdateLodging) super.setPrettyPrint(prettyPrint);
}
@Override
public UpdateLodging setQuotaUser(java.lang.String quotaUser) {
return (UpdateLodging) super.setQuotaUser(quotaUser);
}
@Override
public UpdateLodging setUploadType(java.lang.String uploadType) {
return (UpdateLodging) super.setUploadType(uploadType);
}
@Override
public UpdateLodging setUploadProtocol(java.lang.String uploadProtocol) {
return (UpdateLodging) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Google identifier for this location in the form:
* `locations/{location_id}/lodging`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Google identifier for this location in the form: `locations/{location_id}/lodging`
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Google identifier for this location in the form:
* `locations/{location_id}/lodging`
*/
public UpdateLodging setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^locations/[^/]+/lodging$");
}
this.name = name;
return this;
}
/**
* Required. The specific fields to update. Use "*" to update all fields, which may include
* unsetting empty fields in the request. Repeated field items cannot be individually updated.
*/
@com.google.api.client.util.Key
private String updateMask;
/** Required. The specific fields to update. Use "*" to update all fields, which may include unsetting
empty fields in the request. Repeated field items cannot be individually updated.
*/
public String getUpdateMask() {
return updateMask;
}
/**
* Required. The specific fields to update. Use "*" to update all fields, which may include
* unsetting empty fields in the request. Repeated field items cannot be individually updated.
*/
public UpdateLodging setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public UpdateLodging set(String parameterName, Object value) {
return (UpdateLodging) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Lodging collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code MyBusinessLodging mybusinesslodging = new MyBusinessLodging(...);}
* {@code MyBusinessLodging.Lodging.List request = mybusinesslodging.lodging().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Lodging lodging() {
return new Lodging();
}
/**
* The "lodging" collection of methods.
*/
public class Lodging {
/**
* Returns the Google updated Lodging of a specific location.
*
* Create a request for the method "lodging.getGoogleUpdated".
*
* This request holds the parameters needed by the mybusinesslodging server. After setting any
* optional parameters, call the {@link GetGoogleUpdated#execute()} method to invoke the remote
* operation.
*
* @param name Required. Google identifier for this location in the form:
* `accounts/{account_id}/locations/{location_id}/lodging`
* @return the request
*/
public GetGoogleUpdated getGoogleUpdated(java.lang.String name) throws java.io.IOException {
GetGoogleUpdated result = new GetGoogleUpdated(name);
initialize(result);
return result;
}
public class GetGoogleUpdated extends MyBusinessLodgingRequest<com.google.api.services.mybusinesslodging.v1.model.GetGoogleUpdatedLodgingResponse> {
private static final String REST_PATH = "v1/{+name}:getGoogleUpdated";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^locations/[^/]+/lodging$");
/**
* Returns the Google updated Lodging of a specific location.
*
* Create a request for the method "lodging.getGoogleUpdated".
*
         * This request holds the parameters needed by the mybusinesslodging server. After setting
* any optional parameters, call the {@link GetGoogleUpdated#execute()} method to invoke the
* remote operation. <p> {@link GetGoogleUpdated#initialize(com.google.api.client.googleapis.servi
* ces.AbstractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param name Required. Google identifier for this location in the form:
* `accounts/{account_id}/locations/{location_id}/lodging`
* @since 1.13
*/
protected GetGoogleUpdated(java.lang.String name) {
super(MyBusinessLodging.this, "GET", REST_PATH, null, com.google.api.services.mybusinesslodging.v1.model.GetGoogleUpdatedLodgingResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^locations/[^/]+/lodging$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetGoogleUpdated set$Xgafv(java.lang.String $Xgafv) {
return (GetGoogleUpdated) super.set$Xgafv($Xgafv);
}
@Override
public GetGoogleUpdated setAccessToken(java.lang.String accessToken) {
return (GetGoogleUpdated) super.setAccessToken(accessToken);
}
@Override
public GetGoogleUpdated setAlt(java.lang.String alt) {
return (GetGoogleUpdated) super.setAlt(alt);
}
@Override
public GetGoogleUpdated setCallback(java.lang.String callback) {
return (GetGoogleUpdated) super.setCallback(callback);
}
@Override
public GetGoogleUpdated setFields(java.lang.String fields) {
return (GetGoogleUpdated) super.setFields(fields);
}
@Override
public GetGoogleUpdated setKey(java.lang.String key) {
return (GetGoogleUpdated) super.setKey(key);
}
@Override
public GetGoogleUpdated setOauthToken(java.lang.String oauthToken) {
return (GetGoogleUpdated) super.setOauthToken(oauthToken);
}
@Override
public GetGoogleUpdated setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetGoogleUpdated) super.setPrettyPrint(prettyPrint);
}
@Override
public GetGoogleUpdated setQuotaUser(java.lang.String quotaUser) {
return (GetGoogleUpdated) super.setQuotaUser(quotaUser);
}
@Override
public GetGoogleUpdated setUploadType(java.lang.String uploadType) {
return (GetGoogleUpdated) super.setUploadType(uploadType);
}
@Override
public GetGoogleUpdated setUploadProtocol(java.lang.String uploadProtocol) {
return (GetGoogleUpdated) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Google identifier for this location in the form:
* `accounts/{account_id}/locations/{location_id}/lodging`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Google identifier for this location in the form:
`accounts/{account_id}/locations/{location_id}/lodging`
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Google identifier for this location in the form:
* `accounts/{account_id}/locations/{location_id}/lodging`
*/
public GetGoogleUpdated setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^locations/[^/]+/lodging$");
}
this.name = name;
return this;
}
/**
* Required. The specific fields to return. Use "*" to include all fields. Repeated field
* items cannot be individually specified.
*/
@com.google.api.client.util.Key
private String readMask;
/** Required. The specific fields to return. Use "*" to include all fields. Repeated field items cannot
be individually specified.
*/
public String getReadMask() {
return readMask;
}
/**
* Required. The specific fields to return. Use "*" to include all fields. Repeated field
* items cannot be individually specified.
*/
public GetGoogleUpdated setReadMask(String readMask) {
this.readMask = readMask;
return this;
}
@Override
public GetGoogleUpdated set(String parameterName, Object value) {
return (GetGoogleUpdated) super.set(parameterName, value);
}
}
}
}
/**
* Builder for {@link MyBusinessLodging}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
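    // Illustrative summary of the endpoint selection implemented above:
    //   GOOGLE_API_USE_MTLS_ENDPOINT=always                    -> DEFAULT_MTLS_ROOT_URL
    //   GOOGLE_API_USE_MTLS_ENDPOINT=auto (or unset) over mTLS -> DEFAULT_MTLS_ROOT_URL
    //   any other combination                                  -> DEFAULT_ROOT_URL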
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link MyBusinessLodging}. */
@Override
public MyBusinessLodging build() {
return new MyBusinessLodging(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link MyBusinessLodgingRequestInitializer}.
*
* @since 1.12
*/
public Builder setMyBusinessLodgingRequestInitializer(
MyBusinessLodgingRequestInitializer mybusinesslodgingRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(mybusinesslodgingRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
|
[
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT"]
|
java
| 1 | 0 | |
frontend/server/src/main/java/com/amazonaws/ml/mms/util/logging/QLogLayout.java
|
/*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.ml.mms.util.logging;
import com.amazonaws.ml.mms.metrics.Dimension;
import com.amazonaws.ml.mms.metrics.Metric;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.spi.LoggingEvent;
public class QLogLayout extends PatternLayout {
/**
* Model server also supports query log formatting.
*
* <p>To enable Query Log format, change the layout as follows
*
* <pre>
* log4j.appender.model_metrics.layout = com.amazonaws.ml.mms.util.logging.QLogLayout
* </pre>
*
     * This produces log entries in the following format
*
* <pre>
* HostName=hostName
* RequestId=004bd136-063c-4102-a070-d7aff5add939
* Marketplace=US
* StartTime=1542275707
* Program=MXNetModelServer
* Metrics=PredictionTime=45 Milliseconds ModelName|squeezenet Level|Model
* EOE
* </pre>
*
     * <b>Note</b>: The following entities in these metrics can be customized.
*
* <ul>
* <li><b>Marketplace</b> : This can be customized by setting the "REALM" system environment
* variable.
* <li><b>Program</b> : This entity can be customized by setting "MXNETMODELSERVER_PROGRAM"
* environment variable.
* </ul>
*
* Example: If the above environment variables are set to the following,
*
* <pre>
* $ env
* REALM=someRealm
* MXNETMODELSERVER_PROGRAM=someProgram
* </pre>
*
* This produces the metrics as follows
*
* <pre>
* HostName=hostName
* RequestId=004bd136-063c-4102-a070-d7aff5add939
* Marketplace=someRealm
* StartTime=1542275707
* Program=someProgram
* Metrics=PredictionTime=45 Milliseconds ModelName|squeezenet Level|Model
* EOE
* </pre>
*
     * @param event the logging event to format
     * @return the formatted, query-log style log entry for the event
*/
@Override
public String format(LoggingEvent event) {
Object eventMessage = event.getMessage();
String programName =
getStringOrDefault(System.getenv("MXNETMODELSERVER_PROGRAM"), "MXNetModelServer");
String domain = getStringOrDefault(System.getenv("DOMAIN"), "Unknown");
long currentTimeInSec = System.currentTimeMillis() / 1000;
if (eventMessage == null) {
return null;
}
if (eventMessage instanceof Metric) {
String marketPlace = System.getenv("REALM");
StringBuilder stringBuilder = new StringBuilder();
Metric metric = (Metric) eventMessage;
stringBuilder.append("HostName=").append(metric.getHostName());
if (metric.getRequestId() != null && !metric.getRequestId().isEmpty()) {
stringBuilder.append("\nRequestId=").append(metric.getRequestId());
}
// Marketplace format should be : <programName>:<domain>:<realm>
if (marketPlace != null && !marketPlace.isEmpty()) {
stringBuilder
.append("\nMarketplace=")
.append(programName)
.append(':')
.append(domain)
.append(':')
.append(marketPlace);
}
stringBuilder
.append("\nStartTime=")
.append(
getStringOrDefault(
metric.getTimestamp(), Long.toString(currentTimeInSec)));
stringBuilder
.append("\nProgram=")
.append(programName)
.append("\nMetrics=")
.append(metric.getMetricName())
.append('=')
.append(metric.getValue())
.append(' ')
.append(metric.getUnit());
for (Dimension dimension : metric.getDimensions()) {
stringBuilder
.append(' ')
.append(dimension.getName())
.append('|')
.append(dimension.getValue())
.append(' ');
}
stringBuilder.append("\nEOE\n");
return stringBuilder.toString();
}
return eventMessage.toString();
}
private static String getStringOrDefault(String val, String defVal) {
if (val == null || val.isEmpty()) {
return defVal;
}
return val;
}
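    // Illustrative only (placeholder values): with REALM=someRealm, DOMAIN unset and no
    // MXNETMODELSERVER_PROGRAM override, format() composes the marketplace line as
    //   Marketplace=MXNetModelServer:Unknown:someRealm
    // and falls back to the current epoch seconds for StartTime when the metric carries no timestamp.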
}
|
[
"\"MXNETMODELSERVER_PROGRAM\"",
"\"DOMAIN\"",
"\"REALM\""
] |
[] |
[
"REALM",
"DOMAIN",
"MXNETMODELSERVER_PROGRAM"
] |
[]
|
["REALM", "DOMAIN", "MXNETMODELSERVER_PROGRAM"]
|
java
| 3 | 0 | |
awsapilib/captcha/captcha.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: captcha.py
#
# Copyright 2021 Costas Tyfoxylos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Main code for captcha.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import base64
import logging
import os
from abc import ABC, abstractmethod
import requests
from twocaptcha import TwoCaptcha, ValidationException, TimeoutException
from twocaptcha.api import ApiException, NetworkException
from awsapilib.authentication import LoggerMixin
from .captchaexceptions import CaptchaError, UnsupportedTerminal
__author__ = '''Costas Tyfoxylos <[email protected]>'''
__docformat__ = '''google'''
__date__ = '''30-06-2021'''
__copyright__ = '''Copyright 2021, Costas Tyfoxylos'''
__credits__ = ["Costas Tyfoxylos"]
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<[email protected]>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
# This is the main prefix used for logging
LOGGER_BASENAME = '''captcha'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())
class Solver(ABC, LoggerMixin):
"""Interface for a Solver object."""
@abstractmethod
def solve(self, url):
"""Solves a url."""
class Iterm(Solver):
"""Interactive captcha solver for iTerm terminals."""
def __init__(self):
terminal = os.environ.get('TERM_PROGRAM', 'UNKNOWN')
if 'iterm' not in terminal.lower():
raise UnsupportedTerminal(terminal)
def solve(self, url):
"""Presents a captcha image and returns the user's guess for the captcha.
Args:
url (str): The url to provide that should have the captcha image.
Returns:
guess (str): The guess of the user for the captcha.
"""
response = requests.get(url)
if not response.ok:
raise CaptchaError(response.text)
image = base64.b64encode(response.content).decode()
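        # The escape sequence below uses iTerm2's inline-image protocol (OSC 1337 "File=inline")
        # to render the captcha image directly in the terminal.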
print(f'\033]1337;File=inline=1;width=400px;height=140px:{image}\a\n')
try:
guess = input('Captcha: ')
except KeyboardInterrupt:
            raise CaptchaError(f'User interrupted.\nIf the captcha was not showing correctly please check that the url '
                               f'{url} indeed points to a valid captcha image.') from None
return guess
class Terminal(Solver):
"""Interactive captcha solver for standard terminals."""
def solve(self, url):
"""Presents a captcha image url and returns the user's guess for the captcha.
Args:
url (str): The url to provide that should have the captcha image.
Returns:
guess (str): The guess of the user for the captcha.
"""
print(f'Please follow {url} and provide the solution.')
try:
guess = input('Captcha: ')
except KeyboardInterrupt:
raise CaptchaError('User interrupted.') from None
return guess
class Captcha2(Solver):
"""2captcha solver."""
def __init__(self, api_key):
self.solver = TwoCaptcha(api_key)
def __call__(self, *args, **kwargs):
return self
def solve(self, url):
"""Presents a captcha image url and returns the captcha.
Args:
url (str): The url to provide that should have the captcha image.
Returns:
guess (str): The captcha.
"""
captcha_parameters = {'numeric': 4,
'minLength': 6,
'maxLength': 6,
'phrase': 0,
'caseSensitive': 1,
'calc': 0,
'lang': 'en'}
try:
self.logger.debug(f'Trying to get captcha image from url : {url}')
response = requests.get(url)
image = base64.b64encode(response.content).decode("utf-8")
self.logger.debug('Waiting for the solved captcha from 2captcha service.')
result = self.solver.normal(image, **captcha_parameters)
self.logger.debug(f'Result for captcha was : {result}')
except (ValidationException, NetworkException, ApiException, TimeoutException) as msg:
raise CaptchaError(msg) from None
return result.get('code')
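# Illustrative only: a minimal sketch of how one of these solvers might be used; the captcha
# URL and the 2captcha API key are placeholders.
#
#   solver = Terminal()                        # or Iterm(), or Captcha2('YOUR_2CAPTCHA_API_KEY')
#   guess = solver.solve('https://example.com/captcha.png')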
|
[] |
[] |
[
"TERM_PROGRAM"
] |
[]
|
["TERM_PROGRAM"]
|
python
| 1 | 0 | |
web/application.py
|
"""
Web application
(from web.py)
"""
import webapi as web
import webapi, wsgi, utils
import debugerror
import httpserver
from utils import lstrips, safeunicode
import sys
import urllib
import traceback
import itertools
import os
import types
from exceptions import SystemExit
try:
import wsgiref.handlers
except ImportError:
pass # don't break people with old Pythons
__all__ = [
"application", "auto_application",
"subdir_application", "subdomain_application",
"loadhook", "unloadhook",
"autodelegate"
]
class application:
"""
Application to delegate requests based on path.
>>> urls = ("/hello", "hello")
>>> app = application(urls, globals())
>>> class hello:
... def GET(self): return "hello"
>>>
>>> app.request("/hello").data
'hello'
"""
def __init__(self, mapping=(), fvars={}, autoreload=None):
if autoreload is None:
autoreload = web.config.get('debug', False)
self.init_mapping(mapping)
self.fvars = fvars
self.processors = []
self.add_processor(loadhook(self._load))
self.add_processor(unloadhook(self._unload))
if autoreload:
def main_module_name():
mod = sys.modules['__main__']
file = getattr(mod, '__file__', None) # make sure this works even from python interpreter
return file and os.path.splitext(os.path.basename(file))[0]
def modname(fvars):
"""find name of the module name from fvars."""
file, name = fvars.get('__file__'), fvars.get('__name__')
if file is None or name is None:
return None
if name == '__main__':
# Since the __main__ module can't be reloaded, the module has
# to be imported using its file name.
name = main_module_name()
return name
mapping_name = utils.dictfind(fvars, mapping)
module_name = modname(fvars)
def reload_mapping():
"""loadhook to reload mapping and fvars."""
mod = __import__(module_name, None, None, [''])
mapping = getattr(mod, mapping_name, None)
if mapping:
self.fvars = mod.__dict__
self.init_mapping(mapping)
self.add_processor(loadhook(Reloader()))
if mapping_name and module_name:
self.add_processor(loadhook(reload_mapping))
            # load the __main__ module using its filename, so that it can be reloaded.
if main_module_name() and '__main__' in sys.argv:
try:
__import__(main_module_name())
except ImportError:
pass
def _load(self):
web.ctx.app_stack.append(self)
def _unload(self):
web.ctx.app_stack = web.ctx.app_stack[:-1]
if web.ctx.app_stack:
# this is a sub-application, revert ctx to earlier state.
oldctx = web.ctx.get('_oldctx')
if oldctx:
web.ctx.home = oldctx.home
web.ctx.homepath = oldctx.homepath
web.ctx.path = oldctx.path
web.ctx.fullpath = oldctx.fullpath
def _cleanup(self):
# Threads can be recycled by WSGI servers.
        # Clearing up all thread-local state to avoid interfering with subsequent requests.
utils.ThreadedDict.clear_all()
def init_mapping(self, mapping):
self.mapping = list(utils.group(mapping, 2))
def add_mapping(self, pattern, classname):
self.mapping.append((pattern, classname))
def add_processor(self, processor):
"""
Adds a processor to the application.
>>> urls = ("/(.*)", "echo")
>>> app = application(urls, globals())
>>> class echo:
... def GET(self, name): return name
...
>>>
>>> def hello(handler): return "hello, " + handler()
...
>>> app.add_processor(hello)
>>> app.request("/web.py").data
'hello, web.py'
"""
self.processors.append(processor)
def request(self, localpart='/', method='GET', data=None,
host="0.0.0.0:8080", headers=None, https=False, **kw):
"""Makes request to this application for the specified path and method.
Response will be a storage object with data, status and headers.
>>> urls = ("/hello", "hello")
>>> app = application(urls, globals())
>>> class hello:
... def GET(self):
... web.header('Content-Type', 'text/plain')
... return "hello"
...
>>> response = app.request("/hello")
>>> response.data
'hello'
>>> response.status
'200 OK'
>>> response.headers['Content-Type']
'text/plain'
To use https, use https=True.
>>> urls = ("/redirect", "redirect")
>>> app = application(urls, globals())
>>> class redirect:
... def GET(self): raise web.seeother("/foo")
...
>>> response = app.request("/redirect")
>>> response.headers['Location']
'http://0.0.0.0:8080/foo'
>>> response = app.request("/redirect", https=True)
>>> response.headers['Location']
'https://0.0.0.0:8080/foo'
The headers argument specifies HTTP headers as a mapping object
such as a dict.
>>> urls = ('/ua', 'uaprinter')
>>> class uaprinter:
... def GET(self):
... return 'your user-agent is ' + web.ctx.env['HTTP_USER_AGENT']
...
>>> app = application(urls, globals())
>>> app.request('/ua', headers = {
... 'User-Agent': 'a small jumping bean/1.0 (compatible)'
... }).data
'your user-agent is a small jumping bean/1.0 (compatible)'
"""
path, maybe_query = urllib.splitquery(localpart)
query = maybe_query or ""
if 'env' in kw:
env = kw['env']
else:
env = {}
env = dict(env, HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path, QUERY_STRING=query, HTTPS=str(https))
headers = headers or {}
for k, v in headers.items():
env['HTTP_' + k.upper().replace('-', '_')] = v
if 'HTTP_CONTENT_LENGTH' in env:
env['CONTENT_LENGTH'] = env.pop('HTTP_CONTENT_LENGTH')
if 'HTTP_CONTENT_TYPE' in env:
env['CONTENT_TYPE'] = env.pop('HTTP_CONTENT_TYPE')
if method not in ["HEAD", "GET"]:
data = data or ''
import StringIO
if isinstance(data, dict):
q = urllib.urlencode(data)
else:
q = data
env['wsgi.input'] = StringIO.StringIO(q)
if not env.get('CONTENT_TYPE', '').lower().startswith('multipart/') and 'CONTENT_LENGTH' not in env:
env['CONTENT_LENGTH'] = len(q)
response = web.storage()
def start_response(status, headers):
response.status = status
response.headers = dict(headers)
response.header_items = headers
response.data = "".join(self.wsgifunc()(env, start_response))
return response
def browser(self):
import browser
return browser.AppBrowser(self)
def handle(self):
fn, args = self._match(self.mapping, web.ctx.path)
return self._delegate(fn, self.fvars, args)
def handle_with_processors(self):
def process(processors):
try:
if processors:
p, processors = processors[0], processors[1:]
return p(lambda: process(processors))
else:
return self.handle()
except web.HTTPError:
raise
except (KeyboardInterrupt, SystemExit):
raise
except:
print >> web.debug, traceback.format_exc()
raise self.internalerror()
        # processors must be applied in the reverse order. (??)
return process(self.processors)
def wsgifunc(self, *middleware):
"""Returns a WSGI-compatible function for this application."""
def peep(iterator):
"""Peeps into an iterator by doing an iteration
and returns an equivalent iterator.
"""
# wsgi requires the headers first
# so we need to do an iteration
# and save the result for later
try:
firstchunk = iterator.next()
except StopIteration:
firstchunk = ''
return itertools.chain([firstchunk], iterator)
def is_generator(x): return x and hasattr(x, 'next')
def wsgi(env, start_resp):
            # clear threadlocals to avoid interference from previous requests
self._cleanup()
self.load(env)
try:
# allow uppercase methods only
if web.ctx.method.upper() != web.ctx.method:
raise web.nomethod()
result = self.handle_with_processors()
if is_generator(result):
result = peep(result)
else:
result = [result]
except web.HTTPError, e:
result = [e.data]
result = web.safestr(iter(result))
status, headers = web.ctx.status, web.ctx.headers
start_resp(status, headers)
def cleanup():
self._cleanup()
yield '' # force this function to be a generator
return itertools.chain(result, cleanup())
for m in middleware:
wsgi = m(wsgi)
return wsgi
def run(self, *middleware):
"""
Starts handling requests. If called in a CGI or FastCGI context, it will follow
that protocol. If called from the command line, it will start an HTTP
server on the port named in the first command line argument, or, if there
is no argument, on port 8080.
`middleware` is a list of WSGI middleware which is applied to the resulting WSGI
function.
"""
return wsgi.runwsgi(self.wsgifunc(*middleware))
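    # Illustrative only: the conventional entry point for a web.py app built on this class
    # (names are placeholders).
    #
    #   urls = ("/hello", "hello")
    #   app = application(urls, globals())
    #   if __name__ == "__main__":
    #       app.run()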
def stop(self):
"""Stops the http server started by run.
"""
if httpserver.server:
httpserver.server.stop()
httpserver.server = None
def cgirun(self, *middleware):
"""
Return a CGI handler. This is mostly useful with Google App Engine.
There you can just do:
main = app.cgirun()
"""
wsgiapp = self.wsgifunc(*middleware)
try:
from google.appengine.ext.webapp.util import run_wsgi_app
return run_wsgi_app(wsgiapp)
except ImportError:
# we're not running from within Google App Engine
return wsgiref.handlers.CGIHandler().run(wsgiapp)
def load(self, env):
"""Initializes ctx using env."""
ctx = web.ctx
ctx.clear()
ctx.status = '200 OK'
ctx.headers = []
ctx.output = ''
ctx.environ = ctx.env = env
ctx.host = env.get('HTTP_HOST')
if env.get('wsgi.url_scheme') in ['http', 'https']:
ctx.protocol = env['wsgi.url_scheme']
elif env.get('HTTPS', '').lower() in ['on', 'true', '1']:
ctx.protocol = 'https'
else:
ctx.protocol = 'http'
ctx.homedomain = ctx.protocol + '://' + env.get('HTTP_HOST', '[unknown]')
ctx.homepath = os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
ctx.home = ctx.homedomain + ctx.homepath
        #@@ home is changed when the request is delegated to a sub-application.
#@@ but the real home is required for doing absolute redirects.
ctx.realhome = ctx.home
ctx.ip = env.get('REMOTE_ADDR')
ctx.method = env.get('REQUEST_METHOD')
ctx.path = env.get('PATH_INFO')
# http://trac.lighttpd.net/trac/ticket/406 requires:
if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0], ctx.homepath)
# Apache and CherryPy webservers unquote the url but lighttpd doesn't.
# unquote explicitly for lighttpd to make ctx.path uniform across all servers.
ctx.path = urllib.unquote(ctx.path)
if env.get('QUERY_STRING'):
ctx.query = '?' + env.get('QUERY_STRING', '')
else:
ctx.query = ''
ctx.fullpath = ctx.path + ctx.query
for k, v in ctx.iteritems():
# convert all string values to unicode values and replace
# malformed data with a suitable replacement marker.
if isinstance(v, str):
ctx[k] = v.decode('utf-8', 'replace')
# status must always be str
ctx.status = '200 OK'
ctx.app_stack = []
def _delegate(self, f, fvars, args=[]):
def handle_class(cls):
meth = web.ctx.method
if meth == 'HEAD' and not hasattr(cls, meth):
meth = 'GET'
if not hasattr(cls, meth):
raise web.nomethod(cls)
tocall = getattr(cls(), meth)
return tocall(*args)
def is_class(o): return isinstance(o, (types.ClassType, type))
if f is None:
raise web.notfound()
elif isinstance(f, application):
return f.handle_with_processors()
elif is_class(f):
return handle_class(f)
elif isinstance(f, basestring):
if f.startswith('redirect '):
url = f.split(' ', 1)[1]
if web.ctx.method == "GET":
x = web.ctx.env.get('QUERY_STRING', '')
if x:
url += '?' + x
raise web.redirect(url)
elif '.' in f:
mod, cls = f.rsplit('.', 1)
mod = __import__(mod, None, None, [''])
cls = getattr(mod, cls)
else:
cls = fvars[f]
return handle_class(cls)
elif hasattr(f, '__call__'):
return f()
else:
return web.notfound()
def _match(self, mapping, value):
for pat, what in mapping:
if isinstance(what, application):
if value.startswith(pat):
f = lambda: self._delegate_sub_application(pat, what)
return f, None
else:
continue
elif isinstance(what, basestring):
what, result = utils.re_subm('^' + pat + '$', what, value)
else:
result = utils.re_compile('^' + pat + '$').match(value)
if result: # it's a match
return what, [x for x in result.groups()]
return None, None
def _delegate_sub_application(self, dir, app):
"""Deletes request to sub application `app` rooted at the directory `dir`.
The home, homepath, path and fullpath values in web.ctx are updated to mimic request
to the subapp and are restored after it is handled.
@@Any issues with when used with yield?
"""
web.ctx._oldctx = web.storage(web.ctx)
web.ctx.home += dir
web.ctx.homepath += dir
web.ctx.path = web.ctx.path[len(dir):]
web.ctx.fullpath = web.ctx.fullpath[len(dir):]
return app.handle_with_processors()
def get_parent_app(self):
if self in web.ctx.app_stack:
index = web.ctx.app_stack.index(self)
if index > 0:
return web.ctx.app_stack[index-1]
def notfound(self):
"""Returns HTTPError with '404 not found' message"""
parent = self.get_parent_app()
if parent:
return parent.notfound()
else:
return web._NotFound()
def internalerror(self):
"""Returns HTTPError with '500 internal error' message"""
parent = self.get_parent_app()
if parent:
return parent.internalerror()
elif web.config.get('debug'):
import debugerror
return debugerror.debugerror()
else:
return web._InternalError()
class auto_application(application):
"""Application similar to `application` but urls are constructed
    automatically using a metaclass.
>>> app = auto_application()
>>> class hello(app.page):
... def GET(self): return "hello, world"
...
>>> class foo(app.page):
... path = '/foo/.*'
... def GET(self): return "foo"
>>> app.request("/hello").data
'hello, world'
>>> app.request('/foo/bar').data
'foo'
"""
def __init__(self):
application.__init__(self)
class metapage(type):
def __init__(klass, name, bases, attrs):
type.__init__(klass, name, bases, attrs)
path = attrs.get('path', '/' + name)
# path can be specified as None to ignore that class
                # typically required to create an abstract base class.
if path is not None:
self.add_mapping(path, klass)
class page:
path = None
__metaclass__ = metapage
self.page = page
# The application class already has the required functionality of subdir_application
subdir_application = application
class subdomain_application(application):
"""
Application to delegate requests based on the host.
>>> urls = ("/hello", "hello")
>>> app = application(urls, globals())
>>> class hello:
... def GET(self): return "hello"
>>>
>>> mapping = (r"hello\.example\.com", app)
>>> app2 = subdomain_application(mapping)
>>> app2.request("/hello", host="hello.example.com").data
'hello'
>>> response = app2.request("/hello", host="something.example.com")
>>> response.status
'404 Not Found'
>>> response.data
'not found'
"""
def handle(self):
host = web.ctx.host.split(':')[0] #strip port
fn, args = self._match(self.mapping, host)
return self._delegate(fn, self.fvars, args)
def _match(self, mapping, value):
for pat, what in mapping:
if isinstance(what, basestring):
what, result = utils.re_subm('^' + pat + '$', what, value)
else:
result = utils.re_compile('^' + pat + '$').match(value)
if result: # it's a match
return what, [x for x in result.groups()]
return None, None
def loadhook(h):
"""
Converts a load hook into an application processor.
>>> app = auto_application()
>>> def f(): "something done before handling request"
...
>>> app.add_processor(loadhook(f))
"""
def processor(handler):
h()
return handler()
return processor
def unloadhook(h):
"""
Converts an unload hook into an application processor.
>>> app = auto_application()
>>> def f(): "something done after handling request"
...
>>> app.add_processor(unloadhook(f))
"""
def processor(handler):
try:
result = handler()
is_generator = result and hasattr(result, 'next')
except:
# run the hook even when handler raises some exception
h()
raise
if is_generator:
return wrap(result)
else:
h()
return result
def wrap(result):
def next():
try:
return result.next()
except:
                # call the hook at the end of the iterator
h()
raise
result = iter(result)
while True:
yield next()
return processor
def autodelegate(prefix=''):
"""
Returns a method that takes one argument and calls the method named prefix+arg,
calling `notfound()` if there isn't one. Example:
urls = ('/prefs/(.*)', 'prefs')
class prefs:
GET = autodelegate('GET_')
def GET_password(self): pass
def GET_privacy(self): pass
    `GET_password` would get called for `/prefs/password` while `GET_privacy`
    would get called for `/prefs/privacy`.
If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
is called.
"""
def internal(self, arg):
if '/' in arg:
first, rest = arg.split('/', 1)
func = prefix + first
args = ['/' + rest]
else:
func = prefix + arg
args = []
if hasattr(self, func):
try:
return getattr(self, func)(*args)
except TypeError:
raise web.notfound()
else:
raise web.notfound()
return internal
class Reloader:
"""Checks to see if any loaded modules have changed on disk and,
if so, reloads them.
"""
"""File suffix of compiled modules."""
if sys.platform.startswith('java'):
SUFFIX = '$py.class'
else:
SUFFIX = '.pyc'
def __init__(self):
self.mtimes = {}
def __call__(self):
for mod in sys.modules.values():
self.check(mod)
def check(self, mod):
# jython registers java packages as modules but they either
# don't have a __file__ attribute or its value is None
if not (mod and hasattr(mod, '__file__') and mod.__file__):
return
try:
mtime = os.stat(mod.__file__).st_mtime
except (OSError, IOError):
return
if mod.__file__.endswith(self.__class__.SUFFIX) and os.path.exists(mod.__file__[:-1]):
mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime)
if mod not in self.mtimes:
self.mtimes[mod] = mtime
elif self.mtimes[mod] < mtime:
try:
reload(mod)
self.mtimes[mod] = mtime
except ImportError:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[] |
[] |
[
"REAL_SCRIPT_NAME"
] |
[]
|
["REAL_SCRIPT_NAME"]
|
python
| 1 | 0 | |
chalice/cli/factory.py
|
import sys
import os
import json
import importlib
import logging
import functools
import click
from botocore.config import Config as BotocoreConfig
from botocore.session import Session
from typing import Any, Optional, Dict, MutableMapping # noqa
from chalice import __version__ as chalice_version
from chalice.awsclient import TypedAWSClient
from chalice.app import Chalice # noqa
from chalice.config import Config
from chalice.config import DeployedResources # noqa
from chalice.package import create_app_packager
from chalice.package import AppPackager # noqa
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.constants import DEFAULT_ENDPOINT_TYPE
from chalice.logs import LogRetriever
from chalice import local
from chalice.utils import UI # noqa
from chalice.utils import PipeReader # noqa
from chalice.deploy import deployer # noqa
from chalice.deploy import validate
from chalice.invoke import LambdaInvokeHandler
from chalice.invoke import LambdaInvoker
from chalice.invoke import LambdaResponseFormatter
OptStr = Optional[str]
OptInt = Optional[int]
def create_botocore_session(profile=None, debug=False,
connection_timeout=None,
read_timeout=None,
max_retries=None):
# type: (OptStr, bool, OptInt, OptInt, OptInt) -> Session
s = Session(profile=profile)
_add_chalice_user_agent(s)
if debug:
_inject_large_request_body_filter()
config_args = {} # type: Dict[str, Any]
if connection_timeout is not None:
config_args['connect_timeout'] = connection_timeout
if read_timeout is not None:
config_args['read_timeout'] = read_timeout
if max_retries is not None:
config_args['retries'] = {'max_attempts': max_retries}
if config_args:
config = BotocoreConfig(**config_args)
s.set_default_client_config(config)
return s
def _add_chalice_user_agent(session):
# type: (Session) -> None
suffix = '%s/%s' % (session.user_agent_name, session.user_agent_version)
session.user_agent_name = 'aws-chalice'
session.user_agent_version = chalice_version
session.user_agent_extra = suffix
def _inject_large_request_body_filter():
# type: () -> None
log = logging.getLogger('botocore.endpoint')
log.addFilter(LargeRequestBodyFilter())
class NoSuchFunctionError(Exception):
"""The specified function could not be found."""
def __init__(self, name):
# type: (str) -> None
self.name = name
super(NoSuchFunctionError, self).__init__()
class UnknownConfigFileVersion(Exception):
def __init__(self, version):
# type: (str) -> None
super(UnknownConfigFileVersion, self).__init__(
"Unknown version '%s' in config.json" % version)
class LargeRequestBodyFilter(logging.Filter):
def filter(self, record):
# type: (Any) -> bool
# Note: the proper type should be "logging.LogRecord", but
# the typechecker complains about 'Invalid index type "int" for "dict"'
# so we're using Any for now.
if record.msg.startswith('Making request'):
if record.args[0].name in ['UpdateFunctionCode', 'CreateFunction']:
# When using the ZipFile argument (which is used in chalice),
# the entire deployment package zip is sent as a base64 encoded
# string. We don't want this to clutter the debug logs
# so we don't log the request body for lambda operations
# that have the ZipFile arg.
record.args = (record.args[:-1] +
('(... omitted from logs due to size ...)',))
return True
class CLIFactory(object):
def __init__(self, project_dir, debug=False, profile=None, environ=None):
# type: (str, bool, Optional[str], Optional[MutableMapping]) -> None
self.project_dir = project_dir
self.debug = debug
self.profile = profile
if environ is None:
environ = dict(os.environ)
self._environ = environ
def create_botocore_session(self, connection_timeout=None,
read_timeout=None, max_retries=None):
# type: (OptInt, OptInt, OptInt) -> Session
return create_botocore_session(profile=self.profile,
debug=self.debug,
connection_timeout=connection_timeout,
read_timeout=read_timeout,
max_retries=max_retries)
def create_default_deployer(self, session, config, ui):
# type: (Session, Config, UI) -> deployer.Deployer
return deployer.create_default_deployer(session, config, ui)
def create_deletion_deployer(self, session, ui):
# type: (Session, UI) -> deployer.Deployer
return deployer.create_deletion_deployer(
TypedAWSClient(session), ui)
def create_deployment_reporter(self, ui):
# type: (UI) -> deployer.DeploymentReporter
return deployer.DeploymentReporter(ui=ui)
def create_config_obj(self, chalice_stage_name=DEFAULT_STAGE_NAME,
autogen_policy=None,
api_gateway_stage=None):
# type: (str, Optional[bool], str) -> Config
user_provided_params = {} # type: Dict[str, Any]
default_params = {'project_dir': self.project_dir,
'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
'api_gateway_endpoint_type': DEFAULT_ENDPOINT_TYPE,
'autogen_policy': True}
try:
config_from_disk = self.load_project_config()
except (OSError, IOError):
raise RuntimeError("Unable to load the project config file. "
"Are you sure this is a chalice project?")
except ValueError as err:
raise RuntimeError("Unable to load the project config file: %s"
% err)
self._validate_config_from_disk(config_from_disk)
if autogen_policy is not None:
user_provided_params['autogen_policy'] = autogen_policy
if self.profile is not None:
user_provided_params['profile'] = self.profile
if api_gateway_stage is not None:
user_provided_params['api_gateway_stage'] = api_gateway_stage
config = Config(chalice_stage=chalice_stage_name,
user_provided_params=user_provided_params,
config_from_disk=config_from_disk,
default_params=default_params)
user_provided_params['chalice_app'] = functools.partial(
self.load_chalice_app, config.environment_variables)
return config
def _validate_config_from_disk(self, config):
# type: (Dict[str, Any]) -> None
string_version = config.get('version', '1.0')
try:
version = float(string_version)
if version > 2.0:
raise UnknownConfigFileVersion(string_version)
except ValueError:
raise UnknownConfigFileVersion(string_version)
def create_app_packager(self, config, package_format):
# type: (Config, str) -> AppPackager
return create_app_packager(config, package_format)
def create_log_retriever(self, session, lambda_arn):
# type: (Session, str) -> LogRetriever
client = TypedAWSClient(session)
retriever = LogRetriever.create_from_lambda_arn(client, lambda_arn)
return retriever
def create_stdin_reader(self):
# type: () -> PipeReader
stream = click.get_binary_stream('stdin')
reader = PipeReader(stream)
return reader
def create_lambda_invoke_handler(self, name, stage):
# type: (str, str) -> LambdaInvokeHandler
config = self.create_config_obj(stage)
deployed = config.deployed_resources(stage)
try:
resource = deployed.resource_values(name)
arn = resource['lambda_arn']
except (KeyError, ValueError):
raise NoSuchFunctionError(name)
function_scoped_config = config.scope(stage, name)
        # The session's max retries must be set to 0 when invoking a lambda
        # function, because on a timeout or other retriable error the
        # underlying client would otherwise call the function again.
session = self.create_botocore_session(
read_timeout=function_scoped_config.lambda_timeout,
max_retries=0,
)
client = TypedAWSClient(session)
invoker = LambdaInvoker(arn, client)
handler = LambdaInvokeHandler(
invoker,
LambdaResponseFormatter(),
UI(),
)
return handler
def load_chalice_app(self, environment_variables=None,
validate_feature_flags=True):
# type: (Optional[MutableMapping], Optional[bool]) -> Chalice
        # validate_feature_flags indicates that we should validate that
        # any experimental features used have the appropriate feature flags.
if self.project_dir not in sys.path:
sys.path.insert(0, self.project_dir)
# The vendor directory has its contents copied up to the top level of
# the deployment package. This means that imports will work in the
# lambda function as if the vendor directory is on the python path.
# For loading the config locally we must add the vendor directory to
# the path so it will be treated the same as if it were running on
# lambda.
vendor_dir = os.path.join(self.project_dir, 'vendor')
if os.path.isdir(vendor_dir) and vendor_dir not in sys.path:
# This is a tradeoff we have to make for local use.
# The common use case of vendor/ is to include
# extension modules built for AWS Lambda. If you're
# running on a non-linux dev machine, then attempting
# to import these files will raise exceptions. As
# a workaround, the vendor is added to the end of
# sys.path so it's after `./lib/site-packages`.
            # This gives you a chance to install the correct
# version locally and still keep the lambda
# specific one in vendor/
sys.path.append(vendor_dir)
if environment_variables is not None:
self._environ.update(environment_variables)
try:
app = importlib.import_module('app')
chalice_app = getattr(app, 'app')
except SyntaxError as e:
message = (
'Unable to import your app.py file:\n\n'
'File "%s", line %s\n'
' %s\n'
'SyntaxError: %s'
) % (getattr(e, 'filename'), e.lineno, e.text, e.msg)
raise RuntimeError(message)
if validate_feature_flags:
validate.validate_feature_flags(chalice_app)
return chalice_app
def load_project_config(self):
# type: () -> Dict[str, Any]
"""Load the chalice config file from the project directory.
:raise: OSError/IOError if unable to load the config file.
"""
config_file = os.path.join(self.project_dir, '.chalice', 'config.json')
with open(config_file) as f:
return json.loads(f.read())
def create_local_server(self, app_obj, config, host, port):
# type: (Chalice, Config, str, int) -> local.LocalDevServer
return local.create_local_server(app_obj, config, host, port)
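# Illustrative only: a hedged sketch of how the CLI might wire these factory methods together;
# the project directory and UI wiring are placeholders.
#
#   factory = CLIFactory(project_dir='.', debug=False)
#   config = factory.create_config_obj(chalice_stage_name=DEFAULT_STAGE_NAME)
#   session = factory.create_botocore_session()
#   default_deployer = factory.create_default_deployer(session, config, UI())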
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
dist/Platform.app/Contents/Resources/lib/python3.7/numpy/f2py/rules.py
|
#!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
from .. import version as _numpy_version
numpy_version = _numpy_version.version
import os
import time
import copy
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, gentitle, getargs2,
hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote,
isarray, isarrayofstrings, iscomplex, iscomplexarray,
iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal,
isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c,
isintent_callback, isintent_copy, isintent_hide, isintent_inout,
isintent_nothide, isintent_out, isintent_overwrite, islogical,
islong_complex, islong_double, islong_doublefunction, islong_long,
islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar,
issigned_long_longarray, isstring, isstringarray, isstringfunction,
issubroutine, issubroutine_wrap, isthreadsafe, isunsigned,
isunsigned_char, isunsigned_chararray, isunsigned_long_long,
isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray,
l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper
)
from . import capi_maps
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
options = {}
sepdict = {}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr', 'method',
'pyobjfrom', 'closepyobjfrom',
'freemem',
'userincludes',
'includes0', 'includes', 'typedefs', 'typedefs_generated',
'cppmacros', 'cfuncs', 'callbacks',
'latexdoc',
'restdoc',
'routine_defs', 'externroutines',
'initf2pywraphooks',
'commonhooks', 'initcommonhooks',
'f90modhooks', 'initf90modhooks']:
sepdict[k] = '\n'
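# Hedged illustration (not part of f2py): fragments that the rules below
# collect under a key such as 'decl' are eventually joined with that key's
# separator; the actual joining is done by the rule-application helpers in
# auxfuncs, not here.
_example_decls = ['int a = 0;', 'double b = 0.0;']
_example_joined = sepdict['decl'].join(_example_decls)  # 'int a = 0;\ndouble b = 0.0;'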
#################### Rules for C/API module #################
generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
module_rules = {
'modulebody': """\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <[email protected]>.
* Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
""" + gentitle("See f2py2e/cfuncs.py: includes") + """
#includes#
#includes0#
""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
#typedefs#
""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
#typedefs_generated#
""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
#cppmacros#
""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
#cfuncs#
""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
#userincludes#
""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
#usercode#
/* See f2py2e/rules.py */
#externroutines#
""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
#usercode1#
""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
#callbacks#
""" + gentitle("See f2py2e/rules.py: buildapi") + """
#body#
""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
#f90modhooks#
""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
#commonhooks#
""" + gentitle("See f2py2e/rules.py") + """
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
PyMODINIT_FUNC PyInit_#modulename#(void) {
\tint i;
\tPyObject *m,*d, *s, *tmp;
\tm = #modulename#_module = PyModule_Create(&moduledef);
\tPy_SET_TYPE(&PyFortran_Type, &PyType_Type);
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
\td = PyModule_GetDict(m);
\ts = PyUnicode_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\tPy_DECREF(s);
\ts = PyUnicode_FromString(
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\tPy_DECREF(s);
\ts = PyUnicode_FromString(\"""" + numpy_version + """\");
\tPyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
\tPy_DECREF(s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\t/*
\t * Store the error object inside the dict, so that it could get deallocated.
\t * (in practice, this is a module, so it likely will not and cannot.)
\t */
\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
\tPy_DECREF(#modulename#_error);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
\t\tPy_DECREF(tmp);
\t}
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn m;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor': {'latexdoc': '\n\n',
'restdoc': '\n\n'},
'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc': ['Module #modulename#\n' + '=' * 80,
'\n#restdoc#']
}
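# --- Illustrative sketch (not part of f2py) ----------------------------------
# The templates above are full of '#name#' placeholders that get filled in
# when the rules are applied. A simplified, hedged sketch of that substitution;
# the real implementation lives in auxfuncs (replace/applyrules) and also
# handles lists, separators and conditional rule dictionaries.
def substitute_placeholders(template, values):
    # Replace each '#key#' marker with its value; plain strings only.
    for key, val in values.items():
        template = template.replace('#%s#' % key, str(val))
    return template
# substitute_placeholders('static PyObject *#modulename#_error;',
#                         {'modulename': 'spam'})
# -> 'static PyObject *spam_error;'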
defmod_rules = [
{'body': '/*eof body*/',
'method': '/*eof method*/',
'externroutines': '/*eof externroutines*/',
'routine_defs': '/*eof routine_defs*/',
'initf90modhooks': '/*eof initf90modhooks*/',
'initf2pywraphooks': '/*eof initf2pywraphooks*/',
'initcommonhooks': '/*eof initcommonhooks*/',
'latexdoc': '',
'restdoc': '',
'modnote': {hasnote: '#note#', l_not(hasnote): ''},
}
]
routine_rules = {
'separatorsfor': sepdict,
'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
PyObject * volatile capi_buildvalue = NULL;
volatile int f2py_success = 1;
#decl#
static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
if (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
CFUNCSMESS(\"Building return value.\\n\");
capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
if (capi_buildvalue == NULL) {
#routdebugfailure#
} else {
#routdebugleave#
}
CFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
return capi_buildvalue;
}
#endtitle#
""",
'routine_defs': '#routine_def#',
'initf2pywraphooks': '#initf2pywraphook#',
'externroutines': '#declfortranroutine#',
'doc': '#docreturn##name#(#docsignature#)',
'docshort': '#docreturn##name#(#docsignatureshort#)',
'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n',
'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80,
]
}
################## Rules for C/API function ##############
rout_rules = [
{ # Init
'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n',
'routdebugleave': '\n', 'routdebugfailure': '\n',
'setjmpbuf': ' || ',
'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n',
'docstrcbs': '\n', 'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
'freemem': '/*freemem*/',
'docsignshort': '', 'docsignoptshort': '',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\\nParameters\\n----------',
'docstropt': '\\nOther Parameters\\n----------------',
'docstrout': '\\nReturns\\n-------',
'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'args_capi': '', 'keys_capi': '', 'functype': '',
'frompyobj': '/*frompyobj*/',
# this list will be reversed
'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'],
'pyobjfrom': '/*pyobjfrom*/',
# this list will be reversed
'closepyobjfrom': ['/*end of closepyobjfrom*/'],
'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
'routdebugenter': '/*routdebugenter*/',
'routdebugfailure': '/*routdebugfailure*/',
'callfortranroutine': '/*callfortranroutine*/',
'argformat': '', 'keyformat': '', 'need_cfuncs': '',
'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
'initf2pywraphook': '',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, {
'apiname': 'f2py_rout_#modulename#_#name#',
'pyname': '#modulename#.#name#',
'decl': '',
'_check': l_not(ismoduleroutine)
}, {
'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname': '#modulename#.#f90modulename#.#name#',
'decl': '',
'_check': ismoduleroutine
}, { # Subroutine
'functype': 'void',
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine: '',
isdummyroutine: ''
},
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement: '''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals: """\t\t}"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
s = PyUnicode_FromString("#name#");
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t(*f2py_func)(#callfortran#);'},
{hascallstatement:
'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
s = PyUnicode_FromString("#name#");
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t(*f2py_func)(#callfortran#);'},
{hascallstatement:
'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'}
],
'_check': issubroutine_wrap,
}, { # Function
'functype': '#ctype#',
'docreturn': {l_not(isintent_hide): '#rname#,'},
'docstrout': '#pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote: '--- #resultnote#'}],
'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)): """\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check': l_and(isfunction, l_not(isfunction_wrap))
}, { # Scalar function
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine: ''
},
'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:
'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine': [
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement: '''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'},
{l_and(debugcapi, iscomplexfunction)
: '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need': [{l_not(isdummyroutine): 'F_FUNC'},
{iscomplexfunction: 'pyobj_from_#ctype#1'},
{islong_longfunction: 'long_long'},
{islong_doublefunction: 'long_double'}],
'returnformat': {l_not(isintent_hide): '#rformat#'},
'return': {iscomplexfunction: ',#name#_return_value_capi',
l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'},
'_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap))
}, { # String function # in use for --no-wrap
'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl': ['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals: """\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe: '\t\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t\t}'},
{debugcapi:
'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat': '#rformat#',
'return': ',#name#_return_value',
'freemem': '\tSTRINGFREE(#name#_return_value);',
'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check': debugcapi
}
]
################ Rules for arguments ##################
typedef_need_dict = {islong_long: 'long_long',
islong_double: 'long_double',
islong_complex: 'complex_long_double',
isunsigned_char: 'unsigned_char',
isunsigned_short: 'unsigned_short',
isunsigned: 'unsigned',
isunsigned_long_long: 'unsigned_long_long',
isunsigned_chararray: 'unsigned_char',
isunsigned_shortarray: 'unsigned_short',
isunsigned_long_longarray: 'unsigned_long_long',
issigned_long_longarray: 'long_long',
}
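# --- Illustrative sketch (not part of f2py) ----------------------------------
# typedef_need_dict maps predicate functions to the typedef snippet a variable
# requires. A hedged sketch of how such a predicate-keyed dict can be collapsed
# for one variable; f2py's real bookkeeping goes through the 'need' entries and
# cfuncs, not through this helper.
def needed_typedefs(var, need_dict=typedef_need_dict):
    # Collect the typedef names whose predicate accepts this variable.
    return [name for check, name in need_dict.items() if check(var)]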
aux_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
{debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'need': {hasinitvalue: 'math.h'},
'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
'return': ',#varname#',
'docstrout': '#pydocsignout#',
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': l_and(isscalar, l_not(iscomplex), isintent_out),
},
# Complex scalars
{ # Common
'decl': '\t#ctype# #varname#;',
'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': iscomplex
},
# String
{ # Common
'decl': ['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl': ['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
arg_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': ['\t/* Processing variable #varname# */',
{debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
# Doc signatures
{
'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'},
'docstrout': {isintent_out: '#pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'depend': ''
},
# Required/Optional arguments
{
'kwlist': '"#varname#",',
'docsign': '#varname#,',
'_check': l_and(isintent_nothide, l_not(isoptional))
},
{
'kwlistopt': '"#varname#",',
'docsignopt': '#varname#=#showinit#,',
'docsignoptshort': '#varname#,',
'_check': l_and(isintent_nothide, isoptional)
},
# Docstring/BuildValue
{
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'},
'docsignxashort': {isintent_nothide: '#varname#_extra_args,'},
'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs': '#cbdocstr#',
'latexdocstrcbs': '\\item[] #cblatexdocstr#',
'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };',
' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;',
' PyTupleObject *#varname#_xa_capi = NULL;',
{l_not(isintent_callback):
' #cbname#_typedef #varname#_cptr;'}
],
'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'},
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'xaformat': {isintent_nothide: 'O!'},
'args_capi': {isrequired: ',&#varname#_cb.capi'},
'keys_capi': {isoptional: ',&#varname#_cb.capi'},
'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))',
'callfortran': {l_not(isintent_callback): '#varname#_cptr,'},
'need': ['#cbname#', 'setjmp.h'],
'_check':isexternal
},
{
'frompyobj': [{l_not(isintent_callback): """\
if(F2PyCapsule_Check(#varname#_cb.capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi);
} else {
#varname#_cptr = #cbname#;
}
"""}, {isintent_callback: """\
if (#varname#_cb.capi==Py_None) {
#varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_cb.capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp) {
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
Py_DECREF(capi_tmp);
}
else {
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
}
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_cb.capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
"""\
if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) {
""",
{debugcapi: ["""\
fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs);
CFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\");
#varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""",
],
'cleanupfrompyobj':
"""\
CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\");
#varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);
Py_DECREF(#varname#_cb.args_capi);
}""",
'need': ['SWAP', 'create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'return': {isintent_out: ',#varname#'},
'_check': l_and(isscalar, l_not(iscomplex))
}, {
'need': {hasinitvalue: 'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: """\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
'frompyobj': [
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend': ''},
{l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)',
'_depend': ''},
{l_not(islogical): '''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical: '''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/',
'need': {l_not(islogical): '#ctype#_from_pyobj'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend': ''
}, { # Hidden
'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
'need': typedef_need_dict,
'_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend': ''
}, { # Common
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check': l_and(isscalar, l_not(iscomplex)),
'_depend': ''
},
# Complex scalars
{ # Common
'decl': '\t#ctype# #varname#;',
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return': {isintent_out: ',#varname#_capi'},
'_check': iscomplex
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'pyobjfrom': {isintent_inout: """\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check': l_and(iscomplex, isintent_nothide)
}, {
'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue))
: '\tif (#varname#_capi != Py_None)'},
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/',
'need': ['#ctype#_from_pyobj'],
'_check': l_and(iscomplex, isintent_nothide),
'_depend': ''
}, { # Hidden
'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'},
'_check': l_and(iscomplex, isintent_hide)
}, {
'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': l_and(iscomplex, isintent_hide),
'_depend': ''
}, { # Common
'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need': ['pyobj_from_#ctype#1'],
'_check': iscomplex
}, {
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check': iscomplex,
'_depend': ''
},
# String
{ # Common
'decl': ['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'return': {isintent_out: ',#varname#'},
'need': ['len..'], # 'STRINGFREE'],
'_check':isstring
}, { # Common
'frompyobj': """\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj': """\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'],
'_check':isstring,
'_depend':''
}, { # Not hidden
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: '''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
}, {
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check': isstring,
'_depend': ''
},
# Array
{ # Common
'decl': ['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out: ',capi_#varname#_tmp'},
'need': 'len..',
'_check': isarray
}, { # intent(overwrite) array
'decl': '\tint capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=1,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
'decl': '\tint capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=0,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray,
'_depend': ''
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'_check': l_and(isarray, isintent_nothide)
}, {
'frompyobj': ['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tPyObject *exc, *val, *tb;
\t\tPyErr_Fetch(&exc, &val, &tb);
\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
\t} else {
\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue: [
{isintent_nothide:
'\tif (#varname#_capi == Py_None) {'},
{isintent_hide: '\t{'},
{iscomplexarray: '\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tPyObject *exc, *val, *tb;
\t\t\tPyErr_Fetch(&exc, &val, &tb);
\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj': [ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)): """\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out))
: """\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'},
],
'_check': isarray,
'_depend': ''
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
################# Rules for checking ###############
check_rules = [
{
'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need': 'len..'
}, {
'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/',
'need': 'CHECKSCALAR',
'_check': l_and(isscalar, l_not(iscomplex)),
'_break': ''
}, {
'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/',
'need': 'CHECKSTRING',
'_check': isstring,
'_break': ''
}, {
'need': 'CHECKARRAY',
'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/',
'_check': isarray,
'_break': ''
}, {
'need': 'CHECKGENERIC',
'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/',
}
]
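# --- Illustrative sketch (not part of f2py) ----------------------------------
# Each rule dictionary above may carry a '_check' predicate; buildmodule and
# buildapi below only apply a rule when that predicate accepts the current
# block or variable. A hedged one-liner capturing that selection pattern:
def select_rules(rules, obj):
    # Keep rules without a '_check', plus rules whose predicate matches obj.
    return [r for r in rules if '_check' not in r or r['_check'](obj)]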
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m, um):
"""
Return
"""
outmess('\tBuilding module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
vrd = capi_maps.modsign2map(m)
rd = dictappend({'f2py_version': f2py_version}, vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb = None
for bi in m['body']:
if not bi['block'] == 'interface':
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
if b['name'] == n:
nb = b
break
if not nb:
errmess(
'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n))
continue
nb_list = [nb]
if 'entry' in nb:
for k, a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
# requiresf90wrapper must be called before buildapi as it
# rewrites assumed shape arrays as automatic arrays.
isf90 = requiresf90wrapper(nb)
api, wrap = buildapi(nb)
if wrap:
if isf90:
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar = applyrules(api, vrd)
rd = dictappend(rd, ar)
# Construct COMMON block support
cr, wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar = applyrules(cr, vrd)
rd = dictappend(rd, ar)
# Construct F90 module support
mr, wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar = applyrules(mr, vrd)
rd = dictappend(rd, ar)
for u in um:
ar = use_rules.buildusevars(u, m['use'][u['name']])
rd = dictappend(rd, ar)
needs = cfuncs.get_needs()
code = {}
for n in needs.keys():
code[n] = []
for k in needs[n]:
c = ''
if k in cfuncs.includes0:
c = cfuncs.includes0[k]
elif k in cfuncs.includes:
c = cfuncs.includes[k]
elif k in cfuncs.userincludes:
c = cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c = cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c = cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c = cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c = cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c = cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c = cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c = cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n' % (repr(k)))
continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar = applyrules(r, vrd, m)
rd = dictappend(rd, ar)
ar = applyrules(module_rules, rd)
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
with open(fn, 'w') as f:
f.write(ar['modulebody'].replace('\t', 2 * ' '))
outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.rest')
with open(fn, 'w') as f:
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.tex')
ret['ltx'] = fn
with open(fn, 'w') as f:
f.write(
'%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
if 'shortlatex' not in options:
f.write(
'\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
with open(wn, 'w') as f:
f.write('C -*- fortran -*-\n')
f.write(
'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'C It contains Fortran 77 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
if 0 <= l.find('!') < 66:
# don't split comment lines
lines.append(l + '\n')
elif l and l[0] == ' ':
while len(l) >= 66:
lines.append(l[:66] + '\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
ret['fsrc'] = wn
with open(wn, 'w') as f:
f.write('! -*- f90 -*-\n')
f.write(
'! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'! It contains Fortran 90 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
if 0 <= l.find('!') < 72:
# don't split comment lines
lines.append(l + '\n')
elif len(l) > 72 and l[0] == ' ':
lines.append(l[:72] + '&\n &')
l = l[72:]
while len(l) > 66:
lines.append(l[:66] + '&\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
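# --- Illustrative sketch (not part of f2py) ----------------------------------
# The two wrapper-writing loops above fold overlong generated Fortran lines at
# fixed columns (72/66) with '&' continuation markers. A hedged standalone
# version of the fixed-form case; the continuation indentation is approximate
# and the authoritative logic is the loop inside buildmodule itself.
def wrap_fixed_form(line, width=66):
    # Split one overlong fixed-form source line into continuation lines.
    out = []
    while len(line) >= width:
        out.append(line[:width] + '\n     &')
        line = line[width:]
    out.append(line + '\n')
    return ''.join(out)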
################## Build C/API function #############
stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th',
6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
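# Hedged illustration of how stnd is used in buildapi below:
#   repr(1) + stnd[1 % 10] + ' argument'  -> '1st argument'
#   repr(2) + stnd[2 % 10] + ' keyword'   -> '2nd keyword'
# (the plain modulo lookup means teens such as 12 come out as '12nd')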
def buildapi(rout):
rout, wrap = func2subr.assubr(rout)
args, depargs = getargs2(rout)
capi_maps.depargs = depargs
var = rout['vars']
if ismoduleroutine(rout):
outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' %
(rout['modulename'], rout['name']))
else:
outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name']))
# Routine
vrd = capi_maps.routsign2map(rout)
rd = dictappend({}, vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
# Args
nth, nthk = 0, 0
savevrd = {}
for a in args:
vrd = capi_maps.sign2map(a, var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth = nth + 1
vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument'
else:
nthk = nthk + 1
vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword'
else:
vrd['nth'] = 'hidden'
savevrd[a] = vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd = savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check'] = c
ar = applyrules(check_rules, vrd, var[a])
rd = dictappend(rd, ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign': rd['docsign'],
'docsignopt': rd['docsignopt'],
'docsignxa': rd['docsignxa']}))
optargs = stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa': rd['docsignxashort'],
'docsignopt': rd['docsignoptshort']}
))
if optargs == '':
rd['docsignatureshort'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_')
rd['latexdocsignatureshort'] = rd[
'latexdocsignatureshort'].replace(',', ', ')
cfs = stripcomma(replace('#callfortran##callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
if len(rd['callfortranappend']) > 1:
rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
else:
rd['callcompaqfortran'] = cfs
rd['callfortran'] = cfs
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = '
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n' % (ar['docshort']))
else:
outmess('\t\t %s\n' % (ar['docshort']))
return ar, wrap
#################### EOF rules.py #######################
|
[] |
[] |
[
"SOURCE_DATE_EPOCH"
] |
[]
|
["SOURCE_DATE_EPOCH"]
|
python
| 1 | 0 | |
test/extended/util/test.go
|
package util
import (
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"github.com/onsi/gomega"
"k8s.io/klog"
kapiv1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kclientset "k8s.io/client-go/kubernetes"
rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/generated"
// this appears to inexplicably auto-register global flags.
_ "k8s.io/kubernetes/test/e2e/storage/drivers"
projectv1 "github.com/openshift/api/project/v1"
securityv1client "github.com/openshift/client-go/security/clientset/versioned"
"github.com/openshift/origin/pkg/version"
)
var (
reportFileName string
syntheticSuite string
quiet bool
)
var TestContext *e2e.TestContextType = &e2e.TestContext
func InitStandardFlags() {
e2e.RegisterCommonFlags()
e2e.RegisterClusterFlags()
// replaced by a bare import above.
//e2e.RegisterStorageFlags()
}
func InitTest(dryRun bool) {
InitDefaultEnvironmentVariables()
// interpret synthetic input in `--ginkgo.focus` and/or `--ginkgo.skip`
ginkgo.BeforeEach(checkSyntheticInput)
TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false"
TestContext.VerifyServiceAccount = true
testfiles.AddFileSource(testfiles.BindataFileSource{
Asset: generated.Asset,
AssetNames: generated.AssetNames,
})
TestContext.KubectlPath = "kubectl"
TestContext.KubeConfig = KubeConfigPath()
os.Setenv("KUBECONFIG", TestContext.KubeConfig)
// "debian" is used when not set. At least GlusterFS tests need "custom".
// (There is no option for "rhel" or "centos".)
TestContext.NodeOSDistro = "custom"
TestContext.MasterOSDistro = "custom"
// load and set the host variable for kubectl
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
cfg, err := clientConfig.ClientConfig()
if err != nil && !dryRun { // we don't need the host when doing a dryrun
FatalErr(err)
}
if cfg != nil {
TestContext.Host = cfg.Host
}
reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
if reportFileName == "" {
reportFileName = "junit"
}
quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
// Ensure that Kube tests run privileged (like they do upstream)
TestContext.CreateTestingNS = createTestingNS
klog.V(2).Infof("Extended test version %s", version.Get().String())
}
func ExecuteTest(t ginkgo.GinkgoTestingT, suite string) {
var r []ginkgo.Reporter
if dir := os.Getenv("TEST_REPORT_DIR"); len(dir) > 0 {
TestContext.ReportDir = dir
}
if TestContext.ReportDir != "" {
if err := os.MkdirAll(TestContext.ReportDir, 0755); err != nil {
klog.Errorf("Failed creating report directory: %v", err)
}
defer e2e.CoreDump(TestContext.ReportDir)
}
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = "Skipped"
}
gomega.RegisterFailHandler(ginkgo.Fail)
if TestContext.ReportDir != "" {
r = append(r, reporters.NewJUnitReporter(path.Join(TestContext.ReportDir, fmt.Sprintf("%s_%02d.xml", reportFileName, config.GinkgoConfig.ParallelNode))))
}
AnnotateTestSuite()
if quiet {
r = append(r, NewSimpleReporter())
ginkgo.RunSpecsWithCustomReporters(t, suite, r)
} else {
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, suite, r)
}
}
func AnnotateTestSuite() {
testRenamer := newGinkgoTestRenamerFromGlobals(e2e.TestContext.Provider)
ginkgo.WalkTests(testRenamer.maybeRenameTest)
}
func newGinkgoTestRenamerFromGlobals(provider string) *ginkgoTestRenamer {
var allLabels []string
matches := make(map[string]*regexp.Regexp)
stringMatches := make(map[string][]string)
excludes := make(map[string]*regexp.Regexp)
for label, items := range testMaps {
sort.Strings(items)
allLabels = append(allLabels, label)
var remain []string
for _, item := range items {
re := regexp.MustCompile(item)
if p, ok := re.LiteralPrefix(); ok {
stringMatches[label] = append(stringMatches[label], p)
} else {
remain = append(remain, item)
}
}
if len(remain) > 0 {
matches[label] = regexp.MustCompile(strings.Join(remain, `|`))
}
}
for label, items := range labelExcludes {
sort.Strings(items)
excludes[label] = regexp.MustCompile(strings.Join(items, `|`))
}
sort.Strings(allLabels)
if provider != "" {
excludedTests = append(excludedTests, fmt.Sprintf(`\[Skipped:%s\]`, provider))
}
klog.Infof("openshift-tests excluded test regex is %q", strings.Join(excludedTests, `|`))
excludedTestsFilter := regexp.MustCompile(strings.Join(excludedTests, `|`))
return &ginkgoTestRenamer{
allLabels: allLabels,
stringMatches: stringMatches,
matches: matches,
excludes: excludes,
excludedTestsFilter: excludedTestsFilter,
}
}
type ginkgoTestRenamer struct {
allLabels []string
stringMatches map[string][]string
matches map[string]*regexp.Regexp
excludes map[string]*regexp.Regexp
excludedTestsFilter *regexp.Regexp
}
func (r *ginkgoTestRenamer) maybeRenameTest(name string, node types.TestNode) {
labels := ""
for {
count := 0
for _, label := range r.allLabels {
if strings.Contains(name, label) {
continue
}
var hasLabel bool
for _, segment := range r.stringMatches[label] {
hasLabel = strings.Contains(name, segment)
if hasLabel {
break
}
}
if !hasLabel {
if re := r.matches[label]; re != nil {
hasLabel = r.matches[label].MatchString(name)
}
}
if hasLabel {
// TODO: remove when we no longer need it
if re, ok := r.excludes[label]; ok && re.MatchString(name) {
continue
}
count++
labels += " " + label
name += " " + label
}
}
if count == 0 {
break
}
}
if !r.excludedTestsFilter.MatchString(name) {
isSerial := strings.Contains(name, "[Serial]")
isConformance := strings.Contains(name, "[Conformance]")
switch {
case isSerial && isConformance:
node.SetText(node.Text() + " [Suite:openshift/conformance/serial/minimal]")
case isSerial:
node.SetText(node.Text() + " [Suite:openshift/conformance/serial]")
case isConformance:
node.SetText(node.Text() + " [Suite:openshift/conformance/parallel/minimal]")
default:
node.SetText(node.Text() + " [Suite:openshift/conformance/parallel]")
}
}
if strings.Contains(node.CodeLocation().FileName, "/origin/test/") && !strings.Contains(node.Text(), "[Suite:openshift") {
node.SetText(node.Text() + " [Suite:openshift]")
}
if strings.Contains(node.CodeLocation().FileName, "/kubernetes/test/e2e/") {
node.SetText(node.Text() + " [Suite:k8s]")
}
node.SetText(node.Text() + labels)
}
// InitDefaultEnvironmentVariables makes sure certain required env vars are available in the case
// that extended tests are invoked directly via calls to ginkgo/extended.test
func InitDefaultEnvironmentVariables() {
if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 {
os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts"))
}
}
// TODO: Use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228 to implement this.
// isPackage determines whether the test is in a package. Ideally this would be implemented in ginkgo.
func isPackage(pkg string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FileName, pkg)
}
// TODO: For both is*Test functions, use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228
func isOriginTest() bool {
return isPackage("/origin/test/")
}
func isKubernetesE2ETest() bool {
return isPackage("/kubernetes/test/e2e/")
}
func testNameContains(name string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FullTestText, name)
}
func isOriginUpgradeTest() bool {
return isPackage("/origin/test/e2e/upgrade/")
}
func skipTestNamespaceCustomization() bool {
return (isPackage("/kubernetes/test/e2e/namespace.go") && (testNameContains("should always delete fast") || testNameContains("should delete fast enough")))
}
// createTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs
func createTestingNS(baseName string, c kclientset.Interface, labels map[string]string) (*kapiv1.Namespace, error) {
if !strings.HasPrefix(baseName, "e2e-") {
baseName = "e2e-" + baseName
}
ns, err := e2e.CreateTestingNS(baseName, c, labels)
if err != nil {
return ns, err
}
klog.V(2).Infof("blah=%s", ginkgo.CurrentGinkgoTestDescription().FileName)
// Add anyuid and privileged permissions for upstream tests
if (isKubernetesE2ETest() && !skipTestNamespaceCustomization()) || isOriginUpgradeTest() {
clientConfig, err := getClientConfig(KubeConfigPath())
if err != nil {
return ns, err
}
securityClient, err := securityv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
// add the "privileged" scc to ensure pods that explicitly
// request extra capabilities are not rejected
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "privileged")
// add the "anyuid" scc to ensure pods that don't specify a
// uid don't get forced into a range (mimics upstream
// behavior)
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "anyuid")
// add the "hostmount-anyuid" scc to ensure pods using hostPath
// can execute tests
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "hostmount-anyuid")
// The intra-pod test requires that the service account have
// permission to retrieve service endpoints.
rbacClient, err := rbacv1client.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
addRoleToE2EServiceAccounts(rbacClient, []kapiv1.Namespace{*ns}, "view")
// in practice too many kube tests ignore scheduling constraints
allowAllNodeScheduling(c, ns.Name)
}
return ns, err
}
var (
testMaps = map[string][]string{
// tests that require a local host
"[Local]": {
// Doesn't work on scaled up clusters
`\[Feature:ImagePrune\]`,
},
// alpha features that are not gated
"[Disabled:Alpha]": {
`\[Feature:Initializers\]`, // admission controller disabled
`\[Feature:TTLAfterFinished\]`, // flag gate is off
`\[Feature:GPUDevicePlugin\]`, // GPU node needs to be available
`\[Feature:ExpandCSIVolumes\]`, // off by default. sig-storage
`\[Feature:DynamicAudit\]`, // off by default. sig-master
`\[NodeAlphaFeature:VolumeSubpathEnvExpansion\]`, // flag gate is off
},
// tests for features that are not implemented in openshift
"[Disabled:Unimplemented]": {
`\[Feature:Networking-IPv6\]`, // openshift-sdn doesn't support yet
`Monitoring`, // Not installed, should be
`Cluster level logging`, // Not installed yet
`Kibana`, // Not installed
`Ubernetes`, // Can't set zone labels today
`kube-ui`, // Not installed by default
`Kubernetes Dashboard`, // Not installed by default (also probably slow image pull)
`\[Feature:ServiceLoadBalancer\]`, // Not enabled yet
`\[Feature:RuntimeClass\]`, // disable runtimeclass tests in 4.1 (sig-pod/[email protected])
`\[Feature:CustomResourceWebhookConversion\]`, // webhook conversion is off by default. sig-master/@sttts
`NetworkPolicy between server and client should allow egress access on one named port`, // not yet implemented
`should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons
},
// tests that rely on special configuration that we do not yet support
"[Disabled:SpecialConfig]": {
`\[Feature:ImageQuota\]`, // Quota isn't turned on by default, we should do that and then reenable these tests
`\[Feature:Audit\]`, // Needs special configuration
`\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset?
`\[sig-cluster-lifecycle\]`, // cluster lifecycle test require a different kind of upgrade hook.
`\[Feature:StatefulUpgrade\]`, // related to cluster lifecycle (in e2e/lifecycle package) and requires an upgrade hook we don't use
`kube-dns-autoscaler`, // Don't run kube-dns
`should check if Kubernetes master services is included in cluster-info`, // Don't run kube-dns
`DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet
`authentication: OpenLDAP`, // needs separate setup and bucketing for openldap bootstrapping
`NodeProblemDetector`, // requires a non-master node to run on
`Advanced Audit should audit API calls`, // expects to be able to call /logs
`Firewall rule should have correct firewall rules for e2e cluster`, // Upstream-install specific
},
// tests that are known broken and need to be fixed upstream or in openshift
// always add an issue here
"[Disabled:Broken]": {
`mount an API token into pods`, // We add 6 secrets, not 1
`ServiceAccounts should ensure a single API token exists`, // We create lots of secrets
`unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs)
`Simple pod should handle in-cluster config`, // kubectl cp is not preserving executable bit
`Services should be able to up and down services`, // we don't have wget installed on nodes
`Network should set TCP CLOSE_WAIT timeout`, // possibly some difference between ubuntu and fedora
`Services should be able to create a functioning NodePort service`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711603
`\[NodeFeature:Sysctls\]`, // needs SCC support
`should check kube-proxy urls`, // previously this test was skipped b/c we reported -1 as the number of nodes, now we report proper number and test fails
`SSH`, // TRIAGE
`should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH. sig-network
`should idle the service and DeploymentConfig properly`, // idling with a single service and DeploymentConfig [Conformance]
`\[Driver: csi-hostpath`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711607
`should answer endpoint and wildcard queries for the cluster`, // currently not supported by dns operator https://github.com/openshift/cluster-dns-operator/issues/43
`should propagate mounts to the host`, // requires SSH, https://bugzilla.redhat.com/show_bug.cgi?id=1711600
`should allow ingress access on one named port`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711602
`ClusterDns \[Feature:Example\] should create pod that uses dns`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711601
`should be rejected when no endpoints exist`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711605
`PreemptionExecutionPath runs ReplicaSets to verify preemption running path`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711606
`TaintBasedEvictions`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711608
`\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627
`\[Driver: nfs\] \[Testpattern: Dynamic PV \(default fs\)\] provisioning should access volume from different nodes`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711688
// Test fails on platforms that use LoadBalancerService and HostNetwork endpoint publishing strategy
`\[Conformance\]\[Area:Networking\]\[Feature:Router\] The HAProxy router should set Forwarded headers appropriately`, // https://bugzilla.redhat.com/show_bug.cgi?id=1752646
// requires a 1.14 kubelet, enable when rhcos is built for 4.2
"when the NodeLease feature is enabled",
"RuntimeClass should reject",
},
// tests that may work, but we don't support them
"[Disabled:Unsupported]": {
`\[Driver: rbd\]`, // OpenShift 4.x does not support Ceph RBD (use CSI instead)
`\[Driver: ceph\]`, // OpenShift 4.x does not support CephFS (use CSI instead)
},
// tests too slow to be part of conformance
"[Slow]": {
`\[sig-scalability\]`, // disable from the default set for now
`should create and stop a working application`, // Inordinately slow tests
`\[Feature:PerformanceDNS\]`, // very slow
`should ensure that critical pod is scheduled in case there is no resources available`, // should be tagged disruptive, consumes 100% of cluster CPU
`validates that there exists conflict between pods with same hostPort and protocol but one using 0\.0\.0\.0 hostIP`, // 5m, really?
},
// tests that are known flaky
"[Flaky]": {
`Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources
`openshift mongodb replication creating from a template`, // flaking on deployment
},
// tests that must be run without competition
"[Serial]": {
`\[Disruptive\]`,
`\[Feature:Performance\]`, // requires isolation
`\[Feature:ManualPerformance\]`, // requires isolation
`\[Feature:HighDensityPerformance\]`, // requires no other namespaces
`Service endpoints latency`, // requires low latency
`Clean up pods on node`, // schedules up to max pods per node
`should allow starting 95 pods per node`,
`DynamicProvisioner should test that deleting a claim before the volume is provisioned deletes the volume`, // test is very disruptive to other tests
`Should be able to support the 1\.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195
},
"[Skipped:azure]": {
"Networking should provide Internet connection for containers", // Azure does not allow ICMP traffic to internet.
// Azure storage tests are failing due to unknown errors. ref: https://bugzilla.redhat.com/show_bug.cgi?id=1723603
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Dynamic PV \(default fs\)\] provisioning should access volume from different nodes`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Dynamic PV \(default fs\)\] subPath should verify container cannot write to subpath readonly volumes`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should be able to unmount after the subpath directory is deleted`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support existing directories when readOnly specified in the volumeSource`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support existing directory`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support existing single file`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support file as subpath`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support non-existent path`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support readOnly directory specified in the volumeMount`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should support readOnly file specified in the volumeMount`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] subPath should verify container cannot write to subpath readonly volumes`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] volumes should allow exec of files on the volume`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(default fs\)\] volumes should be mountable`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(ext4\)\] volumes should allow exec of files on the volume`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Inline-volume \(ext4\)\] volumes should be mountable`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(block volmode\)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should be able to unmount after the subpath directory is deleted`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support existing directories when readOnly specified in the volumeSource`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support existing directory`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support existing single file`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support file as subpath`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support non-existent path`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support readOnly directory specified in the volumeMount`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should support readOnly file specified in the volumeMount`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] subPath should verify container cannot write to subpath readonly volumes`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] volumes should allow exec of files on the volume`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(default fs\)\] volumes should be mountable`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(ext4\)\] volumes should allow exec of files on the volume`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(ext4\)\] volumes should be mountable`,
`\[sig-storage\] In-tree Volumes \[Driver: azure\] \[Testpattern: Pre-provisioned PV \(filesystem volmode\)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources`,
},
"[Skipped:gce]": {
// Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x
`\[sig-scheduling\] Multi-AZ Cluster Volumes \[sig-storage\] should only be allowed to provision PDs in zones where nodes exist`,
// The following tests try to ssh directly to a node. None of our nodes have external IPs
`\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`,
`\[sig-storage\] Flexvolumes should be mountable`,
`\[sig-storage\] Detaching volumes should not work when mount is in progress`,
// We are using openshift-sdn to conceal metadata
`\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1740959
`\[sig-api-machinery\] AdmissionWebhook Should be able to deny pod and configmap creation`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1745720
`\[sig-storage\] CSI Volumes \[Driver: pd.csi.storage.gke.io\]\[Serial\]`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1749882
`\[sig-storage\] CSI Volumes CSI Topology test using GCE PD driver \[Serial\]`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1751367
`gce-localssd-scsi-fs`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1750851
// should be serial if/when it's re-enabled
`\[HPA\] Horizontal pod autoscaling \(scale resource: Custom Metrics from Stackdriver\)`,
},
"[Suite:openshift/scalability]": {},
// tests that replace the old test-cmd script
"[Suite:openshift/test-cmd]": {
`\[Suite:openshift/test-cmd\]`,
},
"[Suite:openshift/csi]": {
`External Storage \[Driver:`,
},
}
// labelExcludes temporarily blocks tests out of a specific suite
labelExcludes = map[string][]string{}
excludedTests = []string{
`\[Disabled:`,
`\[Disruptive\]`,
`\[Skipped\]`,
`\[Slow\]`,
`\[Flaky\]`,
`\[local\]`,
`\[Suite:openshift/test-cmd\]`,
}
)
// checkSyntheticInput selects tests based on synthetic skips or focuses
func checkSyntheticInput() {
checkSuiteSkips()
}
// checkSuiteSkips ensures Origin/Kubernetes synthetic skip labels are applied
// DEPRECATED: remove in a future release
func checkSuiteSkips() {
switch {
case isOriginTest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Origin") {
ginkgo.Skip("skipping all openshift/origin tests")
}
case isKubernetesE2ETest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Kubernetes") {
ginkgo.Skip("skipping all k8s.io/kubernetes tests")
}
}
}
var longRetry = wait.Backoff{Steps: 100}
// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto.
func allowAllNodeScheduling(c kclientset.Interface, namespace string) {
err := retry.RetryOnConflict(longRetry, func() error {
ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
return err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
ns.Annotations[projectv1.ProjectNodeSelector] = ""
_, err = c.CoreV1().Namespaces().Update(ns)
return err
})
if err != nil {
FatalErr(err)
}
}
func addE2EServiceAccountsToSCC(securityClient securityv1client.Interface, namespaces []kapiv1.Namespace, sccName string) {
// Because updates can race, we need to set the number of backoff retries to be greater than the number of
// possible parallel jobs starting at once. Set very high to allow future high parallelism.
err := retry.RetryOnConflict(longRetry, func() error {
scc, err := securityClient.SecurityV1().SecurityContextConstraints().Get(sccName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
return nil
}
return err
}
for _, ns := range namespaces {
if isE2ENamespace(ns.Name) {
scc.Groups = append(scc.Groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
}
}
if _, err := securityClient.SecurityV1().SecurityContextConstraints().Update(scc); err != nil {
return err
}
return nil
})
if err != nil {
FatalErr(err)
}
}
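// isE2ENamespace reports whether a namespace should receive the e2e SCC and RBAC customization.
// Currently every namespace qualifies; the prefix-based filtering below is commented out.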
func isE2ENamespace(ns string) bool {
return true
//return strings.HasPrefix(ns, "e2e-") ||
// strings.HasPrefix(ns, "aggregator-") ||
// strings.HasPrefix(ns, "csi-") ||
// strings.HasPrefix(ns, "deployment-") ||
// strings.HasPrefix(ns, "disruption-") ||
// strings.HasPrefix(ns, "gc-") ||
// strings.HasPrefix(ns, "kubectl-") ||
// strings.HasPrefix(ns, "proxy-") ||
// strings.HasPrefix(ns, "provisioning-") ||
// strings.HasPrefix(ns, "statefulset-") ||
// strings.HasPrefix(ns, "services-")
}
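// addRoleToE2EServiceAccounts binds the given cluster role to the "default" service account
// in every non-terminating e2e namespace, retrying on conflicts.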
func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namespaces []kapiv1.Namespace, roleName string) {
err := retry.RetryOnConflict(longRetry, func() error {
for _, ns := range namespaces {
if isE2ENamespace(ns.Name) && ns.Status.Phase != kapiv1.NamespaceTerminating {
_, err := rbacClient.RoleBindings(ns.Name).Create(&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: roleName,
},
Subjects: []rbacv1.Subject{
{Name: "default", Namespace: ns.Name, Kind: rbacv1.ServiceAccountKind},
},
})
if err != nil {
e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
}
}
}
return nil
})
if err != nil {
FatalErr(err)
}
}
|
[
"\"DELETE_NAMESPACE\"",
"\"TEST_REPORT_FILE_NAME\"",
"\"TEST_OUTPUT_QUIET\"",
"\"TEST_REPORT_DIR\"",
"\"ARTIFACT_DIR\""
] |
[] |
[
"DELETE_NAMESPACE",
"TEST_REPORT_DIR",
"TEST_REPORT_FILE_NAME",
"TEST_OUTPUT_QUIET",
"ARTIFACT_DIR"
] |
[]
|
["DELETE_NAMESPACE", "TEST_REPORT_DIR", "TEST_REPORT_FILE_NAME", "TEST_OUTPUT_QUIET", "ARTIFACT_DIR"]
|
go
| 5 | 0 | |
passKeeper/wsgi.py
|
"""
WSGI config for passKeeper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'passKeeper.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import six
import collections
import warnings
import math
from functools import reduce
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.compat as cpt
from paddle.fluid.transpiler.details.program_utils import delete_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
OP_NAME_SCOPE = "op_namescope"
CLIP_OP_NAME_SCOPE = "gradient_clip"
STEP_COUNTER = "@PS_STEP_COUNTER@"
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"}
SPARSE_GRAD_OP_TYPE_DICT = {
"lookup_table_grad": "W",
"lookup_table_v2_grad": "W"
}
DEVICE_LIST = ["cpu", "gpu", "xpu"]
COMMUNICATE_OPS_TYPE = ["send", "recv", "fetch_barrier", "send_barrier"]
DEFAULT_DEVICE = 'cpu'
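# delete_optimizer_pass removes optimizer and learning-rate ops (plus the variables only they used)
# from the trainer program, re-adding just the learning rate variable when an LR scheduler is used.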
def delete_optimizer_pass(program, config):
def _delete_optimizer_op_and_vars(_program, optimize_ops):
optimize_vars = []
optimize_op_role_vars = []
optimize_need_delete_vars = []
for op in optimize_ops:
optimize_vars.extend(op.input_arg_names)
optimize_op_role_vars.extend(op.attr("op_role_var"))
optimize_vars = list(set(optimize_vars))
optimize_op_role_vars = list(set(optimize_op_role_vars))
for var in optimize_vars:
if var not in optimize_op_role_vars:
optimize_need_delete_vars.append(var)
need_delete_optimize_vars = list(set(optimize_need_delete_vars))
delete_ops(_program.global_block(), optimize_ops)
for var in need_delete_optimize_vars:
if _program.global_block().has_var(var):
_program.global_block()._remove_var(var)
def _add_lr_var(main_program, compiled_config):
# TODO: the learning rate variable name ("learning_rate_0") is hard-coded here
lr_var = compiled_config.origin_main_program.global_block().vars[
"learning_rate_0"]
main_program.global_block().create_var(
name=lr_var.name,
shape=lr_var.shape,
dtype=lr_var.dtype,
type=lr_var.type,
lod_level=lr_var.lod_level,
persistable=True)
optimizer_ops = _get_optimize_ops(program)
lr_ops = _get_lr_ops(program)
optimizer_ops.extend(lr_ops)
_delete_optimizer_op_and_vars(program, optimizer_ops)
if hasattr(config.origin_main_program, 'lr_sheduler'):
_add_lr_var(program, config)
return program
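# distributed_ops_pass fuses remote-prefetch sparse lookup ops into a single distributed_lookup_table
# (or pull_box_sparse for ps-gpu) op per embedding table, and rewrites the matching gradient ops into
# distributed_push_sparse ops.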
def distributed_ops_pass(program, config, use_ps_gpu=False):
trainer_id = config.get_role_id()
send_ctx = config.get_the_one_send_context(
split_dense_table=config.is_heter_ps_mode)
w_2_table_id = {}
emb_size = {}
def _get_pull_sparse_ops(_program):
pull_sparse_ops = {}
pull_sparse_ids = {}
push_sparse_ops = {}
ops = {}
for op in _program.global_block().ops:
if op.type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
if config.is_heter_ps_mode:
# trick for matchnet; needs to be revisited
param_name += op.input("Ids")[0][0]
ops = pull_sparse_ops.get(param_name, [])
ops.append(op)
pull_sparse_ops[param_name] = ops
ids = pull_sparse_ids.get(param_name, [])
ids.append(op.input("Ids")[0])
pull_sparse_ids[param_name] = ids
for op in _program.global_block().ops:
if op.type in SPARSE_GRAD_OP_TYPE_DICT.keys():
param_name = op.input(SPARSE_GRAD_OP_TYPE_DICT[op.type])[0]
if param_name in pull_sparse_ids and op.input("Ids")[
0] in pull_sparse_ids[param_name]:
ops = push_sparse_ops.get(param_name, [])
ops.append(op)
push_sparse_ops[param_name] = ops
return pull_sparse_ops, push_sparse_ops
def _pull_sparse_fuse(_program, pull_sparse_ops, use_ps_gpu):
def dag_check_up_and_reorder(program, inputs, outputs):
global_block = program.global_block()
min_output_index = len(global_block.ops)
max_input_index = -1
input_indexes = [0] * len(global_block.ops)
output_indexes = [0] * len(global_block.ops)
for idx, op in enumerate(global_block.ops):
for i in range(0, len(op.output_names)):
if input_indexes[idx] == 1:
break
outs = op.output(op.output_names[i])
for in_id, in_var in enumerate(inputs):
if in_var.name in outs:
input_indexes[idx] = 1
max_input_index = max(max_input_index, idx)
break
for i in range(0, len(op.input_names)):
if output_indexes[idx] == 1:
break
ins = op.input(op.input_names[i])
for out_id, out_var in enumerate(outputs):
if out_var.name in ins:
output_indexes[idx] = 1
min_output_index = min(min_output_index, idx)
for i in range(len(global_block.ops)):
if input_indexes[i] == 1 and output_indexes[i] == 1:
warnings.warn(
"unable to re-arrange dag order to combine distributed embedding ops because an op both consumes the embedding table's output and produces ids used as the same table's input"
)
return
if min_output_index < max_input_index:
move_ops = []
for i in range(min_output_index + 1, len(input_indexes)):
if input_indexes[i] == 1:
move_ops.append((global_block.ops[i], i))
for i, op in enumerate(move_ops):
queue = list()
visited = set()
queue.append(op[1])
visited.add(op[0])
start = 0
while start < len(queue):
pos = queue[start]
op = global_block.ops[pos]
op_inputs = []
for k in range(0, len(op.input_names)):
ins = op.input(op.input_names[k])
op_inputs.append(ins)
for j in range(pos - 1, min_output_index - 1, -1):
op1 = global_block.ops[j]
if op1 in visited:
continue
found = False
for k in range(0, len(op1.output_names)):
outs = op1.output(op1.output_names[k])
for t in range(len(op_inputs)):
for y in op_inputs[t]:
if y in outs:
found = True
break
if found:
break
if found:
break
if found:
if output_indexes[j] == 1:
warnings.warn(
"unable to re-arrange dag order to combine distributed embedding ops"
)
return
queue.append(j)
visited.add(global_block.ops[j])
start = start + 1
queue.sort()
for index in queue:
desc = global_block.desc._insert_op(min_output_index)
desc.copy_from(global_block.ops[index].desc)
global_block.desc._remove_op(index + 1, index + 2)
global_block.ops[index].desc = desc
insert_op = global_block.ops.pop(index)
input_state = input_indexes.pop(index)
output_state = output_indexes.pop(index)
global_block.ops.insert(min_output_index, insert_op)
input_indexes.insert(min_output_index, input_state)
output_indexes.insert(min_output_index, output_state)
min_output_index = min_output_index + 1
assert global_block.desc.op_size() == len(global_block.ops)
for i in range(len(global_block.ops)):
assert global_block.desc.op(i) == global_block.ops[i].desc
for param, ops in pull_sparse_ops.items():
all_ops = program.global_block().ops
op_device = ""
if config.is_heter_ps_mode:
op_device = ops[0].attr("op_device")
inputs = [
program.global_block().vars[op.input("Ids")[0]] for op in ops
]
w = program.global_block().vars[ops[0].input("W")[0]]
emb_size[param] = w.shape[1]
grad_name = config.param_name_to_grad_name[w.name]
table_id = -1
for name, ctx in send_ctx.items():
if grad_name in ctx.origin_varnames():
table_id = ctx.table_id()
if table_id == -1:
raise ValueError(
"can not find suitable sparse table, please check")
w_2_table_id[param] = table_id
padding_idx = ops[0].attr("padding_idx")
is_distributed = ops[0].attr("is_distributed")
op_type = ops[0].type
outputs = [
program.global_block().vars[op.output("Out")[0]] for op in ops
]
dag_check_up_and_reorder(program, inputs, outputs)
op_idxs = [all_ops.index(op) for op in ops]
for idx in op_idxs[::-1]:
program.global_block()._remove_op(idx)
inputs_idxs = [-1] * len(inputs)
outputs_idxs = [len(program.global_block().ops) + 1] * len(outputs)
for idx, op in enumerate(program.global_block().ops):
for i in range(0, len(op.output_names)):
outs = op.output(op.output_names[i])
for in_id, in_var in enumerate(inputs):
if in_var.name in outs:
inputs_idxs[in_id] = max(idx, inputs_idxs[in_id])
for i in range(0, len(op.input_names)):
ins = op.input(op.input_names[i])
for out_id, out_var in enumerate(outputs):
if out_var.name in ins:
outputs_idxs[out_id] = min(idx,
outputs_idxs[out_id])
if min(outputs_idxs) - max(inputs_idxs) >= 1:
if max(inputs_idxs) == -1:
distributed_idx = min(op_idxs)
else:
distributed_idx = max(inputs_idxs) + 1
if use_ps_gpu:
program.global_block()._insert_op(
index=distributed_idx,
type="pull_box_sparse",
inputs={"Ids": inputs,
'W': w},
outputs={"Out": outputs},
attrs={
"size": w.shape[1],
"is_distributed": True,
"is_sparse": True
})
else:
program.global_block()._insert_op(
index=distributed_idx,
type="distributed_lookup_table",
inputs={"Ids": inputs,
'W': w},
outputs={"Outputs": outputs},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"lookup_table_version": op_type,
"op_device": op_device
})
else:
for i in range(len(inputs_idxs)):
distributed_idx = op_idxs[i]
program.global_block()._insert_op(
index=distributed_idx,
type="distributed_lookup_table",
inputs={"Ids": [inputs[i]],
'W': w},
outputs={"Outputs": [outputs[i]]},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"lookup_table_version": op_type,
"op_device": op_device
})
def _push_sparse_fuse(_program, push_sparse_ops, use_ps_gpu):
if use_ps_gpu:
# in ps_gpu_pass
return
if len(push_sparse_ops) == 0:
return
show = None
clk = None
use_entry = False
for param, ops in push_sparse_ops.items():
op_first = ops[0]
break
print(op_first)
if op_first.has_attr("entry"):
entry = op_first.attr("entry")
entry = entry.split(':')
if len(entry) == 3 and entry[0] == 'show_click_entry':
show_var_name = entry[1]
click_var_name = entry[2]
if show_var_name in program.global_block(
).vars and click_var_name in program.global_block().vars:
show = program.global_block().vars[show_var_name]
clk = program.global_block().vars[click_var_name]
use_entry = True
else:
warnings.warn(
'ShowClickEntry configured, but cannot find show/click var, will not use'
)
if not use_entry:
print('ShowClickEntry not configured, will not use')
show = program.global_block().create_var(
name="show",
dtype=core.VarDesc.VarType.INT64,
persistable=False,
stop_gradient=True)
program.global_block()._insert_op(
index=0,
type='fill_constant',
inputs={},
outputs={'Out': show},
attrs={
'shape': [1],
'dtype': show.dtype,
'value': 1,
#OP_ROLE_KEY: OpRole.Forward
})
clk = program.global_block().create_var(
name="clk",
dtype=core.VarDesc.VarType.INT64,
persistable=False,
stop_gradient=True)
program.global_block()._insert_op(
index=0,
type='fill_constant',
inputs={},
outputs={'Out': clk},
attrs={
'shape': [1],
'dtype': clk.dtype,
'value': 0,
#OP_ROLE_KEY: OpRole.Forward
})
for param, ops in push_sparse_ops.items():
all_ops = program.global_block().ops
op_idxs = [all_ops.index(op) for op in ops]
inputs = [
program.global_block().vars[op.input("Ids")[0]] for op in ops
]
w = program.global_block().vars[ops[0].output("W@GRAD")[0]]
table_id = w_2_table_id[param]
padding_idx = ops[0].attr("padding_idx")
is_distributed = ops[0].attr("is_distributed")
op_type = ops[0].type
outputs = [
program.global_block().vars[op.input("Out@GRAD")[0]]
for op in ops
]
for idx in op_idxs[::-1]:
program.global_block()._remove_op(idx)
# if use_ps_gpu:
# program.global_block().append_op(
# type="push_box_sparse",
# inputs={"Ids": inputs,
# 'Out': outputs},
# outputs={"Out": outputs},
# attrs={
# "size": w.shape[1],
# "is_distributed": True,
# "is_sparse": True
# })
# else:
program.global_block().append_op(
type="distributed_push_sparse",
inputs={
"Ids": inputs,
'W': w,
"Outputs": outputs,
"Shows": show,
"Clicks": clk
},
outputs={"Outputs": outputs},
attrs={
"is_distributed": is_distributed,
"padding_idx": padding_idx,
"table_id": table_id,
"size": emb_size[param]
})
pull_sparse_ops, push_sparse_ops = _get_pull_sparse_ops(program)
_pull_sparse_fuse(program, pull_sparse_ops, use_ps_gpu)
_push_sparse_fuse(program, push_sparse_ops, use_ps_gpu)
return program
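# append_send_ops_pass appends one send op per merged dense gradient (sparse gradients are skipped
# here) and a trailing send_barrier op in SYNC / HALF_ASYNC modes.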
def append_send_ops_pass(program, config):
mode = config.get_distributed_mode()
trainer_id = config.get_role_id()
def _append_send_op(union_vars, queue, is_sparse, table_id):
if queue == STEP_COUNTER:
send_input_vars = []
else:
send_input_vars = [
program.global_block().vars[union_var]
for union_var in union_vars
]
dummy_output = []
if mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]:
dummy_output = program.global_block().create_var(
name=framework.generate_control_dev_var_name())
program.global_block().append_op(
type="send",
inputs={"X": send_input_vars},
outputs={"Out": dummy_output},
attrs={
"send_varnames": [queue],
"is_sparse": is_sparse,
"table_id": table_id,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return dummy_output
def _append_barrier_op(dummys):
program.global_block().append_op(
type="send_barrier",
inputs={"X": dummys},
outputs={"Out": []},
attrs={
"trainer_id": trainer_id,
"half_async": True,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
dummys = []
sends = config.get_the_one_trainer_send_context(
split_dense_table=config.is_heter_ps_mode)
for merged_name, send in sends.items():
if send.is_sparse():
continue
is_sparse = 1 if send.is_sparse() else 0
is_sparse = 2 if send.is_distributed() else is_sparse
dummys.append(
_append_send_op(send.origin_varnames(), merged_name, is_sparse,
send.table_id()))
if mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]:
_append_barrier_op(dummys)
return program
def init_from_server_pass(program, config):
# the 0th trainer does not need a barrier here; it will call barrier at the end of init_worker
if config.role_maker._is_first_worker():
return program
fetch_barrier_out = program.global_block().create_var(
name=framework.generate_control_dev_var_name())
program.global_block().append_op(
type="fetch_barrier",
inputs={},
outputs={"Out": fetch_barrier_out},
attrs={
"endpoints": config.get_ps_endpoints(),
"trainer_id": config.get_role_id(),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return program
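# fake_init_ops_pass replaces the single initialization op of every sparse embedding table with a
# fake_init op, since the real table lives on the parameter servers.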
def fake_init_ops_pass(program, config):
origin_program = config.get_origin_main_program()
def _get_sparse_table_names():
dist_varnames = get_sparse_tablenames(origin_program, True)
sparse_varnames = get_sparse_tablenames(origin_program, False)
return list(set(dist_varnames + sparse_varnames))
def _fake_init_sparsetable(sparse_table_names):
# delete table init op
for table_name in sparse_table_names:
table_var = program.global_block().vars[table_name]
table_param_init_op = []
for op in program.global_block().ops:
if table_name in op.output_arg_names:
table_param_init_op.append(op)
init_op_num = len(table_param_init_op)
if init_op_num != 1:
raise ValueError("table init op num should be 1, now is " + str(
init_op_num))
table_init_op = table_param_init_op[0]
program.global_block().append_op(
type="fake_init",
inputs={},
outputs={"Out": table_var},
attrs={"shape": table_init_op.attr('shape')})
delete_ops(program.global_block(), table_param_init_op)
sparse_tables = _get_sparse_table_names()
_fake_init_sparsetable(sparse_tables)
return program
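# ps_gpu_pass appends backward ops for pull_box_sparse, removes lookup_table_grad ops and their
# outputs, and deletes optimizer variables that only served the removed embedding parameters.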
def ps_gpu_pass(program):
def _add_push_box_sparse_op(program):
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
backward = core.op_proto_and_checker_maker.OpRole.Backward
for op in program.global_block().ops:
if op.type != "pull_box_sparse":
continue
grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
op.desc, cpt.to_text(set()), [])
for op_desc in grad_op_desc:
new_op_desc = program.global_block().desc.append_op()
new_op_desc.copy_from(op_desc)
new_op_desc._set_attr(op_role_attr_name, backward)
def _remove_lookup_table_grad_op_and_var(program):
lookup_table_grad_var = {}
remove_op_index = []
remove_var = []
for idx, op in list(enumerate(program.global_block().ops)):
if op.type == "lookup_table_grad":
for name in op.output("W@GRAD"):
lookup_table_grad_var[name] = 1
remove_op_index.append(idx)
remove_var.append(name)
for name in op.input("W"):
lookup_table_grad_var[name] = 1
for idx, op in list(enumerate(program.global_block().ops)):
if op.type == "pull_box_sparse":
continue
for key_name in op.input_names:
for var in op.input(key_name):
if var in lookup_table_grad_var:
remove_op_index.append(idx)
break
remove_op_index = list(set(remove_op_index))
remove_op_index.sort(reverse=True)
for idx in remove_op_index:
program.global_block()._remove_op(idx)
for name in remove_var:
program.global_block()._remove_var(name)
def _remove_optimizer_var(program):
embedding_w = {}
for idx, op in list(enumerate(program.global_block().ops)):
if op.type == "lookup_table_grad":
for name in op.input("W"):
embedding_w[name] = 1
optimize_vars = []
optimize_op_role_vars = []
optimize_need_delete_vars = []
for op in _get_optimize_ops(program):
for name in op.input("Param"):
if name in embedding_w:
optimize_op_role_vars.extend(op.attr("op_role_var"))
for key_name in op.input_names:
if key_name == "LearningRate":
continue
for var in op.input(key_name):
optimize_vars.append(var)
optimize_vars = list(set(optimize_vars))
optimize_op_role_vars = list(set(optimize_op_role_vars))
for var in optimize_vars:
if var not in optimize_op_role_vars:
optimize_need_delete_vars.append(var)
need_delete_optimize_vars = list(set(optimize_need_delete_vars))
for name in need_delete_optimize_vars:
if program.global_block().has_var(name):
program.global_block()._remove_var(name)
_add_push_box_sparse_op(program)
_remove_optimizer_var(program)
_remove_lookup_table_grad_op_and_var(program)
return program
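# delete_extra_optimizes_pass deletes the initialization ops and variables of optimizer state that
# the trainer program no longer needs.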
def delete_extra_optimizes_pass(program, config):
optimize_vars = []
optimize_op_role_vars = []
optimize_need_delete_vars = []
origin_program = config.get_origin_main_program()
for op in _get_optimize_ops(origin_program):
optimize_vars.extend(op.input_arg_names)
optimize_op_role_vars.extend(op.attr("op_role_var"))
optimize_vars = list(set(optimize_vars))
optimize_op_role_vars = list(set(optimize_op_role_vars))
for var in optimize_vars:
if var not in optimize_op_role_vars:
optimize_need_delete_vars.append(var)
need_delete_optimize_vars = list(set(optimize_need_delete_vars))
init_ops = []
for var in need_delete_optimize_vars:
param_init_op = []
for op in program.global_block().ops:
if var in op.output_arg_names:
param_init_op.append(op)
init_ops.extend(param_init_op)
delete_ops(program.global_block(), init_ops)
for var in need_delete_optimize_vars:
if program.global_block().has_var(var):
program.global_block()._remove_var(var)
return program
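# find_heter_ops walks the main program and partitions its ops into device-homogeneous blocks,
# returning the original program, the heterogeneous (gpu/xpu) blocks per device, the default (cpu)
# blocks, and the ordered list of all blocks.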
def find_heter_ops(program, default_device="cpu"):
if default_device not in DEVICE_LIST:
raise ValueError("Given device {} is not in device list {}".format(
default_device, DEVICE_LIST))
def _is_heter_op(op, current_heter_device, default_device="cpu"):
heter_devices = list(DEVICE_LIST)
heter_devices.remove(default_device)
op_device = op.attr("op_device")
op_type = op.type
if op_device in heter_devices:
return True
elif op_type in COMMUNICATE_OPS_TYPE and current_heter_device != default_device:
# for distributed communicate ops: send & recv & barrier, etc.
# TODO: this method needs to be updated
#op._set_attr('op_device', current_heter_device)
return True
elif op_device == None or op_device == default_device:
op._set_attr('op_device', default_device)
return False
return False
def _is_same_device(op, pre_device, default_device="cpu"):
op_device = op.attr("op_device")
if op_device == pre_device:
return True
if pre_device == default_device:
return True
return False
def _append_heter_op(op, current_heter_block_ops, heter_ops):
op_device = op.attr("op_device")
if op_device not in heter_ops:
heter_ops[op_device] = {}
current_heter_block_ops.append(op)
origin_porgram = program.clone()
block = program.global_block()
'''
re-place sum ops to fix a bug when uniting forward and backward ops
'''
var2idx = {}
op_list = list(block.ops)
op_size = len(op_list)
for i in range(op_size - 1, -1, -1):
op_list = list(block.ops)
op = op_list[i]
if "_grad" in op.type:
forward_op_type = op.type.split("_grad")[0]
if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[forward_op_type])[0]
if param_name in var2idx:
## insert sum op & remove sum op from var2idx and origin place
op_list = list(block.ops)
sum_op = op_list[var2idx[param_name]]
sum_op_inputs = {
sum_op.input_names[0]: [
block.vars[input]
for input in sum_op.input_arg_names
]
}
sum_op_outputs = {
sum_op.output_names[0]: [
block.vars[output]
for output in sum_op.output_arg_names
]
}
block._insert_op(
index=i + 1,
type=sum_op.type,
inputs=sum_op_inputs,
outputs=sum_op_outputs,
attrs=sum_op.all_attrs())
block._remove_op(var2idx[param_name] + 1)
var2idx.pop(param_name)
for var_ in var2idx:
var2idx[var_] += 1
elif forward_op_type == "elementwise_mul":
"""
get output varname of pre op
"""
output_vars_no_grad = []
for key in op.output_names:
for varname in op.output(key):
if varname == "@EMPTY@":
continue
if "lod_tensor_blocking_queue" in varname:
continue
output_vars_no_grad.append(varname.split("@GRAD")[0])
for no_grad_var in output_vars_no_grad:
if no_grad_var in var2idx:
"""
insert sum op & remove sum op from var2idx and origin place
"""
op_list = list(block.ops)
sum_op = op_list[var2idx[no_grad_var]]
sum_op_inputs = {
sum_op.input_names[0]: [
block.vars[input]
for input in sum_op.input_arg_names
]
}
sum_op_outputs = {
sum_op.output_names[0]: [
block.vars[output]
for output in sum_op.output_arg_names
]
}
block._insert_op(
index=i + 1,
type=sum_op.type,
inputs=sum_op_inputs,
outputs=sum_op_outputs,
attrs=sum_op.all_attrs())
block._remove_op(var2idx[no_grad_var] + 1)
var2idx.pop(no_grad_var)
for var_ in var2idx:
var2idx[var_] += 1
else:
if op.type == "sum":
var = op.output("Out")[0]
if "@GRAD" in var:
origin_var = var.split("@GRAD")[0]
pre_op = op_list[i - 1]
if "_grad" in pre_op.type:
forward_op_type = pre_op.type.split("_grad")[0]
if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \
and pre_op.attr('remote_prefetch') is True:
param_name = pre_op.input(SPARSE_OP_TYPE_DICT[
forward_op_type])[0]
if param_name == origin_var and op.attr(
"op_device") == pre_op.attr("op_device"):
continue
else:
var2idx[origin_var] = i
elif forward_op_type == "elementwise_mul":
output_vars = []
for key in pre_op.output_names:
for varname in pre_op.output(key):
if varname == "@EMPTY@":
continue
if "lod_tensor_blocking_queue" in varname:
continue
output_vars.append(varname)
input_vars = []
for key in op.input_names:
for varname in op.input(key):
if varname == "@EMPTY@":
continue
if "lod_tensor_blocking_queue" in varname:
continue
input_vars.append(varname)
is_match = False
for varname in output_vars:
if varname in input_vars:
is_match = True
break
if is_match:
continue
else:
var2idx[origin_var] = i
else:
var2idx[origin_var] = i
origin_porgram = program.clone()
block = program.global_block()
program_block_ops = []
default_ops = {default_device: {}}
heter_ops = {}
block_index = 0
current_heter_block_ops = []
current_default_block_ops = []
current_heter_device = default_device
is_heter = False
for op in block.ops:
if _is_heter_op(op, current_heter_device, default_device):
# for gpu/xpu-op
is_heter = True
# for cpu-op block append
if len(current_default_block_ops) > 1:
default_ops[default_device][
block_index] = current_default_block_ops
program_block_ops.append(current_default_block_ops)
current_default_block_ops = []
block_index += 1
if _is_same_device(op, current_heter_device, default_device):
# for gpu-op, gpu-op -> gpu-op,...
current_heter_device = op.attr("op_device")
_append_heter_op(op, current_heter_block_ops, heter_ops)
else:
# for gpu-op -> xpu-op, ...
op_device = current_heter_block_ops[0].attr("op_device")
heter_ops[op_device][block_index] = current_heter_block_ops
program_block_ops.append(current_heter_block_ops)
block_index += 1
current_heter_block_ops = []
current_heter_device = op.attr("op_device")
_append_heter_op(op, current_heter_block_ops, heter_ops)
elif is_heter:
# for gpu/xpu-op -> cpu-op
op_device = current_heter_block_ops[0].attr("op_device")
heter_ops[op_device][block_index] = current_heter_block_ops
program_block_ops.append(current_heter_block_ops)
block_index += 1
current_heter_block_ops = []
current_heter_device = default_device
is_heter = False
current_default_block_ops.append(op)
else:
# for cpu-op
current_default_block_ops.append(op)
if current_default_block_ops != []:
default_ops[default_device][block_index] = current_default_block_ops
program_block_ops.append(current_default_block_ops)
if current_heter_block_ops != []:
op_device = current_heter_block_ops[0].attr("op_device")
heter_ops[op_device][block_index] = current_heter_block_ops
program_block_ops.append(current_heter_block_ops)
if len(heter_ops) == 0:
warnings.warn(
"No heterogeneous OP was found in your program, "
"please use fluid.device_guard() to run OPs on different devices.")
total_heter_ops = 0
heter_blocks = 0
for device in heter_ops.keys():
heter_block_dict = heter_ops[device]
heter_blocks += len(heter_block_dict)
for _, heter_block in heter_block_dict.items():
total_heter_ops += len(heter_block)
print(
"There are {} OPs in your main_program, and contains {} heter-OPs which is made up of {} heter-blocks.".
format(len(block.ops), total_heter_ops, heter_blocks))
return origin_porgram, heter_ops, default_ops, program_block_ops
def create_heter_program(program, config, heter_program, program_block_ops_list,
heter_ops, block_var_detail, current_device, stage_id):
# This function mainly includes the following contents:
# 1. For every heter block:
# a) copy heter device op from origin program
# b) create variables which belong to heter op:
# -> if variable is persistable, clone it in global_scope
# -> if variable is temp, create it in heter block
# c) create communicate related op as follow:
# joint_var.0_1 -> slice -> reshape -> origin_var
# origin_var -> origin_program
# reshape -> concat -> joint_var.1_2
# d) copy send op from origin program for var@grad which is located in current heter block
# e) re-check every op in current block whose device is not the current heter device
# 2. Create send op for step counter in last heter-block
# 3. Create Listen&Serv OP and Send&Recv OP for distributed training
# 4. update CompileTimeStrategy for heter_program
optimizer_block = []
grad_to_block_id = []
send_grad_var_list = []
pre_block_idx = heter_program.num_blocks - 1
stage_id = int(stage_id)
print("stage id", stage_id)
heter_block_ops_forward = program_block_ops_list[stage_id - 1]["forward"]
heter_block_ops_backward = program_block_ops_list[stage_id - 1]["backward"]
heter_block = heter_program._create_block(pre_block_idx)
optimizer_block.append(heter_block)
for _, op in enumerate(heter_block_ops_forward):
block_append_op(heter_program, program, heter_block, op)
entrance_vars = block_var_detail[stage_id - 1]["forward"]["entrance"]
add_vars_by_var_list(entrance_vars, program, heter_program, heter_block)
exit_vars = block_var_detail[stage_id - 1]["forward"]["exit"]
add_vars_by_var_list(exit_vars, program, heter_program, heter_block)
first_op_index_fp = len(heter_block.ops)
if stage_id < len(program_block_ops_list):
heter_block_bp = heter_program._create_block(pre_block_idx)
optimizer_block.append(heter_block_bp)
for _, op in enumerate(heter_block_ops_backward):
block_append_op(heter_program, program, heter_block_bp, op)
bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][
"entrance"]
add_vars_by_var_list(bp_entrance_vars, program, heter_program,
heter_block_bp)
bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"]
add_vars_by_var_list(bp_exit_vars, program, heter_program,
heter_block_bp)
backward_comm_info = get_communicate_var_info(
program, stage_id, bp_entrance_vars, type="backward")
grad_to_block_id.append(backward_comm_info["block_input_var_name"] + ":"
+ str(heter_block_bp.idx))
else:
for _, op in enumerate(heter_block_ops_backward):
block_append_op(heter_program, program, heter_block, op)
bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][
"entrance"]
add_vars_by_var_list(bp_entrance_vars, program, heter_program,
heter_block)
bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"]
add_vars_by_var_list(bp_exit_vars, program, heter_program, heter_block)
heter_block_bp = heter_block
forward_comm_info = get_communicate_var_info(
program, stage_id, entrance_vars, type="forward")
grad_to_block_id.append(forward_comm_info["block_input_var_name"] + ":" +
str(heter_block.idx))
first_op_index_bp = len(heter_block_bp.ops)
if stage_id <= len(block_var_detail) - 1:
static_var = insert_communicate_op(program, config, heter_block,
stage_id, first_op_index_fp,
block_var_detail, current_device)
static_var_bp = insert_communicate_op(
program, config, heter_block_bp, stage_id, first_op_index_bp,
block_var_detail, current_device, False)
# add send op
send_grad_var_list = add_heter_send_op(
program, heter_program, heter_block_bp, block_var_detail[stage_id - 1])
# ---------------
# add step counter
send_input_vars = []
dummy_output = []
pserver_endpoints = config.get_ps_endpoints()
# optimizer_block[-1].append_op(
# type="send",
# inputs={"X": send_input_vars},
# outputs={"Out": dummy_output},
# attrs={
# "send_varnames": [STEP_COUNTER],
# "merge_add": True,
# "use_send_handler": False,
# "endpoints": pserver_endpoints
# })
# add info in listen&serv
attrs = {
#"mode": "sync",
#"trainers": config.get_trainers(),
#"trainer_id": config.get_role_id() + config.get_trainers(),
"message_to_block_id": grad_to_block_id,
"optimize_blocks": optimizer_block,
# runtime attribute
"endpoint": config.get_heter_worker_endpoint(),
"fanin": len(config.get_previous_stage_trainers()),
"pserver_id": config.get_role_id(),
"distributed_mode": config.get_distributed_mode(),
"rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
}
# append the listen_and_serv op
heter_program.global_block().append_op(
type="heter_listen_and_serv", inputs={'X': []}, outputs={}, attrs=attrs)
check_heter_compile_time_strategy(program, config, send_grad_var_list)
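# check_heter_compile_time_strategy drops merged var/grad pairs whose gradients are no longer sent
# from this program, keeping the compile-time strategy consistent with the rewritten send ops.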
def check_heter_compile_time_strategy(program, config, send_grad_var_list):
origin_grad_var_list = []
for _, var_grad in config.merged_variables_pairs:
origin_grad_var_list.append(var_grad.merged_var.name)
origin_grad_var_list = list(set(origin_grad_var_list))
send_grad_var_list = list(set(send_grad_var_list))
useless_grad_var_list = list(
set(origin_grad_var_list) - set(send_grad_var_list))
for useless_grad_var in useless_grad_var_list:
config.remove_var_pair_by_grad(useless_grad_var)
def create_trainer_program(program, origin_program, config,
program_block_ops_list, block_var_detail):
# This function mainly includes the following contents:
# 1. For every heter block in origin program
# a) delete heter op and related variables
# b) add send&recv op
# c) add communicate ops as follows:
# origin_var -> reshape -> concat -> joint_var.0_1
# send&recv op(send joint_var.0_1; recv joint_var.1_2)
# joint_var.1_2 -> slice -> reshape -> origin_var
# d) remove send ops whose related var@grad is not in the trainer program
# 2. check every op's device
static_var = []
for heter_block_index in range(1, len(program_block_ops_list)):
ops_list = program_block_ops_list[heter_block_index][
"forward"] + program_block_ops_list[heter_block_index]["backward"]
static_var += replace_ops_by_communicate_op(
program, config, heter_block_index, ops_list, block_var_detail)
remove_trainer_send_op(program, config, heter_block_index,
block_var_detail)
optimizer_block = []
grad_to_block_id = []
bp_ops_list = program_block_ops_list[0]["backward"]
delete_same_ops(program.global_block(), bp_ops_list)
delete_trainer_useless_var(config, program, static_var)
backward_block = create_backward_block(program, origin_program, config,
bp_ops_list, block_var_detail)
bp_entrance_vars = block_var_detail[0]["backward"]["entrance"]
backward_comm_info = get_communicate_var_info(
origin_program, 1, bp_entrance_vars, type="backward")
grad_to_block_id.append(backward_comm_info["block_input_var_name"] + ":" +
str(backward_block.idx))
optimizer_block.append(backward_block)
attrs = {
#"mode": "sync",
#"trainers": config.get_trainers(),
#"trainer_id": config.get_role_id(),
"message_to_block_id": grad_to_block_id,
"optimize_blocks": optimizer_block,
# runtime attribute
"endpoint": config.get_trainer_endpoint(), ## get trainer endpoint
"fanin": 0, ## get heter worker
"pserver_id": config.get_role_id(),
"distributed_mode": config.get_distributed_mode(),
"rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
}
# append the listen_and_serv op
program.global_block()._insert_op(
index=0,
type="heter_listen_and_serv",
inputs={'X': []},
outputs={},
attrs=attrs)
## TODO add check for bp block
check_op_device(program.global_block(), DEFAULT_DEVICE)
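# insert_communicate_op inserts a send_and_recv op at first_op_index so the stage can exchange its
# entrance variables (plus the microbatch id) with the previous / next heter workers.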
def insert_communicate_op(orign_program,
config,
heter_block,
stage_id,
first_op_index,
block_var_detail,
device,
is_forward=True):
if is_forward:
next_heter_worker_endpoints = config.get_next_stage_trainers()
previous_heter_worker_endpoints = config.get_previous_stage_trainers()
entrance_var = block_var_detail[stage_id]["forward"]["entrance"]
comm_info = get_communicate_var_info(orign_program, stage_id + 1,
entrance_var)
else:
next_heter_worker_endpoints = config.get_next_stage_trainers()
#if next_heter_worker_endpoints == "":
# next_heter_worker_endpoints = []
previous_heter_worker_endpoints = config.get_previous_stage_trainers()
entrance_var = block_var_detail[stage_id - 1]["backward"]["exit"]
comm_info = get_communicate_var_info(orign_program, stage_id - 1,
entrance_var, "backward")
heter_block._insert_op(
index=first_op_index,
type="send_and_recv",
inputs={"X": heter_block.vars[entrance_var[0]]},
outputs={"Out": []},
attrs={
"mode": "forward" if is_forward else "backward",
"send_var_name": entrance_var + ["microbatch_id"],
"recv_var_name": [],
"message_name": comm_info["block_input_var_name"],
"next_endpoints": next_heter_worker_endpoints,
"previous_endpoints": previous_heter_worker_endpoints,
"trainer_id": config.get_role_id(),
"op_device": device,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return entrance_var
def create_backward_block(program, origin_program, config, bp_ops_list,
block_var_detail):
pre_block_idx = program.num_blocks - 1
heter_block = program._create_block(pre_block_idx)
for _, op in enumerate(bp_ops_list):
if op.type == "send":
send_varnames = op.attr('send_varnames')
is_skip = False
for varname in send_varnames:
if varname not in program.global_block(
).vars and varname not in heter_block.vars:
is_skip = True
break
if is_skip == True:
continue
block_append_op(program, origin_program, heter_block, op)
entrance_vars = block_var_detail[0]["backward"]["entrance"]
add_vars_by_var_list(entrance_vars, origin_program, program, heter_block)
exit_vars = block_var_detail[0]["backward"]["exit"]
add_vars_by_var_list(exit_vars, origin_program, program, heter_block)
return heter_block
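# replace_ops_by_communicate_op removes a heter block's ops from the trainer program and, for the
# first heter stage, inserts a send_and_recv op in their place to hand off the entrance variables.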
def replace_ops_by_communicate_op(program, config, heter_block_index, ops_list,
block_var_detail):
all_op = program.global_block().ops
start_op = ops_list[0]
first_op_idx = -1
for op in all_op:
if is_same_op(op, start_op):
first_op_idx = all_op.index(op)
break
assert first_op_idx != -1
delete_same_ops(program.global_block(), ops_list)
entrance_var = []
if heter_block_index == 1:
mode = config.get_distributed_mode()
next_heter_worker_endpoints = config.get_next_stage_trainers()
entrance_var = block_var_detail[heter_block_index]["forward"][
"entrance"]
comm_info = get_communicate_var_info(program, heter_block_index + 1,
entrance_var)
program.global_block()._insert_op(
index=first_op_idx,
type="send_and_recv",
inputs={"X": program.global_block().vars[entrance_var[0]]},
outputs={"Out": []},
attrs={
"mode": "forward",
"send_var_name": entrance_var + ["microbatch_id"],
"recv_var_name": [],
"message_name": comm_info["block_input_var_name"],
"next_endpoints": next_heter_worker_endpoints,
"previous_endpoints": [],
"trainer_id": config.get_role_id(),
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return entrance_var
def remove_trainer_send_op(program, config, heter_block_index,
block_var_detail):
# if the trainer does FF->BP->SEND, it has the following vars: var, var@GRAD
# if the trainer only does SEND, it has one var: var@GRAD
# delete the send op if the trainer doesn't have the paired var (var <-> var@GRAD)
persistables = block_var_detail[heter_block_index]["forward"]["persistables"] + \
block_var_detail[heter_block_index]["backward"]["persistables"]
need_remove_send_op = []
need_remove_grad_var = []
for op in find_send_op(program):
input_list, _ = find_op_input_output(program,
program.global_block(), op)
for var_name in input_list:
origin_var_name = var_name.split("@GRAD")[0]
if origin_var_name in persistables:
need_remove_send_op.append(op)
need_remove_grad_var.append(var_name)
need_remove_send_op = list(set(need_remove_send_op))
delete_ops(program.global_block(), need_remove_send_op)
for grad_var_name in need_remove_grad_var:
config.remove_var_pair_by_grad(grad_var_name)
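# add_heter_send_op re-creates, grouped by table id, the send ops for every persistable gradient
# produced inside the heter block, and returns the list of gradient variable names that are sent.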
def add_heter_send_op(program, heter_program, block, block_var_detail):
def _get_send_op_dict():
send_op_dict = {}
send_op_list = find_send_op(program)
for op in send_op_list:
input_list, _ = find_op_input_output(program,
program.global_block(), op)
for var in input_list:
send_op_dict[var] = op
return send_op_dict
# send_Op = { inputs{'X':[]},
# outputs{'Out':dummy_output},
# attrs{'send_varnames'"[]",
# 'is_sparse':int,
# 'table_id':int } }
send_grad_var_list = []
send_op_dict = _get_send_op_dict()
table_dict = {}
for persistable_var in block_var_detail["backward"]["persistables"]:
# check var_name == var@GRAD
if "@GRAD" not in persistable_var:
continue
if "GRAD" != persistable_var.split("@")[-1]:
continue
if persistable_var not in send_op_dict:
continue
send_op = send_op_dict[persistable_var]
is_sparse = send_op.attr('is_sparse')
table_id = send_op.attr('table_id')
send_varnames = send_op.attr('send_varnames')
send_grad_var_list.append(persistable_var)
if table_id not in table_dict:
table_dict[table_id] = {}
table_dict[table_id]['var_list'] = []
table_dict[table_id]['is_sparse'] = is_sparse
table_dict[table_id]['send_varnames'] = send_varnames
table_dict[table_id]['var_list'].append(persistable_var)
for table_id in table_dict:
dummy_output = block.create_var(
name=framework.generate_control_dev_var_name())
send_input_vars = [
block.vars[union_var]
for union_var in table_dict[table_id]['var_list']
]
block.append_op(
type="send",
inputs={"X": send_input_vars},
outputs={"Out": dummy_output},
attrs={
"send_varnames": table_dict[table_id]['send_varnames'],
"is_sparse": is_sparse,
"table_id": table_id,
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
})
return send_grad_var_list
def find_send_op(program):
send_op_list = []
for op in program.global_block().ops:
if op.type == "send":
send_op_list.append(op)
return send_op_list
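# get_communicate_var_info derives the joint variable name and per-variable reshape dims/names used
# when a block's entrance variables are transferred between stages.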
def get_communicate_var_info(program,
block_index,
entrance_var_list,
type="forward"):
input_var_reshape_dim = []
input_var_reshape_name = []
if type == "forward":
block_input_var_name = "forward_joint_{}_{}@Heter".format(
block_index - 1, block_index)
else:
block_input_var_name = "backward_joint_{}_{}@Heter".format(
block_index + 1, block_index)
entrance_var_list.sort()
# input
# Heter_SERVER_BLOCK_index@JOINT_VAR -> slice -> var@Heter_SERVER_BLOCK@INPUT_RESHAPE_VAR -> reshape -> var
for name in entrance_var_list:
var = program.global_block().vars[name]
shape = var.shape
# if len(shape) < 2 or shape[0] != -1:
# raise ValueError(
# "Variable {} not support heter training. its shape is {}".
# format(name, shape))
recv_var_dim = -1 * reduce(lambda x, y: x * y, shape)
input_var_reshape_dim.append(recv_var_dim)
input_var_reshape_name.append("{}.input_reshape@Heter".format(name))
# output
# var -> reshape -> var@Heter_SERVER_BLOCK@INPUT_RESHAPE_VAR -> concat -> Heter_SERVER_BLOCK_index@JOINT_VAR
#for var_name in exit_var_list:
# var = program.global_block().vars[var_name]
# shape = var.shape
# # if len(shape) < 2 or shape[0] != -1:
# # raise ValueError(
# # "Variable {} not support heter training. its shape is {}".
# # format(var_name, shape))
# send_reshape_dim = -1 * reduce(lambda x, y: x * y, shape)
# output_var_reshape_dim.append(send_reshape_dim)
# output_var_reshape_name.append("{}.output_reshape@Heter".format(
# var_name))
info = {
"input_var_reshape_dim": input_var_reshape_dim,
"input_var_reshape_name": input_var_reshape_name,
"block_input_var_name": block_input_var_name,
# "output_var_reshape_dim": output_var_reshape_dim,
# "output_var_reshape_name": output_var_reshape_name,
# "block_output_var_name": block_output_var_name
}
return info
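# Hedged usage sketch (hypothetical names/shapes, not taken from the transpiler itself):
# for a forward block with index 1 and a single entrance var "fc_0.tmp_0" of shape (-1, 13),
# get_communicate_var_info(program, 1, ["fc_0.tmp_0"]) would return
#     {"block_input_var_name": "forward_joint_0_1@Heter",
#      "input_var_reshape_dim": [13],          # -1 * reduce(mul, (-1, 13))
#      "input_var_reshape_name": ["fc_0.tmp_0.input_reshape@Heter"]}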
def union_forward_gradient_op(program_block_ops_list):
"""
before analyzing the input & output of each block in program_block_ops_list, we should
union each forward op with its corresponding gradient op to eliminate unnecessary variable
transmission
"""
"""
fix for the 2-emb model: re-place the sum op
"""
block_length = len(program_block_ops_list)
'''
## get the final part
final_part_idx = -1
for i in range(block_length):
op_list = program_block_ops_list[i]
for op in op_list:
if "_grad" in op.type:
final_part_idx = i
break
if final_part_idx != -1:
break
## eliminate wrong partition because of sum op
## lookup_table_v2_grad
## every lookup_table_v2_grad op block should follow a sum op
var2idx = {}
for i in range(final_part_idx, block_length):
op_list = program_block_ops_list[i]
for j in range(len(op_list) - 1, -1, -1):
op = op_list[j]
#if op.type == "lookup_table_v2_grad":
# if j < len(op_list) - 1):
# else:
# ## get var and record place
if _grad in op.type:
forward_op_type = op.type.split("_grad")[0]
if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[forward_op_type])[0]
var2idx[] = [i,j] ##
'''
union_program_block_ops_list = []
assert block_length % 2 != 0, "the length of program_block_ops_list should be odd"
for i in range(0, block_length // 2):
block_op_list = {"forward": program_block_ops_list[i]}
block_op_list.update({
"backward": program_block_ops_list[block_length - 1 - i]
})
union_program_block_ops_list.append(block_op_list)
block_op_list = {"forward": [], "backward": []}
for op in program_block_ops_list[block_length // 2]:
if not "_grad" in op.type and not (op.type == "sum"):
block_op_list["forward"].append(op)
else:
block_op_list["backward"].append(op)
union_program_block_ops_list.append(block_op_list)
return union_program_block_ops_list
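# Hedged sketch (assumes block_length == 3): the result pairs the i-th forward block with the
# (block_length - 1 - i)-th backward block, then splits the middle block by op type, e.g.
# [
#     {"forward": ops_of_block_0, "backward": ops_of_block_2},
#     {"forward": non-grad / non-sum ops of block_1, "backward": grad / sum ops of block_1},
# ]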
def find_block_joints(program, program_block_ops_list, heter_ops):
block_var_detail = find_entrance_exit_private(program,
program_block_ops_list)
block_var_detail = entrance_exit_check(program, program_block_ops_list,
block_var_detail, heter_ops)
block_var_detail = delete_block_useless_exit(
program, program_block_ops_list, block_var_detail)
return block_var_detail
def find_entrance_exit_private(program, program_block_ops_list):
block_var_detail = []
persistables = []
for index, block_op_list in enumerate(program_block_ops_list):
## forward
block_input, block_output = find_ops_list_input_output(
program, block_op_list["forward"])
persistables = screen_persistables(
program, block_input) + screen_persistables(program, block_output)
# find entrance & exit
block_private_vars = list(set(block_input) & set(block_output))
block_entrance = list(set(block_input) - set(block_private_vars))
block_exit = list(set(block_output) - set(block_private_vars))
detail = {
"forward": {
"entrance": block_entrance,
"exit": block_exit,
"private": block_private_vars,
"persistables": persistables
}
}
## backward
bp_block_input, bp_block_output = find_ops_list_input_output(
program, block_op_list["backward"])
bp_persistables = screen_persistables(
program, bp_block_input) + screen_persistables(program,
bp_block_output)
# find entrance & exit
bp_block_private_vars = list(set(bp_block_input) & set(bp_block_output))
bp_block_entrance = list(
set(bp_block_input) - set(bp_block_private_vars))
bp_block_exit = list(set(bp_block_output) - set(bp_block_private_vars))
detail.update({
"backward": {
"entrance": bp_block_entrance,
"exit": bp_block_exit,
"private": bp_block_private_vars,
"persistables": bp_persistables
}
})
block_var_detail.append(detail)
return block_var_detail
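# Hedged sketch (keys only, values illustrative): each entry of block_var_detail has the form
# {
#     "forward":  {"entrance": [...], "exit": [...], "private": [...], "persistables": [...]},
#     "backward": {"entrance": [...], "exit": [...], "private": [...], "persistables": [...]},
# }
# where "private" holds vars that appear in both the block's inputs and outputs, "entrance" the
# remaining inputs, and "exit" the remaining outputs.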
def entrance_exit_check(program, program_block_ops_list, block_var_detail,
heter_ops):
for index in range(len(block_var_detail) - 1, -1, -1):
if index - 1 < 0:
break
previous_block_exit = block_var_detail[index - 1]["forward"]["exit"]
previous_block_exit.sort()
current_block_entrance = block_var_detail[index]["forward"]["entrance"]
backward_entrance = block_var_detail[index]["backward"]["entrance"]
forward_all = block_var_detail[index]["forward"][
"entrance"] + block_var_detail[index]["forward"][
"private"] + block_var_detail[index]["forward"]["exit"]
for var in backward_entrance:
if not ("@GRAD" in var) and not (var in forward_all):
current_block_entrance.append(var)
current_block_entrance.sort()
if previous_block_exit == current_block_entrance:
continue
exist_vars = list(
set(previous_block_exit) & set(current_block_entrance))
need_add_vars = list(set(current_block_entrance) - set(exist_vars))
# vars in different stages should not be ignored, since they are not placed in the same program & device
#need_add_vars = find_need_var_from_previous_block(
# need_add_vars, block_var_detail, index, heter_ops)
previous_block_private = block_var_detail[index - 1]["forward"][
"private"]
previous_block_entrance = block_var_detail[index - 1]["forward"][
"entrance"]
for var in need_add_vars:
if var not in previous_block_private and var not in previous_block_entrance:
previous_block_entrance.append(var)
previous_block_exit.append(var)
if not var in current_block_entrance:
current_block_entrance.append(var)
for index in range(0, len(block_var_detail) - 1, 1):
previous_block_exit = block_var_detail[index + 1]["backward"]["exit"]
previous_block_exit.sort()
current_block_entrance = block_var_detail[index]["backward"]["entrance"]
current_block_entrance.sort()
if previous_block_exit == current_block_entrance:
continue
exist_vars = list(
set(previous_block_exit) & set(current_block_entrance))
need_add_vars = list(set(current_block_entrance) - set(exist_vars))
need_ignore_vars = []
for var in need_add_vars:
if not "@GRAD" in var:
need_ignore_vars.append(var)
need_add_vars = list(
set(need_add_vars).difference(set(need_ignore_vars)))
previous_block_private = block_var_detail[index + 1]["backward"][
"private"]
previous_block_entrance = block_var_detail[index + 1]["backward"][
"entrance"]
for var in need_add_vars:
if var not in previous_block_private and var not in previous_block_entrance:
previous_block_entrance.append(var)
previous_block_exit.append(var)
return block_var_detail
def find_need_var_from_previous_block(need_add_vars, block_var_detail,
current_index, heter_ops):
# create index_device_map
index_device_map = {}
for index in range(len(block_var_detail)):
index_device_map[index] = DEFAULT_DEVICE
for device in heter_ops:
for index in heter_ops[device].keys():
if index < len(block_var_detail):
index_device_map[index] = device
pre_index = current_index - 1
need_ignore_var = []
# if need_add_var in current device, no need communicate
for var in need_add_vars:
while (pre_index >= 0):
previous_block_private = block_var_detail[pre_index]["private"]
previous_block_exit = block_var_detail[pre_index]["exit"]
previous_block_entrance = block_var_detail[pre_index]["entrance"]
total_var = previous_block_private + previous_block_exit + previous_block_entrance
if var in total_var:
if index_device_map[current_index] == index_device_map[
pre_index] and index_device_map[
current_index] == DEFAULT_DEVICE:
need_ignore_var.append(var)
break
pre_index -= 1
need_add_vars = list(set(need_add_vars).difference(set(need_ignore_var)))
return need_add_vars
def delete_block_useless_exit(program, program_block_ops_list,
block_var_detail):
## forward
for index in range(len(block_var_detail)):
if index == len(block_var_detail) - 1:
break
current_block_exit = block_var_detail[index]["forward"]["exit"]
next_block_entrance = block_var_detail[index + 1]["forward"]["entrance"]
need_delete_var = []
for var in current_block_exit:
if var not in next_block_entrance:
need_delete_var.append(var)
for var in need_delete_var:
current_block_exit.remove(var)
## backward
for index in range(len(block_var_detail) - 1, -1, -1):
if index - 1 < 0:
break
current_block_exit = block_var_detail[index]["backward"]["exit"]
next_block_entrance = block_var_detail[index - 1]["backward"][
"entrance"]
need_delete_var = []
for var in current_block_exit:
if var not in next_block_entrance:
need_delete_var.append(var)
for var in need_delete_var:
current_block_exit.remove(var)
return block_var_detail
def check_op_device(block, device):
for op in block.ops:
op._set_attr('op_device', device)
def screen_persistables(program, var_list):
need_remove = []
for var_name in var_list:
if "@GRAD" in var_name:
if "GRAD" != var_name.split("@")[-1]:
continue
origin_var_name = var_name.split("@GRAD")[0]
var = program.global_block().vars[origin_var_name]
else:
var = program.global_block().vars[var_name]
if fluid.io.is_persistable(var):
need_remove.append(var_name)
for var_name in need_remove:
var_list.remove(var_name)
return need_remove
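# Hedged example (made-up names): screen_persistables(program, ["fc_0.w_0", "fc_0.tmp_0"]) would
# remove the persistable parameter "fc_0.w_0" from var_list in place and return ["fc_0.w_0"],
# leaving only the non-persistable activation "fc_0.tmp_0" behind.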
def insert_reshape_op(program,
block,
index,
var_name,
new_var_name,
new_var_shape=None):
input_var = block.vars[var_name]
if new_var_name not in block.vars:
out = block.create_var(
name=new_var_name,
shape=new_var_shape,
dtype=input_var.dtype,
type=input_var.type)
else:
out = block.vars[new_var_name]
new_var_shape = out.shape
x_shape = block.create_var(
name="{}.xshape@Heter".format(var_name), dtype=input_var.dtype)
block._insert_op(
index=index,
type="reshape2",
inputs={"X": input_var},
attrs={'shape': new_var_shape},
outputs={"Out": out,
"XShape": x_shape})
def insert_send_concat_op(program, block, index, var_name_list, new_var_name,
new_var_shape):
input_var_list = [block.vars[var_name] for var_name in var_name_list]
out = program.global_block().create_var(
name=new_var_name,
shape=new_var_shape,
dtype=input_var_list[0].dtype,
type=input_var_list[0].type)
block._insert_op(
index=index,
type='concat',
inputs={"X": input_var_list},
outputs={'Out': [out]},
attrs={'axis': -1,
'use_stack': False})
def insert_recv_slice_op(program, block, index, var_name, var_shape, dtype,
type, new_var_name_list, new_var_shape_list):
if var_name not in program.global_block().vars:
input_var = program.global_block().create_var(
name=var_name, shape=var_shape, dtype=dtype, type=type)
else:
input_var = program.global_block().vars[var_name]
out_list = []
for i in range(len(new_var_name_list)):
if new_var_name_list[i] not in block.vars:
out = block.create_var(
name=new_var_name_list[i],
shape=new_var_shape_list[i],
dtype=input_var.dtype,
type=input_var.type)
else:
out = block.vars[new_var_name_list[i]]
out_list.append(out)
start_index = 0
end_index = 0
for i in range(len(new_var_name_list)):
starts = []
ends = []
attrs = {'axes': [1]}
end_index += new_var_shape_list[i][1]
starts.append(start_index)
ends.append(end_index)
attrs['starts'] = starts
attrs['ends'] = ends
block._insert_op(
index=index,
type='slice',
inputs={'Input': input_var},
attrs=attrs,
outputs={'Out': out_list[i]})
start_index = end_index
index += 1
def add_heter_trainer_useful_vars(config, program, heter_program, heter_block,
static_var):
static_var = list(set(static_var))
for var_name in static_var:
if var_name not in heter_program.global_block(
).vars and var_name not in heter_block.vars:
var = program.global_block().vars[var_name]
if var.persistable:
heter_program.global_block()._clone_variable(
var, force_persistable=False)
else:
heter_block._clone_variable(var, force_persistable=False)
def delete_trainer_useless_var(config, program, static_var):
static_var = list(set(static_var))
program_useful_var_list = []
for op in program.global_block().ops:
input_var_list, output_var_list = find_op_input_output(
program, program.global_block(), op)
op_var_list = list(set(input_var_list).union(set(output_var_list)))
program_useful_var_list = list(
set(program_useful_var_list).union(set(op_var_list)))
program_useful_var_list += static_var
program_useless_var_list = list(
set(get_vars_name_in_block(program.global_block())).difference(
set(program_useful_var_list)))
for var in program_useless_var_list:
program.global_block()._remove_var(var)
return program_useless_var_list
def block_append_op(program, origin_program, block, op):
merge_ordereddict = origin_program.global_block().vars.copy()
merge_ordereddict.update(block.vars)
inputs = _get_input_map_from_op(merge_ordereddict, op)
for key, varlist in six.iteritems(inputs):
if not isinstance(varlist, list):
varlist = [varlist]
for var in varlist:
if var.name not in program.global_block(
).vars and var.name not in block.vars:
if var.persistable:
program.global_block()._clone_variable(
var, force_persistable=False)
else:
block._clone_variable(var, force_persistable=False)
outputs = _get_output_map_from_op(origin_program.global_block().vars, op)
for key, varlist in six.iteritems(outputs):
if not isinstance(varlist, list):
varlist = [varlist]
for var in varlist:
if var.name not in program.global_block(
).vars and var.name not in block.vars:
if var.persistable:
program.global_block()._clone_variable(
var, force_persistable=False)
else:
block._clone_variable(var, force_persistable=False)
if "_grad" not in op.type:
# for forward op
return block.append_op(
type=op.type, inputs=inputs, outputs=outputs, attrs=op.all_attrs())
else:
# for grad op
op_desc = op.desc
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
backward = core.op_proto_and_checker_maker.OpRole.Backward
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
# append grad op
new_op_desc = block.desc.append_op()
new_op_desc.copy_from(op_desc)
new_op_desc._set_attr(op_role_attr_name, backward)
# set device guard
if op.desc.has_attr(device_attr_name):
op_device = op_desc.attr(device_attr_name)
new_op_desc._set_attr(device_attr_name, op_device)
block._sync_with_cpp()
def add_vars_by_var_list(var_name_list, origin_program, program, block):
for var_name in var_name_list:
if var_name not in program.global_block(
).vars and var_name not in block.vars:
var = origin_program.global_block().vars[var_name]
if var.persistable:
program.global_block()._clone_variable(
var, force_persistable=False)
else:
block._clone_variable(var, force_persistable=False)
def get_varlist_from_op_map(var_map):
var_list = []
for key, varlist in six.iteritems(var_map):
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
var = varlist[i]
var_list.append(var.name)
return var_list
def find_ops_list_input_output(program, ops_list):
input_var_list = []
output_var_list = []
for op in ops_list:
inputs = _get_input_map_from_op(program.global_block().vars, op)
input_var_list += get_varlist_from_op_map(inputs)
outputs = _get_output_map_from_op(program.global_block().vars, op)
output_var_list += get_varlist_from_op_map(outputs)
input_var_list = list(set(input_var_list))
output_var_list = list(set(output_var_list))
return input_var_list, output_var_list
def find_op_input_output(program, block, op):
input_var_list = []
output_var_list = []
inputs = _get_input_map_from_op(block.vars, op)
input_var_list += get_varlist_from_op_map(inputs)
outputs = _get_output_map_from_op(block.vars, op)
output_var_list += get_varlist_from_op_map(outputs)
input_var_list = list(set(input_var_list))
output_var_list = list(set(output_var_list))
return input_var_list, output_var_list
def get_vars_name_in_block(block):
vars_list = block.vars.keys()
vars_name_list = [var_name for var_name in vars_list]
return vars_name_list
def is_same_op(op1, op2):
if str(op1) != str(op2):
return False
return True
def _get_input_map_from_op(varmap, op):
"""Returns a dict from op input name to the vars in varmap."""
iomap = collections.OrderedDict()
for key in op.input_names:
vars = []
for varname in op.input(key):
if varname == "@EMPTY@":
continue
if "lod_tensor_blocking_queue" in varname:
continue
vars.append(varmap[varname])
if len(vars) == 1:
iomap[key] = vars[0]
else:
iomap[key] = vars
return iomap
def _get_output_map_from_op(varmap, op):
"""Returns a dict from op output name to the vars in varmap."""
iomap = collections.OrderedDict()
for key in op.output_names:
vars = []
for varname in op.output(key):
if varname == "@EMPTY@":
continue
if "lod_tensor_blocking_queue" in varname:
continue
vars.append(varmap[varname])
if len(vars) == 1:
iomap[key] = vars[0]
else:
iomap[key] = vars
return iomap
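# Hedged example (hypothetical mul op): for an op with inputs {"X": ["fc_0.tmp_0"], "Y": ["fc_0.w_0"]},
# _get_input_map_from_op returns an OrderedDict mapping "X" -> var(fc_0.tmp_0) and "Y" -> var(fc_0.w_0);
# _get_output_map_from_op behaves the same for outputs, and keys with several vars map to a list
# instead of a single Variable.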
def delete_same_ops(block, ops):
for op in ops:
try:
for origin_op in block.ops:
if is_same_op(origin_op, op):
idx = list(block.ops).index(origin_op)
block._remove_op(idx)
break
except Exception as e:
print(e)
|
[] |
[] |
[
"CPU_NUM"
] |
[]
|
["CPU_NUM"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sapcc/netapp-api-exporter/pkg/collector"
"gopkg.in/alecthomas/kingpin.v2"
log "github.com/sirupsen/logrus"
)
var (
configFile = kingpin.Flag("config", "Config file").Short('c').Default("./netapp-filers.yaml").String()
listenAddress = kingpin.Flag("listen", "Listen address").Short('l').Default("0.0.0.0").String()
debug = kingpin.Flag("debug", "Debug mode").Short('d').Bool()
volumeFetchPeriod = kingpin.Flag("volume-fetch-period", "Period of asynchronously fetching volumes").Short('v').Default("2m").Duration()
disableAggregate = kingpin.Flag("no-aggregate", "Disable aggregate collector").Bool()
disableVolume = kingpin.Flag("no-volume", "Disable volume collector").Bool()
disableSystem = kingpin.Flag("no-system", "Disable system collector").Bool()
DNSErrorCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "netapp_filer_dns_error",
Help: "hostname not resolved",
},
[]string{"host"},
)
AuthenticationErrorCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "netapp_filer_authentication_error",
Help: "access netapp filer failed with 401",
},
[]string{"host"},
)
TimeoutErrorCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "netapp_filer_timeout_error",
Help: "access netapp filer timeout",
},
[]string{"host"},
)
UnknownErrorCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "netapp_filer_unknown_error",
Help: "check filer failed with unknown error",
},
[]string{"host"},
)
)
func main() {
var filers map[string]Filer
// new prometheus registry and register global collectors
reg := prometheus.NewPedanticRegistry()
reg.MustRegister(prometheus.NewGoCollector())
reg.MustRegister(DNSErrorCounter)
reg.MustRegister(AuthenticationErrorCounter)
reg.MustRegister(TimeoutErrorCounter)
reg.MustRegister(UnknownErrorCounter)
// load filers from configuration and register a new collector for each new filer
go func() {
initLoadCounter := 0
initLoadCh := make(chan bool, 1)
reloadTicker := time.NewTicker(5 * time.Minute)
defer reloadTicker.Stop()
for {
ff, err := loadFilers(*configFile)
if err != nil {
log.WithError(err).Error("load filers failed")
// retry the initial config file load quickly, up to 10 times
if initLoadCounter < 10 {
time.Sleep(10 * time.Second)
initLoadCh <- true
}
} else {
for _, f := range ff {
if _, ok := filers[f.Host]; ok {
continue
}
l := log.WithFields(log.Fields{
"Name": f.Name,
"Host": f.Host,
"AvailabilityZone": f.AvailabilityZone,
"AggregatePattern": f.AggregatePattern,
})
l.Info("check filer")
if !checkFiler(f, l) {
continue
}
l.Info("register filer")
err = registerFiler(reg, f)
if err != nil {
l.Error(err)
continue
}
if filers == nil {
filers = make(map[string]Filer)
}
filers[f.Host] = f
}
}
select {
case <-initLoadCh:
initLoadCounter += 1
case <-reloadTicker.C:
}
}
}()
port := "9108"
addr := *listenAddress + ":" + port
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
log.Debugf("open link http://%s/metrics for metrics", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}
func checkFiler(f Filer, l *log.Entry) bool {
var dnsError *net.DNSError
status, err := f.Client.CheckCluster()
l = l.WithField("status", strconv.Itoa(status))
switch status {
case 200, 201, 202, 204, 205, 206:
case 401:
AuthenticationErrorCounter.WithLabelValues(f.Host).Inc()
l.Error("check filer failed: authentication error")
return false
default:
if err != nil {
l.WithError(err).Error("check filer failed")
if errors.As(err, &dnsError) {
DNSErrorCounter.WithLabelValues(f.Host).Inc()
} else if errors.Is(err, context.DeadlineExceeded) {
TimeoutErrorCounter.WithLabelValues(f.Host).Inc()
} else {
UnknownErrorCounter.WithLabelValues(f.Host).Inc()
}
} else {
UnknownErrorCounter.WithLabelValues(f.Host).Inc()
l.Error("check filer failed")
}
return false
}
return true
}
func registerFiler(reg prometheus.Registerer, f Filer) error {
if f.Name == "" {
return fmt.Errorf("Filer.Name not set")
}
if f.AvailabilityZone == "" {
return fmt.Errorf("Filer.AvailabilityZone not set")
}
extraLabels := prometheus.Labels{
"filer": f.Name,
"availability_zone": f.AvailabilityZone,
}
if !*disableAggregate {
prometheus.WrapRegistererWith(extraLabels, reg).MustRegister(
collector.NewAggregateCollector(f.Client, f.Name, f.AggregatePattern))
}
if !*disableVolume {
prometheus.WrapRegistererWith(extraLabels, reg).MustRegister(
collector.NewVolumeCollector(f.Client, f.Name, *volumeFetchPeriod))
}
if !*disableSystem {
prometheus.WrapRegistererWith(extraLabels, reg).MustRegister(
collector.NewSystemCollector(f.Client, f.Name))
}
return nil
}
func init() {
kingpin.Parse()
log.SetOutput(os.Stdout)
log.SetFormatter(&log.TextFormatter{})
if *debug || os.Getenv("DEV") == "1" {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
}
|
[
"\"DEV\""
] |
[] |
[
"DEV"
] |
[]
|
["DEV"]
|
go
| 1 | 0 | |
cmd/osm-bootstrap/osm-bootstrap.go
|
// Package main implements the main entrypoint for osm-bootstrap and utility routines to
// bootstrap the various internal components of osm-bootstrap.
// osm-bootstrap provides crd conversion capability in OSM.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"os"
"github.com/pkg/errors"
"github.com/spf13/pflag"
admissionv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"github.com/openservicemesh/osm/pkg/apis/config/v1alpha1"
"github.com/openservicemesh/osm/pkg/certificate/providers"
"github.com/openservicemesh/osm/pkg/configurator"
"github.com/openservicemesh/osm/pkg/constants"
"github.com/openservicemesh/osm/pkg/crdconversion"
configClientset "github.com/openservicemesh/osm/pkg/gen/client/config/clientset/versioned"
"github.com/openservicemesh/osm/pkg/httpserver"
httpserverconstants "github.com/openservicemesh/osm/pkg/httpserver/constants"
"github.com/openservicemesh/osm/pkg/k8s/events"
"github.com/openservicemesh/osm/pkg/logger"
"github.com/openservicemesh/osm/pkg/messaging"
"github.com/openservicemesh/osm/pkg/metricsstore"
"github.com/openservicemesh/osm/pkg/reconciler"
"github.com/openservicemesh/osm/pkg/signals"
"github.com/openservicemesh/osm/pkg/version"
)
const (
meshConfigName = "osm-mesh-config"
presetMeshConfigName = "preset-mesh-config"
presetMeshConfigJSONKey = "preset-mesh-config.json"
)
var (
verbosity string
osmNamespace string
caBundleSecretName string
osmMeshConfigName string
meshName string
osmVersion string
crdConverterConfig crdconversion.Config
certProviderKind string
tresorOptions providers.TresorOptions
vaultOptions providers.VaultOptions
certManagerOptions providers.CertManagerOptions
enableReconciler bool
scheme = runtime.NewScheme()
)
var (
flags = pflag.NewFlagSet(`osm-bootstrap`, pflag.ExitOnError)
log = logger.New(constants.OSMBootstrapName)
)
type bootstrap struct {
kubeClient kubernetes.Interface
meshConfigClient configClientset.Interface
namespace string
}
func init() {
flags.StringVar(&meshName, "mesh-name", "", "OSM mesh name")
flags.StringVarP(&verbosity, "verbosity", "v", "info", "Set log verbosity level")
flags.StringVar(&osmNamespace, "osm-namespace", "", "Namespace to which OSM belongs.")
flags.StringVar(&osmMeshConfigName, "osm-config-name", "osm-mesh-config", "Name of the OSM MeshConfig")
flags.StringVar(&osmVersion, "osm-version", "", "Version of OSM")
// Generic certificate manager/provider options
flags.StringVar(&certProviderKind, "certificate-manager", providers.TresorKind.String(), fmt.Sprintf("Certificate manager, one of [%v]", providers.ValidCertificateProviders))
flags.StringVar(&caBundleSecretName, "ca-bundle-secret-name", "", "Name of the Kubernetes Secret for the OSM CA bundle")
// Vault certificate manager/provider options
flags.StringVar(&vaultOptions.VaultProtocol, "vault-protocol", "http", "Protocol (http/https) of the Hashi Vault")
flags.StringVar(&vaultOptions.VaultHost, "vault-host", "vault.default.svc.cluster.local", "Host name of the Hashi Vault")
flags.StringVar(&vaultOptions.VaultToken, "vault-token", "", "Secret token for the Hashi Vault")
flags.StringVar(&vaultOptions.VaultRole, "vault-role", "openservicemesh", "Name of the Vault role dedicated to Open Service Mesh")
flags.IntVar(&vaultOptions.VaultPort, "vault-port", 8200, "Port of the Hashi Vault")
// Cert-manager certificate manager/provider options
flags.StringVar(&certManagerOptions.IssuerName, "cert-manager-issuer-name", "osm-ca", "cert-manager issuer name")
flags.StringVar(&certManagerOptions.IssuerKind, "cert-manager-issuer-kind", "Issuer", "cert-manager issuer kind")
flags.StringVar(&certManagerOptions.IssuerGroup, "cert-manager-issuer-group", "cert-manager.io", "cert-manager issuer group")
// Reconciler options
flags.BoolVar(&enableReconciler, "enable-reconciler", false, "Enable reconciler for CRDs, mutating webhook and validating webhook")
_ = clientgoscheme.AddToScheme(scheme)
_ = admissionv1.AddToScheme(scheme)
}
func main() {
log.Info().Msgf("Starting osm-bootstrap %s; %s; %s", version.Version, version.GitCommit, version.BuildDate)
if err := parseFlags(); err != nil {
log.Fatal().Err(err).Msg("Error parsing cmd line arguments")
}
// This ensures CLI parameters (and dependent values) are correct.
if err := validateCLIParams(); err != nil {
events.GenericEventRecorder().FatalEvent(err, events.InvalidCLIParameters, "Error validating CLI parameters")
}
if err := logger.SetLogLevel(verbosity); err != nil {
log.Fatal().Err(err).Msg("Error setting log level")
}
// Initialize kube config and client
kubeConfig, err := clientcmd.BuildConfigFromFlags("", "")
if err != nil {
log.Fatal().Err(err).Msg("Error creating kube configs using in-cluster config")
}
kubeClient := kubernetes.NewForConfigOrDie(kubeConfig)
crdClient := apiclient.NewForConfigOrDie(kubeConfig)
apiServerClient := clientset.NewForConfigOrDie(kubeConfig)
configClient, err := configClientset.NewForConfig(kubeConfig)
if err != nil {
log.Fatal().Err(err).Msgf("Could not access Kubernetes cluster, check kubeconfig.")
return
}
bootstrap := bootstrap{
kubeClient: kubeClient,
meshConfigClient: configClient,
namespace: osmNamespace,
}
err = bootstrap.ensureMeshConfig()
if err != nil {
log.Fatal().Err(err).Msgf("Error setting up default MeshConfig %s from ConfigMap %s", meshConfigName, presetMeshConfigName)
return
}
err = bootstrap.initiatilizeKubernetesEventsRecorder()
if err != nil {
log.Fatal().Err(err).Msg("Error initializing Kubernetes events recorder")
}
stop := signals.RegisterExitHandlers()
_, cancel := context.WithCancel(context.Background())
defer cancel()
// Start the default metrics store
metricsstore.DefaultMetricsStore.Start(
metricsstore.DefaultMetricsStore.ErrCodeCounter,
)
msgBroker := messaging.NewBroker(stop)
// Initialize Configurator to retrieve mesh specific config
cfg := configurator.NewConfigurator(configClient, stop, osmNamespace, osmMeshConfigName, msgBroker)
// Initialize certificate manager/provider
certProviderConfig := providers.NewCertificateProviderConfig(kubeClient, kubeConfig, cfg, providers.Kind(certProviderKind), osmNamespace,
caBundleSecretName, tresorOptions, vaultOptions, certManagerOptions, msgBroker)
certManager, _, err := certProviderConfig.GetCertificateManager()
if err != nil {
events.GenericEventRecorder().FatalEvent(err, events.InvalidCertificateManager,
"Error initializing certificate manager of kind %s", certProviderKind)
}
// Initialize the crd conversion webhook server to support the conversion of OSM's CRDs
crdConverterConfig.ListenPort = 9443
if err := crdconversion.NewConversionWebhook(crdConverterConfig, kubeClient, crdClient, certManager, osmNamespace, enableReconciler, stop); err != nil {
events.GenericEventRecorder().FatalEvent(err, events.InitializationError, "Error creating crd conversion webhook")
}
/*
* Initialize osm-bootstrap's HTTP server
*/
httpServer := httpserver.NewHTTPServer(constants.OSMHTTPServerPort)
// Metrics
httpServer.AddHandler(httpserverconstants.MetricsPath, metricsstore.DefaultMetricsStore.Handler())
// Version
httpServer.AddHandler(httpserverconstants.VersionPath, version.GetVersionHandler())
// Start HTTP server
err = httpServer.Start()
if err != nil {
log.Fatal().Err(err).Msgf("Failed to start OSM metrics/probes HTTP server")
}
if enableReconciler {
log.Info().Msgf("OSM reconciler enabled for custom resource definitions")
err = reconciler.NewReconcilerClient(kubeClient, apiServerClient, meshName, osmVersion, stop, reconciler.CrdInformerKey)
if err != nil {
events.GenericEventRecorder().FatalEvent(err, events.InitializationError, "Error creating reconciler client for custom resource definitions")
}
}
<-stop
log.Info().Msgf("Stopping osm-bootstrap %s; %s; %s", version.Version, version.GitCommit, version.BuildDate)
}
func (b *bootstrap) createDefaultMeshConfig() error {
// find presets config map to build the default MeshConfig from that
presetsConfigMap, err := b.kubeClient.CoreV1().ConfigMaps(b.namespace).Get(context.TODO(), presetMeshConfigName, metav1.GetOptions{})
// If the preset ConfigMap could not be loaded, return the error
if err != nil {
return err
}
// Create a default meshConfig
defaultMeshConfig := buildDefaultMeshConfig(presetsConfigMap)
// Assign to the function-scoped err so the AlreadyExists check below inspects the Create result
if _, err = b.meshConfigClient.ConfigV1alpha1().MeshConfigs(b.namespace).Create(context.TODO(), defaultMeshConfig, metav1.CreateOptions{}); err == nil {
log.Info().Msgf("MeshConfig (%s) created in namespace %s", meshConfigName, b.namespace)
return nil
}
if apierrors.IsAlreadyExists(err) {
log.Info().Msgf("MeshConfig already exists in %s. Skip creating.", b.namespace)
return nil
}
return err
}
func (b *bootstrap) ensureMeshConfig() error {
_, err := b.meshConfigClient.ConfigV1alpha1().MeshConfigs(b.namespace).Get(context.TODO(), meshConfigName, metav1.GetOptions{})
if err == nil {
return nil // default meshConfig was found
}
if apierrors.IsNotFound(err) {
// create a default mesh config since it was not found
if err = b.createDefaultMeshConfig(); err != nil {
return err
}
}
return err
}
// initiatilizeKubernetesEventsRecorder initializes the generic Kubernetes event recorder and associates it with
// the osm-bootstrap pod resource. The events recorder allows osm-bootstrap to publish Kubernetes events to
// report fatal errors encountered while initializing this application. These events will show up in the output of `kubectl get events`
func (b *bootstrap) initiatilizeKubernetesEventsRecorder() error {
bootstrapPod, err := b.getBootstrapPod()
if err != nil {
return errors.Errorf("Error fetching osm-bootstrap pod: %s", err)
}
eventRecorder := events.GenericEventRecorder()
return eventRecorder.Initialize(bootstrapPod, b.kubeClient, osmNamespace)
}
// getBootstrapPod returns the osm-bootstrap pod spec.
// The pod name is inferred from the 'BOOTSTRAP_POD_NAME' env variable which is set during deployment.
func (b *bootstrap) getBootstrapPod() (*corev1.Pod, error) {
podName := os.Getenv("BOOTSTRAP_POD_NAME")
if podName == "" {
return nil, errors.New("BOOTSTRAP_POD_NAME env variable cannot be empty")
}
pod, err := b.kubeClient.CoreV1().Pods(b.namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
log.Error().Err(err).Msgf("Error retrieving osm-bootstrap pod %s", podName)
return nil, err
}
return pod, nil
}
func parseFlags() error {
if err := flags.Parse(os.Args); err != nil {
return err
}
_ = flag.CommandLine.Parse([]string{})
return nil
}
// validateCLIParams contains all checks necessary that various permutations of the CLI flags are consistent
func validateCLIParams() error {
if osmNamespace == "" {
return errors.New("Please specify the OSM namespace using --osm-namespace")
}
if caBundleSecretName == "" {
return errors.Errorf("Please specify the CA bundle secret name using --ca-bundle-secret-name")
}
return nil
}
func buildDefaultMeshConfig(presetMeshConfigMap *corev1.ConfigMap) *v1alpha1.MeshConfig {
presetMeshConfig := presetMeshConfigMap.Data[presetMeshConfigJSONKey]
presetMeshConfigSpec := v1alpha1.MeshConfigSpec{}
err := json.Unmarshal([]byte(presetMeshConfig), &presetMeshConfigSpec)
if err != nil {
log.Fatal().Err(err).Msgf("Error converting preset-mesh-config json string to meshConfig object")
}
return &v1alpha1.MeshConfig{
TypeMeta: metav1.TypeMeta{
Kind: "MeshConfig",
APIVersion: "config.openservicemesh.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: meshConfigName,
},
Spec: presetMeshConfigSpec,
}
}
|
[
"\"BOOTSTRAP_POD_NAME\""
] |
[] |
[
"BOOTSTRAP_POD_NAME"
] |
[]
|
["BOOTSTRAP_POD_NAME"]
|
go
| 1 | 0 | |
data-science-onramp/ai-platform/modules/trainer/tfkeras_model/task.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START ai_platform_tfkeras_task]
"""Trains a Keras model to predict number of trips
started and ended at Citibike stations. """
# [START ai_platform_tfkeras_task_imports]
import argparse
import os
import tensorflow as tf
from trainer import utils
from trainer.tfkeras_model import model
# [END ai_platform_tfkeras_task_imports]
# [START ai_platform_tfkeras_task_args]
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-path",
type=str,
required=True,
help="path to input data"
)
parser.add_argument(
"--num-epochs",
type=int,
help="number of times to go through the data, default=20",
)
parser.add_argument(
"--batch-size",
type=int,
help="number of records to read during each training step, default=128",
)
parser.add_argument(
"--learning-rate",
type=float,
help="learning rate for gradient descent, default=.01",
)
parser.add_argument(
"--verbosity",
choices=["DEBUG", "ERROR", "FATAL", "INFO", "WARN"],
default="INFO",
)
parser.add_argument(
"--model_dir",
type=str,
help="Output directory for the model.",
default=os.environ["AIP_MODEL_DIR"],
)
return parser.parse_args()
# [END ai_platform_tfkeras_task_args]
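# Hedged example invocation (bucket paths and the module path are placeholders inferred from the
# file layout, not taken from the sample's docs):
#   python -m trainer.tfkeras_model.task \
#       --input-path gs://my-bucket/citibike.csv \
#       --num-epochs 5 --batch-size 128 --learning-rate 0.01 \
#       --model_dir gs://my-bucket/model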
# [START ai_platform_tfkeras_task_train_and_evaluate]
# [START ai_platform_tfkeras_task_train_and_evaluate_load]
def train_and_evaluate(
input_path: str,
model_dir: str,
num_epochs: int = 5,
batch_size: int = 128,
learning_rate: float = 0.01
) -> None:
"""Trains and evaluates the Keras model.
Uses the Keras model defined in model.py. Saves the trained model in TensorFlow SavedModel
format to the path given by the --model_dir argument."""
# Split datasets into training and testing
train_feature, eval_feature, train_target, eval_target = utils.load_data(input_path)
# [END ai_platform_tfkeras_task_train_and_evaluate_load]
# [START ai_platform_tfkeras_task_train_and_evaluate_dimensions]
# Extract dimensions of the data
num_train_examples, input_dim = train_feature.shape
num_eval_examples = eval_feature.shape[0]
output_dim = train_target.shape[1]
# [END ai_platform_tfkeras_task_train_and_evaluate_dimensions]
# [START ai_platform_tfkeras_task_train_and_evaluate_model]
# Create the Keras Model
keras_model = model.create_keras_model(
input_dim=input_dim,
output_dim=output_dim,
learning_rate=learning_rate,
)
# [END ai_platform_tfkeras_task_train_and_evaluate_model]
# [START ai_platform_tfkeras_task_train_and_evaluate_training_data]
# Pass a numpy array by passing DataFrame.values
training_dataset = model.input_fn(
features=train_feature.values,
labels=train_target.values,
shuffle=True,
num_epochs=num_epochs,
batch_size=batch_size,
)
# [END ai_platform_tfkeras_task_train_and_evaluate_training_data]
# [START ai_platform_tfkeras_task_train_and_evaluate_validation_data]
# Pass a numpy array by passing DataFrame.values
validation_dataset = model.input_fn(
features=eval_feature.values,
labels=eval_target.values,
shuffle=False,
num_epochs=num_epochs,
batch_size=num_eval_examples,
)
# [END ai_platform_tfkeras_task_train_and_evaluate_validation_data]
# [START ai_platform_tfkeras_task_train_and_evaluate_tensorboard]
# Setup Learning Rate decay.
lr_decay_cb = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: learning_rate + 0.02 * (0.5 ** (1 + epoch)), verbose=True
)
# Setup TensorBoard callback.
tensorboard_cb = tf.keras.callbacks.TensorBoard(
os.path.join(model_dir, "keras_tensorboard"), histogram_freq=1
)
# [END ai_platform_tfkeras_task_train_and_evaluate_tensorboard]
# [START ai_platform_tfkeras_task_train_and_evaluate_fit_export]
# Train model
keras_model.fit(
training_dataset,
steps_per_epoch=int(num_train_examples / batch_size),
epochs=num_epochs,
validation_data=validation_dataset,
validation_steps=1,
verbose=1,
callbacks=[lr_decay_cb, tensorboard_cb],
)
# Export model
keras_model.save(model_dir)
print(f"Model exported to: {model_dir}")
# [END ai_platform_tfkeras_task_train_and_evaluate_fit_export]
# [END ai_platform_tfkeras_task_train_and_evaluate]
if __name__ == "__main__":
args = get_args()
kwargs = {}
if args.num_epochs:
kwargs["num-epochs"] = args.num_epochs
if args.batch_size:
kwargs["batch-size"] = args.batch_size
if args.learning_rate:
kwargs["learning-rate"] = args.learning_rate
tf.compat.v1.logging.set_verbosity(args.verbosity)
train_and_evaluate(args.input_path, args.model_dir, **kwargs)
# [END ai_platform_tfkeras_task]
|
[] |
[] |
[
"AIP_MODEL_DIR"
] |
[]
|
["AIP_MODEL_DIR"]
|
python
| 1 | 0 | |
discordbot.py
|
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def えらい(ctx):
await ctx.send('えらい')
bot.run(token)
|
[] |
[] |
[
"DISCORD_BOT_TOKEN"
] |
[]
|
["DISCORD_BOT_TOKEN"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"gopkg.in/go-playground/webhooks.v5/github"
)
const (
path = "/webhooks"
)
var test = flag.Bool("t", false, "is test mode")
func main() {
flag.Parse()
if *test {
execShell("1", "vipwzw", "chain33", "master")
return
}
hook, _ := github.New(github.Options.Secret(""))
qhook := make(chan interface{}, 64)
http.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
payload, err := hook.Parse(r, github.ReleaseEvent, github.PullRequestEvent)
if err != nil {
if err == github.ErrEventNotFound {
// ok, the event wasn't one of the ones asked to be parsed
}
}
w.Header().Set("Content-type", "application/json")
switch payload.(type) {
case github.ReleasePayload:
release := payload.(github.ReleasePayload)
// Do whatever you want from here...
//fmt.Printf("%+v", release)
w.WriteHeader(http.StatusOK)
io.WriteString(w, `{"action":"release"}`)
qhook <- release
return
case github.PullRequestPayload:
pullRequest := payload.(github.PullRequestPayload)
// Do whatever you want from here...
//fmt.Printf("%+v", pullRequest)
w.WriteHeader(http.StatusOK)
io.WriteString(w, `{"action":"pullrequest"}`)
qhook <- pullRequest
return
}
w.WriteHeader(http.StatusBadRequest)
io.WriteString(w, `{"action":"unkown request event"}`)
})
go webhooksProcess(qhook)
http.ListenAndServe(":3000", nil)
}
func webhooksProcess(ch chan interface{}) {
for payload := range ch {
switch payload.(type) {
case github.ReleasePayload:
break
case github.PullRequestPayload:
processGithubPL(payload.(github.PullRequestPayload))
}
}
}
func processGithubPL(payload github.PullRequestPayload) {
id := fmt.Sprintf("%d", payload.PullRequest.ID)
user := payload.PullRequest.Head.User.Login
branch := payload.PullRequest.Head.Ref
proj := payload.PullRequest.Head.Repo.Name
execShell(id, user, proj, branch)
}
func execShell(id, user, project, branch string) {
log.Println("run", id, user, branch)
gopath := os.Getenv("GOPATH")
repo := "https://github.com/" + user + "/" + project + ".git"
gitpath := gopath + "/src/github.com/33cn/" + project
log.Println("git path", gitpath)
cmd := exec.Command("rm", "-rf", gitpath)
if err := cmd.Run(); err != nil {
log.Println("rm -rf ", gitpath, err)
return
}
log.Println("1. del path ok")
cmd = exec.Command("git", "clone", "-b" , branch, repo, gitpath)
if err := cmd.Run(); err != nil {
log.Println("run git", err)
return
}
log.Println("2. clone ok")
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd = exec.Command("make", "webhook")
cmd.Dir = gitpath
cmd.Stdout = &stdout
cmd.Stderr = &stderr
cmd.Env = append(os.Environ(),
"ChangeID="+id,
"name="+user,
"b="+branch,
)
if err := cmd.Run(); err != nil {
log.Println("run make webhook", err, stdout.String(), stderr.String())
return
}
log.Println("3. run ok")
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
automation/tincrepo/main/mpp/lib/gpdbverify.py
|
"""
This utility allows to verify GPDB with the following:
- gpverify: online verification with database activity
- gpcheckmirrorseg.pl: check primary mirror integrity with no activity
- gpcheckcat: check the database catalog
Usage:
Run GPDB Verify in a loop every 10 mins. This will stop when GPDB is not running
python gpdbverify.py -l True|T|t
Run GPDB Verify once
python gpdbverify.py
Run GPDB Verify with no activity.
python gpdbverify.py -t noactivity
"""
import os
import sys
import time
import logging
from optparse import OptionParser
import tinctest
from tinctest.lib import local_path, run_shell_command
from qautils.gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from mpp.lib.config import GPDBConfig
#####
class GpdbVerify:
SLEEPTIME = 600 # Default sleep time, 10 mins
LOGPATH = os.path.dirname(os.path.realpath(__file__))
GPVERIFY_STATE = { "success": "SUCCESS",
"running": "RUNNING",
"fail": "FAIL"
}
GPCHECK_STATE = { "fail": "[FAIL]",
"warning": "[WARNING]",
"ok": "[OK]",
"repair": "repair scripts generated in directory"
}
def __init__(self, **kwargs):
"""
Constructor for GpdbVerify
"""
self.gphome = os.environ["GPHOME"]
self.mdd = os.environ["MASTER_DATA_DIRECTORY"]
self.pgport = int(os.environ["PGPORT"]) or 5432
self.dbname = os.environ["PGDATABASE"] or 'gptest'
self.config = GPDBConfig()
if not os.path.exists(self.LOGPATH):
os.mkdir(self.LOGPATH)
try: # Initialize the arguments and set default values if they do not exist
self.set_commandtype(kwargs["cmdtype"])
self.set_sleeptime(kwargs["sleeptime"])
except KeyError, e:
self.set_commandtype("cmd")
self.set_sleeptime(self.SLEEPTIME)
def set_commandtype(self, value):
"""
Set the Command Type
@param value: command type
"""
self.cmdtype = value
def set_sleeptime(self, value):
"""
Set the sleept time
@param value: sleeptime in seconds
"""
self.sleeptime = value
def gpverify(self, wait=False):
"""
gpverify utility
@param wait
"""
tinctest.logger.info("Running gpverify ...")
if self.check_integrityresults() == 0: # Check first whether gpverify is already running
cmd = Command(name=' gpverify --full ', cmdStr = 'gpverify --full')
cmd.run(validateAfter=False)
result = cmd.get_results()
if result.rc and wait:
while self.check_integrityresults()==-1:
time.sleep(self.SLEEPTIME)
tinctest.logger.info("gpverify is still Running ...")
def gpcheckmirrorseg(self, options=None, master=False):
"""
gpcheckmirrorseg utility
@param options
@param master
@return True or False and Fix File
"""
if master:
integrity_outfile = self.LOGPATH + "/checkmastermirrorseg_output"
else:
integrity_outfile = self.LOGPATH + "/checkmirrorseg_output"
fix_outfile = self.LOGPATH + "/fix_file"
if options is None:
if master:
version = PSQL.run_sql_command('select version();', flags = '-q -t', dbname= 'postgres')
if '4.3' not in version :
PSQL.run_sql_command("CHECKPOINT; CHECKPOINT; CHECKPOINT; CHECKPOINT; select pg_sleep(60);")
options = "-mastermirror=true -exclude recovery.conf -ignore 'gp_restore' -ignore 'gp_cdatabase' -ignore 'gp_dump' -ignore '_data' -ignore 'wet_execute.tbl' -ignore 'pg_xlog' -ignore 'pg_changetracking' -ignore 'pg_verify' -ignore 'backup_label.old' -dirignore 'pg_xlog' -ignore 'pg_subtrans'"
else:
options = " -ignore '_data' -ignore 'wet_execute.tbl' -ignore 'gp_dump' -ignore 'core' -ignore pg_changetracking -ignore 'pg_xlog' -ignore 'pg_verify' -parallel=true"
command = "gpcheckmirrorseg.pl -connect '-p %d -d %s' -fixfile %s" \
% (self.pgport, self.dbname, fix_outfile)
command += options
command += "> %s 2>&1" % (integrity_outfile)
tinctest.logger.info("Running gpcheckmirroseg.pl ...")
run_shell_command(command)
ifile = open(integrity_outfile, 'r')
lines = ifile.readlines()
checkmirror = False
if lines[-1].find('no differences found')>=0:
checkmirror = True
else:
# Issue with mirror cleanup from Storage when we abort transaction.
# For now, ignore the extra_m and rewrites
checkmirror = self.ignore_extra_m(fix_outfile)
if master:
if checkmirror:
tinctest.logger.info("-- MasterMirror Integrity passed")
else:
tinctest.logger.error("-- MasterMirror Integrity failed")
else:
if checkmirror:
tinctest.logger.info("-- PrimaryMirror Integrity check passed")
else:
tinctest.logger.info("-- PrimaryMirror Integrity check failed")
return (checkmirror, fix_outfile)
def ignore_extra_m(self, fix_file):
"""
Ignore extra_m and rewrites to new fix_file_ignoreextram
"""
os.rename(fix_file, fix_file+".bak")
f = open(fix_file, "w")
contents = open(fix_file+".bak").readlines()
tinctest.logger.info('contents of fixfile %s' % contents)
checkmirror = True
for line in contents:
if line.find('extra_m') == -1:
f.write(line)
checkmirror = False
f.close()
return checkmirror
def gpcheckcat(self, dbname=None, alldb=False, online=False, testname=None):
"""
gpcheckcat wrapper
@param dbname: Database name (default gptest)
@param alldb: Check all database
@param online: Activity (True) vs No Activity (False)
@return: errorCode, hasError, gpcheckcat output, repairScript
# errorCode from gpcheckcat
# SUCCESS=0
# ERROR_REMOVE=1
# ERROR_RESYNC=2
# ERROR_NOREPAIR=3
"""
if dbname is None:
dbname = self.dbname
if alldb is False:
alldb = ""
else:
alldb = "-A"
if online:
online = "-O"
else:
online = ""
if testname is None:
testname = ""
else:
testname = "-R %s" % testname
outputFile = self.LOGPATH + '/checkcat_output'
tinctest.logger.info("Running gpcheckcat ...")
checkcat_cmd = "%s/bin/lib/gpcheckcat %s %s %s %s > %s 2>&1; echo $?" \
% (self.gphome, alldb, online, testname, dbname, outputFile)
cmd = Command(name=' Running Gpcheckcat.. ', cmdStr = checkcat_cmd)
cmd.run(validateAfter=False)
result = cmd.get_results()
# Get Error Code from gpcheckcat
errorCode = result.rc
gpcheckcat_output = open(outputFile).readlines()
(hasError, repairScript) = self.check_catalogresults(outputFile)
return (errorCode, hasError, gpcheckcat_output, repairScript)
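# Hedged usage sketch (values are illustrative; assumes gpv is a GpdbVerify() instance):
#     (errorCode, hasError, output, repair_script) = gpv.gpcheckcat(alldb=True, online=True)
# a clean catalog typically yields errorCode == 0 and an empty repair_script, while a non-zero
# errorCode together with a repair_script directory means gpcheckcat generated repair SQL to review.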
def run_repair_script(self,repair_script):
'''
@summary : Run the gpcheckcat repair script generated by gpcheckcat,
then repeat gpcheckcat once more to verify that the repair script fixed the issues
'''
cmd = "/bin/bash %s/runsql.sh" % str(repair_script).strip()
ok = run_shell_command(cmd)
if ok:
(errorCode, hasError, output, repair_script) = self.gpcheckcat()
if errorCode == 0:
return True
else:
return False
def check_integrityresults(self):
"""
Check gpverify results from the last token
@return: True or False, -1 is still running
"""
sql = "select vertoken from gp_verification_history order by 1 desc limit 1"
out= PSQL.run_sql_command(sql, flags='-q -t', dbname='postgres')
last_token = out.strip()
if not last_token:
return 0 # No records of gpverify
cmd = Command(name='gpverify', cmdStr="gpverify --results --token %s" % (last_token))
cmd.run(validateAfter=False)
result = cmd.get_results()
state = result.stdout[len(result.stdout)-2]
if state.find(self.GPVERIFY_STATE["success"])>0:
tinctest.logger.info("gpverify at %s: Successful" % last_token)
return True
elif state.find(self.GPVERIFY_STATE["running"])>0:
tinctest.logger.info("gpverify at %s: Running" % last_token)
return -1
elif state.find(self.GPVERIFY_STATE["fail"])>0:
tinctest.logger.info("gpverify at %s: Failed" % last_token)
return False
else:
tinctest.logger.info("gpverify has not start")
return 0
def check_catalogresults(self, output):
"""
Verify the gpcheckcat output for ERROR
@param output: gpcheckcat output
@return: True or False
"""
dbname = ""
error_lines = ""
repair_script = ""
mark = False
f = open(output)
for line in f:
if line.find("Performing check for database")>0:
dbname = line[line.find("'")+1:line.rfind("'")]
if line.find(self.GPCHECK_STATE["fail"])>0:
mark = True
elif line.find(self.GPCHECK_STATE["warning"])>0:
error_lines += "%s: %s\n" % (dbname, line)
elif line.find(self.GPCHECK_STATE["ok"])>0:
mark = False
elif line.find(self.GPCHECK_STATE["repair"])>0:
repair_script = line
if mark:
error_lines += "%s: %s" % (dbname, line)
# gpcheckcat:-[INFO]:-repair scripts generated in directory gpcheckcat.repair.2011-10-10.13.45.12
repair_line = repair_script.split(" ")
repair_script = repair_line[-1] # Get the gpcheckcat repair script
if error_lines == "":
tinctest.logger.info("Catalog Check: Successful")
return (True, repair_script)
else:
tinctest.logger.info("*** Catalog Check: Failed")
tinctest.logger.info(error_lines)
return (False, repair_script)
def check_mastermirrorintegrity(self, options=None):
"""
Check the integrity of master mirror
@param options: options for gpcheckmirrorseg.pl
"""
return self.gpcheckmirrorseg(options=options, master=True)
def online_verification_loop(self):
"""
Run the online verification in a loop, every SLEEPTIME (10 mins)
"""
while self.check_db_is_running(): # Need to check DB is running
self.verify_online()
tinctest.logger.info("Running online verification in ... %d s" %
(self.SLEEPTIME))
time.sleep(self.SLEEPTIME)
def verify_online(self, wait=True):
"""
Verify GPDB even when there is activity with gpverify and gpcheckcat
@param wait: Wait until gpverify is complete
"""
self.gpverify(wait=wait)
self.gpcheckcat(alldb=True, online=True)
def verify_online_noactivity(self, options=None):
"""
Verify GPDB when there is no activity with gpverify and gpcheckcat
@param options: options for gpcheckmirrorseg.pl
"""
if self.config.has_master_mirror():
self.gpcheckmirrorseg(master=True) # Check Master/Standby
self.gpcheckmirrorseg(master=False) # Check Primary/Mirror
self.gpcheckcat(alldb=True, online=False)
def check_db_is_running(self):
"""
Check whether DB is running
TODO: Need to put in gpdbSystem
"""
cmd = Command(name='pg_ctl call',cmdStr="%s/bin/pg_ctl -D %s status" % (self.gphome, self.mdd))
cmd.run(validateAfter=False)
result = cmd.get_results()
if result.rc == 0:
if result.stdout.splitlines()[0].find("server is running")>0:
tinctest.logger.info('Server is Running')
return True
return False
#
# Input argument
#
def ProcessOption():
"""
Process Options
"""
parser = OptionParser()
parser.add_option("-t", "--type", dest="verify",
default="activity",
help="Verify with: activity (default) or no_activity")
parser.add_option("-l", "--loop", dest="loop",
default="False",
help="Loop online verification")
parser.add_option("-s", "--sleep", dest="sleep",
default=None, type="int",
help="gpdb verify sleeps every 600s (10 mins) by default")
(options, args) = parser.parse_args()
return options
##########################
if __name__ == '__main__':
options = ProcessOption()
gpv = GpdbVerify()
if options.verify == "activity":
if options.loop == "True" or options.loop == "t" or options.loop == "T":
if options.sleep:
gpv.set_sleeptime(options.sleep)
gpv.set_commandtype("background")
gpv.online_verification_loop()
else:
gpv.verify_online()
else:
gpv.verify_online_noactivity()
|
[] |
[] |
[
"MASTER_DATA_DIRECTORY",
"PGDATABASE",
"PGPORT",
"GPHOME"
] |
[]
|
["MASTER_DATA_DIRECTORY", "PGDATABASE", "PGPORT", "GPHOME"]
|
python
| 4 | 0 | |
clearml/backend_interface/task/repo/scriptinfo.py
|
import os
import sys
from copy import copy
from functools import partial
from tempfile import mkstemp
import attr
import logging
import json
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output, remove_user_pass_from_url
from ....backend_api import Session
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
_max_requirements_size = 512 * 1024
_packages_remove_version = ('setuptools', )
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self, entry_point_filename=None):
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = self._remove_package_versions(
get_installed_pkgs_detail(), self._packages_remove_version)
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(
module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
return self.create_requirements_txt(reqs, local_pks)
except Exception:
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import boto3 # noqa: F401
modules.add('boto3', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from google.cloud import storage # noqa: F401
modules.add('google_cloud_storage', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from azure.storage.blob import ContentSettings # noqa: F401
modules.add('azure_storage_blob', 'clearml.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch.utils.tensorboard # noqa: F401
# noinspection PyPackageRequirements,PyUnresolvedReferences
import tensorboard # noqa: F401
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
# remove setuptools, we should not specify this module version. It is installed by default
if 'setuptools' in modules:
modules.pop('setuptools', {})
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
for package, version in Task._force_requirements.items():
modules.add(package, 'clearml', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
# noinspection PyBroadException
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower()
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = None, None
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
else:
name = name.replace('-', '_')
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
if k and v is not None:
if v.version:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
conda_requirements += '{0}\n'.format(k)
except Exception:
conda_requirements = ''
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
forced_packages = copy(Task._force_requirements)
except Exception:
forced_packages = {}
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
if v.version:
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
else:
requirements_txt += '# {0}\n'.format(k)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
version = v.version
if k in forced_packages:
forced_version = forced_packages.pop(k, None)
if forced_version:
version = forced_version
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
if k == '-e' and version:
requirements_txt += '{0}\n'.format(version)
elif k.startswith('-e '):
requirements_txt += '{0} {1}\n'.format(k.replace('-e ', '', 1), version or '')
elif version:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', version)
else:
requirements_txt += '{0}\n'.format(k)
# add forced requirements that we could not find installed on the system
for k in sorted(forced_packages.keys()):
if forced_packages[k]:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', forced_packages[k])
else:
requirements_txt += '{0}\n'.format(k)
requirements_txt_packages_only = \
requirements_txt + '\n# Skipping detailed import analysis, it is too large\n'
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
# make sure we do not exceed the size limit
return (requirements_txt if len(requirements_txt) < ScriptRequirements._max_requirements_size
else requirements_txt_packages_only,
conda_requirements)
@staticmethod
def _remove_package_versions(installed_pkgs, package_names_to_remove_version):
installed_pkgs = {k: (v[0], None if str(k) in package_names_to_remove_version else v[1])
for k, v in installed_pkgs.items()}
return installed_pkgs
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
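# snapshot intervals in seconds: the regular sampling period and a shorter one used before the first sample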
_sample_frequency = 30.
_first_sample_frequency = 3.
_jupyter_history_logger = None
@classmethod
def observer(cls, jupyter_notebook_filename, log_history):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
if log_history and cls._jupyter_history_logger is None:
cls._jupyter_history_logger = _JupyterHistoryLogger()
cls._jupyter_history_logger.hook()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_, **__):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from clearml import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception:
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# noinspection PyBroadException
try:
from ....version import __version__
our_module = cls.__module__.split('.')[0], __version__
except Exception:
our_module = None
# noinspection PyBroadException
try:
import re
replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
except Exception:
replace_ipython_pattern = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
script_code = None
fmodules = None
current_cell = None
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
if cls._jupyter_history_logger:
script_code, current_cell = cls._jupyter_history_logger.history_to_str()
else:
# noinspection PyBroadException
try:
# noinspection PyBroadException
try:
os.unlink(local_jupyter_filename)
except Exception:
pass
get_ipython().run_line_magic('history', '-t -f {}'.format(local_jupyter_filename))
with open(local_jupyter_filename, 'r') as f:
script_code = f.read()
# load the modules
from ....utilities.pigar.modules import ImportedModules
fmodules = ImportedModules()
for nm in set([str(m).split('.')[0] for m in sys.modules]):
fmodules.add(nm, 'notebook', 0)
except Exception:
continue
# get notebook python script
if script_code is None:
script_code, _ = _script_exporter.from_filename(local_jupyter_filename)
current_script_hash = hash(script_code + (current_cell or ''))
if prev_script_hash and prev_script_hash == current_script_hash:
continue
# remove ipython direct access from the script code
# we will not be able to run them anyhow
if replace_ipython_pattern:
script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
if fmodules is None:
fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', script_code)
if current_cell:
cell_fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', current_cell)
# noinspection PyBroadException
try:
fmodules |= cell_fmodules
except Exception:
pass
# add current cell to the script
if current_cell:
script_code += '\n' + current_cell
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
# noinspection PyUnboundLocalVariable
installed_pkgs = get_installed_pkgs_detail()
# make sure we are in installed packages
if our_module and (our_module[0] not in installed_pkgs):
installed_pkgs[our_module[0]] = our_module
# noinspection PyUnboundLocalVariable
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
# noinspection PyProtectedMember
task._update_script(script=data_script)
# update requirements
# noinspection PyProtectedMember
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
max_diff_size_bytes = 500000
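# uncommitted diffs larger than this (bytes) are stored as an auxiliary artifact instead of inline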
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename, log_history=False):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename, log_history)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
if log_history:
get_ipython().events.register('post_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
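# only relevant when running inside an ipykernel-launched kernel (argv: .../ipykernel_launcher.py ... kernel-<id>.json)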
if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
# noinspection PyBroadException
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
# noinspection PyPackageRequirements
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), 'nbserver-*.json')):
# noinspection PyBroadException
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
cookies = None
password = None
if server_info and server_info.get('password'):
# we need to get the password
from ....config import config
password = config.get('development.jupyter_server_password', '')
if not password:
_logger.warning(
'Password protected Jupyter Notebook server was found! '
'Add `sdk.development.jupyter_server_password=<jupyter_password>` to ~/clearml.conf')
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
r = requests.get(url=server_info['url'] + 'login')
cookies = {'_xsrf': r.cookies.get('_xsrf', '')}
r = requests.post(server_info['url'] + 'login?next', cookies=cookies,
data={'_xsrf': cookies['_xsrf'], 'password': password})
cookies.update(r.cookies)
try:
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
# noinspection PyUnresolvedReferences
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
# send request to the jupyter server
try:
r.raise_for_status()
except Exception as ex:
_logger.warning('Failed accessing the jupyter server{}: {}'.format(
' [password={}]'.format(password) if server_info.get('password') else '', ex))
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = str(notebook_name or 'notebook').replace(
'>', '_').replace('<', '_').replace('.ipynb', '.py')
if not script_entry_point.lower().endswith('.py'):
script_entry_point += '.py'
local_ipynb_file = None
else:
# always slash, because this comes from a URI (so never backslash, not even on Windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
# install the post store hook,
# notice that if we do not have a local file we serialize/write the entire notebook every time
cls._jupyter_install_post_store_hook(local_ipynb_file, is_google_colab)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(
str(script_path), str(cls._get_working_dir(repo_root, return_abs=True)))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _cwd(cls):
# return the current working directory (solve for hydra changing it)
# check if running with hydra
if sys.modules.get('hydra'):
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
import hydra
return Path(hydra.utils.get_original_cwd()).absolute()
except Exception:
pass
return Path.cwd().absolute()
@classmethod
def _get_working_dir(cls, repo_root, return_abs=False):
# get the repository working directory (might be different from actual cwd)
repo_root = Path(repo_root).absolute()
cwd = cls._cwd()
try:
# do not change: test if we are under the repo root folder, it will throw an exception if we are not
relative = cwd.relative_to(repo_root).as_posix()
return cwd.as_posix() if return_abs else relative
except ValueError:
# Working directory not under repository root, default to repo root
return repo_root.as_posix() if return_abs else '.'
@classmethod
def _absolute_path(cls, file_path, cwd):
# return the absolute path, relative to a specific working directory (cwd)
file_path = Path(file_path)
if file_path.is_absolute():
return file_path.as_posix()
# Convert to absolute and squash 'path/../folder'
return os.path.abspath((Path(cwd).absolute() / file_path).as_posix())
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(cls, filepaths, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False):
jupyter_filepath = cls._get_jupyter_notebook_filename()
if jupyter_filepath:
scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
else:
cwd = cls._cwd()
scripts_path = [Path(cls._absolute_path(os.path.normpath(f), cwd)) for f in filepaths if f]
if all(not f.is_file() for f in scripts_path):
raise ScriptInfoError(
"Script file {} could not be found".format(scripts_path)
)
scripts_dir = [f.parent for f in scripts_path]
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
plugin = next((p for p in cls.plugins if any(p.exists(d) for d in scripts_dir)), None)
repo_info = DetectionResult()
script_dir = scripts_dir[0]
script_path = scripts_path[0]
messages = []
auxiliary_git_diff = None
if not plugin:
if log:
log.info("No repository found, storing script code instead")
else:
try:
for i, d in enumerate(scripts_dir):
repo_info = plugin.get_info(
str(d), include_diff=check_uncommitted, diff_from_remote=uncommitted_from_remote)
if not repo_info.is_empty():
script_dir = d
script_path = scripts_path[i]
break
except Exception as ex:
_log("no info for {} ({})", scripts_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", scripts_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
working_dir = cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
# make sure diff is not too big:
if len(diff) > cls.max_diff_size_bytes:
messages.append(
"======> WARNING! Git diff to large to store "
"({}kb), skipping uncommitted changes <======".format(len(diff)//1024))
auxiliary_git_diff = diff
diff = '# WARNING! git diff too large to store, clear this section to execute without it.\n' \
'# full git diff available in Artifacts/auxiliary_git_diff\n' \
'# Clear the section before enqueueing Task!\n'
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
else:
script_requirements = None
script_info = dict(
repository=remove_user_pass_from_url(repo_info.url),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
repo_root=repo_root,
jupyter_filepath=jupyter_filepath,
)
# if repo_info.modified:
# messages.append(
# "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
# script_info.get("repository", "")
# )
# )
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages, auxiliary_git_diff=auxiliary_git_diff),
script_requirements)
@classmethod
def get(cls, filepaths=None, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False):
try:
if not filepaths:
filepaths = [sys.argv[0], ]
return cls._get_script_info(
filepaths=filepaths, check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log, uncommitted_from_remote=uncommitted_from_remote)
except Exception as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def is_running_from_module(cls):
# noinspection PyBroadException
try:
return '__main__' in sys.modules and vars(sys.modules['__main__'])['__package__']
except Exception:
return False
@classmethod
def detect_running_module(cls, script_dict):
# noinspection PyBroadException
try:
# If this is jupyter, do not try to detect the running module, we know what we have.
if script_dict.get('jupyter_filepath'):
return script_dict
if cls.is_running_from_module():
argvs = ''
git_root = os.path.abspath(script_dict['repo_root']) if script_dict['repo_root'] else None
for a in sys.argv[1:]:
if git_root and os.path.exists(a):
# check if common to project:
a_abs = os.path.abspath(a)
if os.path.commonpath([a_abs, git_root]) == git_root:
# adjust path relative to working dir inside git repo
a = ' ' + os.path.relpath(a_abs, os.path.join(git_root, script_dict['working_dir']))
argvs += ' {}'.format(a)
# update the script entry point to match the real argv and module call
script_dict['entry_point'] = '-m {}{}'.format(
vars(sys.modules['__main__'])['__package__'], (' ' + argvs) if argvs else '')
except Exception:
pass
return script_dict
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
auxiliary_git_diff = attr.ib(default=None)
class _JupyterHistoryLogger(object):
_reg_replace_ipython = r'\n([ \t]*)get_ipython\(\)'
_reg_replace_magic = r'\n([ \t]*)%'
_reg_replace_bang = r'\n([ \t]*)!'
def __init__(self):
self._exception_raised = False
self._cells_code = {}
self._counter = 0
self._ip = None
self._current_cell = None
# noinspection PyBroadException
try:
import re
self._replace_ipython_pattern = re.compile(self._reg_replace_ipython)
self._replace_magic_pattern = re.compile(self._reg_replace_magic)
self._replace_bang_pattern = re.compile(self._reg_replace_bang)
except Exception:
self._replace_ipython_pattern = None
self._replace_magic_pattern = None
self._replace_bang_pattern = None
def hook(self, ip=None):
if not ip:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
return
self._ip = get_ipython()
else:
self._ip = ip
# noinspection PyBroadException
try:
# if this is colab, the callbacks do not contain the raw_cell content, so we have to patch it
if 'google.colab' in self._ip.extension_manager.loaded:
self._ip._org_run_cell = self._ip.run_cell
self._ip.run_cell = partial(self._patched_run_cell, self._ip)
except Exception:
pass
# start with the current history
self._initialize_history()
self._ip.events.register('post_run_cell', self._post_cell_callback)
self._ip.events.register('pre_run_cell', self._pre_cell_callback)
self._ip.set_custom_exc((Exception,), self._exception_callback)
def _patched_run_cell(self, shell, *args, **kwargs):
# noinspection PyBroadException
try:
raw_cell = kwargs.get('raw_cell') or args[0]
self._current_cell = raw_cell
except Exception:
pass
# noinspection PyProtectedMember
return shell._org_run_cell(*args, **kwargs)
def history(self, filename):
with open(filename, 'wt') as f:
for k, v in sorted(self._cells_code.items(), key=lambda p: p[0]):
f.write(v)
def history_to_str(self):
# return a pair: (history as str, current cell if we are still in cell execution, otherwise None)
return '\n'.join(v for k, v in sorted(self._cells_code.items(), key=lambda p: p[0])), self._current_cell
# noinspection PyUnusedLocal
def _exception_callback(self, shell, etype, value, tb, tb_offset=None):
self._exception_raised = True
return shell.showtraceback()
def _pre_cell_callback(self, *args, **_):
# noinspection PyBroadException
try:
if args:
self._current_cell = args[0].raw_cell
# we might have this value from somewhere else
if self._current_cell:
self._current_cell = self._conform_code(self._current_cell, replace_magic_bang=True)
except Exception:
pass
def _post_cell_callback(self, *_, **__):
# noinspection PyBroadException
try:
self._current_cell = None
if self._exception_raised:
# do nothing
self._exception_raised = False
return
self._exception_raised = False
# add the cell history
# noinspection PyBroadException
try:
cell_code = '\n' + self._ip.history_manager.input_hist_parsed[-1]
except Exception:
return
# fix magic / bang in code
cell_code = self._conform_code(cell_code)
self._cells_code[self._counter] = cell_code
self._counter += 1
except Exception:
pass
def _initialize_history(self):
# only once
if -1 in self._cells_code:
return
# noinspection PyBroadException
try:
cell_code = '\n' + '\n'.join(self._ip.history_manager.input_hist_parsed[:-1])
except Exception:
return
cell_code = self._conform_code(cell_code)
self._cells_code[-1] = cell_code
def _conform_code(self, cell_code, replace_magic_bang=False):
# fix magic / bang in code
if self._replace_ipython_pattern:
cell_code = self._replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', cell_code)
if replace_magic_bang and self._replace_magic_pattern and self._replace_bang_pattern:
cell_code = self._replace_magic_pattern.sub(r'\n# \g<1>%', cell_code)
cell_code = self._replace_bang_pattern.sub(r'\n# \g<1>!', cell_code)
return cell_code
|
[] |
[] |
[
"CONDA_PREFIX"
] |
[]
|
["CONDA_PREFIX"]
|
python
| 1 | 0 | |
providers/datadog/datadog_provider.go
|
// Copyright 2018 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datadog
import (
"context"
"errors"
"fmt"
"net/url"
"os"
datadogV1 "github.com/DataDog/datadog-api-client-go/api/v1/datadog"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
"github.com/zclconf/go-cty/cty"
)
type DatadogProvider struct { //nolint
terraformutils.Provider
apiKey string
appKey string
apiURL string
authV1 context.Context
datadogClientV1 *datadogV1.APIClient
}
// Init check env params and initialize API Client
func (p *DatadogProvider) Init(args []string) error {
if args[0] != "" {
p.apiKey = args[0]
} else {
if apiKey := os.Getenv("DATADOG_API_KEY"); apiKey != "" {
p.apiKey = apiKey
} else {
return errors.New("api-key requirement")
}
}
if args[1] != "" {
p.appKey = args[1]
} else {
if appKey := os.Getenv("DATADOG_APP_KEY"); appKey != "" {
p.appKey = appKey
} else {
return errors.New("app-key requirement")
}
}
if args[2] != "" {
p.apiURL = args[2]
} else if v := os.Getenv("DATADOG_HOST"); v != "" {
p.apiURL = v
}
// Initialize the Datadog API client
authV1 := context.WithValue(
context.Background(),
datadogV1.ContextAPIKeys,
map[string]datadogV1.APIKey{
"apiKeyAuth": {
Key: p.apiKey,
},
"appKeyAuth": {
Key: p.appKey,
},
},
)
if p.apiURL != "" {
parsedAPIURL, parseErr := url.Parse(p.apiURL)
if parseErr != nil {
return fmt.Errorf(`invalid API URL: %v`, parseErr)
}
if parsedAPIURL.Host == "" || parsedAPIURL.Scheme == "" {
return fmt.Errorf(`missing protocol or host : %v`, p.apiURL)
}
// If api url is passed, set and use the api name and protocol on ServerIndex{1}
authV1 = context.WithValue(authV1, datadogV1.ContextServerIndex, 1)
authV1 = context.WithValue(authV1, datadogV1.ContextServerVariables, map[string]string{
"name": parsedAPIURL.Host,
"protocol": parsedAPIURL.Scheme,
})
}
p.authV1 = authV1
configV1 := datadogV1.NewConfiguration()
datadogClientV1 := datadogV1.NewAPIClient(configV1)
p.datadogClientV1 = datadogClientV1
return nil
}
// GetName return string of provider name for Datadog
func (p *DatadogProvider) GetName() string {
return "datadog"
}
// GetConfig return map of provider config for Datadog
func (p *DatadogProvider) GetConfig() cty.Value {
return cty.ObjectVal(map[string]cty.Value{
"api_key": cty.StringVal(p.apiKey),
"app_key": cty.StringVal(p.appKey),
"api_url": cty.StringVal(p.apiURL),
})
}
// InitService ...
func (p *DatadogProvider) InitService(serviceName string, verbose bool) error {
var isSupported bool
if _, isSupported = p.GetSupportedService()[serviceName]; !isSupported {
return errors.New(p.GetName() + ": " + serviceName + " is not a supported service")
}
p.Service = p.GetSupportedService()[serviceName]
p.Service.SetName(serviceName)
p.Service.SetVerbose(verbose)
p.Service.SetProviderName(p.GetName())
p.Service.SetArgs(map[string]interface{}{
"api-key": p.apiKey,
"app-key": p.appKey,
"api-url": p.apiURL,
"authV1": p.authV1,
"datadogClientV1": p.datadogClientV1,
})
return nil
}
// GetSupportedService return map of support service for Datadog
func (p *DatadogProvider) GetSupportedService() map[string]terraformutils.ServiceGenerator {
return map[string]terraformutils.ServiceGenerator{
"dashboard": &DashboardGenerator{},
"downtime": &DowntimeGenerator{},
"monitor": &MonitorGenerator{},
"screenboard": &ScreenboardGenerator{},
"synthetics": &SyntheticsGenerator{},
"timeboard": &TimeboardGenerator{},
"user": &UserGenerator{},
}
}
// GetResourceConnections return map of resource connections for Datadog
func (DatadogProvider) GetResourceConnections() map[string]map[string][]string {
return map[string]map[string][]string{}
}
// GetProviderData return map of provider data for Datadog
func (p DatadogProvider) GetProviderData(arg ...string) map[string]interface{} {
return map[string]interface{}{}
}
|
[
"\"DATADOG_API_KEY\"",
"\"DATADOG_APP_KEY\"",
"\"DATADOG_HOST\""
] |
[] |
[
"DATADOG_HOST",
"DATADOG_APP_KEY",
"DATADOG_API_KEY"
] |
[]
|
["DATADOG_HOST", "DATADOG_APP_KEY", "DATADOG_API_KEY"]
|
go
| 3 | 0 | |
cmd/domasimu/main.go
|
// Copyright © 2014 Jay R. Wren <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/BurntSushi/toml"
"github.com/dnsimple/dnsimple-go/dnsimple"
"golang.org/x/oauth2"
)
var verbose = flag.Bool("v", false, "Use verbose output")
var list = flag.Bool("l", false, "List domains.")
var update = flag.String("u", "", "Update or create record. The format is 'domain name type oldvalue newvlaue ttl'. Use - for oldvalue to create a new record.")
var del = flag.String("d", "", "Delete record. The format is 'domain name type value'")
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "domasimu config file example:")
toml.NewEncoder(os.Stderr).Encode(Config{"[email protected]", "TOKENHERE1234"})
}
flag.Parse()
_, token, err := getCreds()
if err != nil {
fmt.Fprintln(os.Stderr, "could not read config", err)
return
}
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
tc := oauth2.NewClient(context.Background(), ts)
client := dnsimple.NewClient(tc)
whoamiResponse, err := client.Identity.Whoami()
if err != nil {
fmt.Fprintln(os.Stderr, "could not connect to dnsimple", err)
return
}
if whoamiResponse.Data.Account == nil {
fmt.Fprintln(os.Stderr, "you need to use account token instead of user token")
return
}
accountID := strconv.FormatInt(whoamiResponse.Data.Account.ID, 10)
if *list {
domainsResponse, err := client.Domains.ListDomains(accountID, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "could get domains %v\n", err)
return
}
for _, domain := range domainsResponse.Data {
if *verbose {
fmt.Println(domain.Name, domain.ExpiresOn)
} else {
fmt.Println(domain.Name)
}
}
return
}
if *update != "" {
id, err := createOrUpdate(client, *update, accountID)
if err != nil {
fmt.Fprintln(os.Stderr, "could not get create or update:", err)
} else {
fmt.Printf("record written with id %s\n", id)
}
return
}
if *del != "" {
id, err := deleteRecord(client, *del, accountID)
if err != nil {
fmt.Fprintln(os.Stderr, "could not delete:", err)
} else {
fmt.Printf("record deleted with id %s\n", id)
}
return
}
for _, domain := range flag.Args() {
listZoneRecordsResponse, err := client.Zones.ListRecords(accountID, domain, nil)
if err != nil {
fmt.Fprintln(os.Stderr, "could not get records:", err)
continue
}
for _, record := range listZoneRecordsResponse.Data {
if *verbose {
fmt.Println(record.Name, record.Type, record.Content, record.TTL, record.Priority)
} else {
fmt.Println(record.Name, record.Type, record.Content)
}
}
}
}
var configFileName = func() string {
if os.Getenv("DOMASIMU_CONF") != "" {
return os.Getenv("DOMASIMU_CONF")
}
switch runtime.GOOS {
case "windows":
return filepath.Join(os.Getenv("LOCALAPPDATA"), "Domasimu", "config")
case "darwin":
return filepath.Join(os.Getenv("HOME"), "Library", "Application Support", "Domasimu", "config")
default:
if os.Getenv("XDG_CONFIG_HOME") != "" {
return filepath.Join(os.Getenv("XDG_CONFIG_HOME"), "domasimu", "config")
} else {
return filepath.Join(os.Getenv("HOME"), ".config", "domasimu", "config")
}
}
}()
func getCreds() (string, string, error) {
var config Config
_, err := toml.DecodeFile(configFileName, &config)
if err != nil {
return "", "", err
}
return config.User, config.Token, nil
}
type Config struct {
User string
Token string
}
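// createOrUpdate parses "domain name type oldvalue newvalue ttl"; if no existing record matches oldvalue a new
// record is created, otherwise the matching record is updated. It returns the id of the written record.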
func createOrUpdate(client *dnsimple.Client, message string, accountID string) (string, error) {
pieces := strings.Split(message, " ")
if len(pieces) != 6 {
return "", fmt.Errorf("expected space seperated domain, name, type, oldvalue, newvalue, ttl")
}
domain := pieces[0]
changeRecord := dnsimple.ZoneRecord{
Name: pieces[1],
Type: pieces[2],
}
oldValue := pieces[3]
newRecord := changeRecord
newRecord.Content = pieces[4]
ttl, _ := strconv.Atoi(pieces[5])
newRecord.TTL = ttl
id, err := getRecordIDByValue(client, domain, oldValue, accountID, &changeRecord)
if err != nil {
return "", err
}
var respID string
if id == 0 {
zoneRecordResponse, err := client.Zones.CreateRecord(accountID, domain, newRecord)
if err != nil {
return "", err
}
respID = strconv.FormatInt(zoneRecordResponse.Data.ID, 10)
} else {
zoneRecordResponse, err := client.Zones.UpdateRecord(accountID, domain, id, newRecord)
if err != nil {
return "", err
}
respID = strconv.FormatInt(zoneRecordResponse.Data.ID, 10)
}
return respID, nil
}
func deleteRecord(client *dnsimple.Client, message, accountID string) (string, error) {
pieces := strings.Split(message, " ")
if len(pieces) != 4 {
return "", fmt.Errorf("expected space seperated domain, name, type, value")
}
domain := pieces[0]
changeRecord := dnsimple.ZoneRecord{
Name: pieces[1],
Type: pieces[2],
}
value := pieces[3]
id, err := getRecordIDByValue(client, domain, value, accountID, &changeRecord)
if err != nil {
return "", err
}
if id == 0 {
return "", fmt.Errorf("could not find record")
}
_, err = client.Zones.DeleteRecord(accountID, domain, id)
respID := strconv.FormatInt(id, 10)
return respID, err
}
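// getRecordIDByValue lists the zone records for domain and returns the ID of the first record whose name, type
// and content match changeRecord and value; it returns 0 when no record matches.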
func getRecordIDByValue(client *dnsimple.Client, domain, value, accountID string, changeRecord *dnsimple.ZoneRecord) (int64, error) {
recordResponse, err := client.Zones.ListRecords(accountID, domain, nil)
if err != nil {
return 0, err
}
var id int64
for _, record := range recordResponse.Data {
if record.Name == changeRecord.Name && record.Type == changeRecord.Type && record.Content == value {
id = record.ID
break
}
}
return id, nil
}
|
[
"\"DOMASIMU_CONF\"",
"\"DOMASIMU_CONF\"",
"\"LOCALAPPDATA\"",
"\"HOME\"",
"\"XDG_CONFIG_HOME\"",
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
] |
[] |
[
"DOMASIMU_CONF",
"XDG_CONFIG_HOME",
"HOME",
"LOCALAPPDATA"
] |
[]
|
["DOMASIMU_CONF", "XDG_CONFIG_HOME", "HOME", "LOCALAPPDATA"]
|
go
| 4 | 0 | |
go/src/github.com/joyent/gosdc/cloudapi/cloudapi_test.go
|
//
// gosdc - Go library to interact with the Joyent CloudAPI
//
// Copyright (c) Joyent Inc.
//
package cloudapi_test
import (
"flag"
gc "launchpad.net/gocheck"
"testing"
"github.com/joyent/gocommon/jpc"
"os"
"strconv"
)
const (
testKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDdArXEuyqVPwJ7uT/QLFYrGLposHGKRP4U1YPuXFFYQMa2Mq9cke6c6YYoHpNU3mVjatHp+sicfQHcO9nPMaWXoIn53kWdldvo0brsqGXXaHcQCjCaSooJiMgG4jDWUmnfySOQA0sEAXcktqmePpLsDlih05mORiueAR1Mglrc6TiVvjd8ZTPhZejMzETVusMweIilE+K7cNjQVxwHId5WVjTRAqRCvZXAIcP2+fzDXTmuKWhSdln19bKz5AEp1jU/eg4D4PuQvwynb9A8Ra2SJnOZ2+9cfDVhrbpzVMty4qQU6WblJNjpLnLpkm8w0isYk2Vr13a+1/N941gFcZaZ [email protected]"
testKeyFingerprint = "6b:06:0c:6b:0b:44:67:97:2c:4f:87:28:28:f3:c6:a9"
packageID = "d6ca9994-53e7-4adf-a818-aadd3c90a916"
localPackageID = "11223344-1212-abab-3434-aabbccddeeff"
packageName = "g3-standard-1-smartos"
localPackageName = "Small"
imageID = "f669428c-a939-11e2-a485-b790efc0f0c1"
localImageID = "12345678-a1a1-b2b2-c3c3-098765432100"
testFwRule = "FROM subnet 10.35.76.0/24 TO subnet 10.35.101.0/24 ALLOW tcp (PORT 80 AND PORT 443)"
testUpdatedFwRule = "FROM subnet 10.35.76.0/24 TO subnet 10.35.101.0/24 ALLOW tcp (port 80 AND port 443 AND port 8080)"
networkID = "42325ea0-eb62-44c1-8eb6-0af3e2f83abc"
localNetworkID = "123abc4d-0011-aabb-2233-ccdd4455"
)
var live = flag.Bool("live", false, "Include live Joyent Cloud tests")
var keyName = flag.String("key.name", "", "Specify the full path to the private key, defaults to ~/.ssh/id_rsa")
func Test(t *testing.T) {
// check environment variables
if os.Getenv("LIVE") != "" {
var err error
*live, err = strconv.ParseBool(os.Getenv("LIVE"))
if err != nil {
t.Fatal(err)
}
}
if os.Getenv("KEY_NAME") != "" {
*keyName = os.Getenv("KEY_NAME")
}
if *live {
creds, err := jpc.CompleteCredentialsFromEnv(*keyName)
if err != nil {
t.Fatalf("Error setting up test suite: %s", err.Error())
}
registerJoyentCloudTests(creds)
}
registerLocalTests(*keyName)
gc.TestingT(t)
}
|
[
"\"LIVE\"",
"\"LIVE\"",
"\"KEY_NAME\"",
"\"KEY_NAME\""
] |
[] |
[
"KEY_NAME",
"LIVE"
] |
[]
|
["KEY_NAME", "LIVE"]
|
go
| 2 | 0 | |
services/api-gateway/main.go
|
package main
import (
"context"
"fmt"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/hzhyvinskyi/adventure/services/api-gateway/app"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"log"
"net/http"
"os"
"time"
)
func main() {
proxyAddr := ":" + os.Getenv("API_PORT")
HTTPProxy(proxyAddr)
}
func HTTPProxy(proxyAddr string) {
grpcGatewayMux := runtime.NewServeMux()
// TODO Connect Services
// REST-side routes setup
mux := http.NewServeMux()
mux.Handle("/api/v1/", grpcGatewayMux)
fmt.Println("HTTP Server is listening on " + proxyAddr)
log.Fatalln(http.ListenAndServe(proxyAddr, mux))
}
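// AccessLogInterceptor is a gRPC unary client interceptor: it attaches a generated trace-id to the outgoing
// call metadata, invokes the call, and logs the method name, trace id and elapsed time.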
func AccessLogInterceptor(
ctx context.Context,
method string,
req interface{},
reply interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,
) error {
md, _ := metadata.FromOutgoingContext(ctx)
start := time.Now()
var traceId string
if len(md["Authorization"]) > 0 {
tokenString := md["Authorization"][0]
if tokenString != "" {
// TODO Call function from User service
}
}
// Assign Request ID
traceId = strconv.FormatInt(time.Now().UTC().UnixNano(), 10) // format the int64 timestamp as a decimal string (string() would yield a rune)
mdOut := metadata.Pairs(
"trace-id", traceId,
)
callContext := metadata.NewOutgoingContext(context.Background(), mdOut)
err := invoker(callContext, method, req, reply, cc, opts...)
msg := fmt.Sprintf("Call: %s, traceId: %s, time: %s", method, traceId, time.Since(start))
app.AccessLog(msg)
return err
}
|
[
"\"API_PORT\""
] |
[] |
[
"API_PORT"
] |
[]
|
["API_PORT"]
|
go
| 1 | 0 | |
tests/integration/test_scm.py
|
# vim:ts=4:sw=4:et:
# Copyright 2017-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import WatchmanInstance
import WatchmanTestCase
import pywatchman
import os
import subprocess
if pywatchman.compat.PYTHON3:
STRING_TYPES = (str, bytes)
else:
STRING_TYPES = (str, unicode)
@WatchmanTestCase.expand_matrix
class TestScm(WatchmanTestCase.WatchmanTestCase):
def requiresPersistentSession(self):
return True
def skipIfNoFSMonitor(self):
''' cause the test to skip if fsmonitor is not available.
We don't call this via unittest.skip because we want
to have the skip message show the context '''
try:
self.hg(['help', '--extension', 'fsmonitor'])
except Exception as e:
self.skipTest('fsmonitor is not available: %s' % str(e))
def checkOSApplicability(self):
if os.name == 'nt':
self.skipTest('The order of events on Windows is funky')
def hg(self, args=None, cwd=None):
env = dict(os.environ)
env['HGPLAIN'] = '1'
env['HGUSER'] = 'John Smith <[email protected]>'
env['NOSCMLOG'] = '1' # disable some instrumentation at FB
env['WATCHMAN_SOCK'] = \
WatchmanInstance.getSharedInstance().getSockPath()
p = subprocess.Popen(
# we force the extension on. This is a soft error for
# mercurial if it is not available, so we also employ
# the skipIfNoFSMonitor() test above to make sure the
# environment is sane.
['hg', '--config', 'extensions.fsmonitor='] + args,
env=env,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise Exception("hg %r failed: %s, %s" % (args, out, err))
return out, err
def test_scmHg(self):
self.skipIfNoFSMonitor()
root = self.mkdtemp()
''' Set up a repo with a DAG like this:
@ changeset: 4:6c38b3c78a62
| bookmark: feature2
| tag: tip
| summary: add m2
|
o changeset: 3:88fea8704cd2
| bookmark: TheMaster
| parent: 1:6b3ecb11785e
| summary: add m1
|
| o changeset: 5:7bc34583612
|/ bookmark: feature3
| summary: remove car
|
| o changeset: 2:2db357583971
|/ bookmark: feature1
| summary: add f1
|
o changeset: 0:b08db10380dd
bookmark: initial
summary: initial
'''
self.hg(['init'], cwd=root)
self.touchRelative(root, 'foo')
self.hg(['book', 'initial'], cwd=root)
self.hg(['addremove'], cwd=root)
self.hg(['commit', '-m', 'initial'], cwd=root)
# Some environments prohibit locally creating "master",
# so we use an alternative similar name.
self.hg(['book', 'TheMaster'], cwd=root)
self.touchRelative(root, 'bar')
self.touchRelative(root, 'car')
self.hg(['addremove'], cwd=root)
self.hg(['commit', '-m', 'add bar and car'], cwd=root)
self.hg(['book', 'feature1'], cwd=root)
self.touchRelative(root, 'f1')
self.hg(['addremove'], cwd=root)
self.hg(['commit', '-m', 'add f1'], cwd=root)
self.hg(['co', 'TheMaster'], cwd=root)
self.hg(['book', 'feature3'], cwd=root)
self.hg(['rm', 'car'], cwd=root)
self.hg(['commit', '-m', 'remove car'], cwd=root)
self.hg(['co', 'TheMaster'], cwd=root)
self.touchRelative(root, 'm1')
self.hg(['addremove'], cwd=root)
self.hg(['commit', '-m', 'add m1'], cwd=root)
self.hg(['book', 'feature2'], cwd=root)
self.touchRelative(root, 'm2')
self.hg(['addremove'], cwd=root)
self.hg(['commit', '-m', 'add m2'], cwd=root)
self.watchmanCommand('watch', root)
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name']})
self.assertFileListsEqual(
res['files'], ['foo', 'bar', 'car', 'm1', 'm2'])
# Verify behavior with badly formed queries
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'since': {'scm': {}}})
self.assertIn(
"key 'mergebase-with' is not present in this json object",
str(ctx.exception))
# When the client doesn't know the merge base, we should give
# them the current status and merge base
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': {
'scm': {
'mergebase-with': 'TheMaster'}}})
self.assertNotEqual(res['clock']['scm']['mergebase'], '')
self.assertEqual(res['clock']['scm']['mergebase-with'], 'TheMaster')
# The only file changed between TheMaster and feature2 is m2
self.assertFileListsEqual(res['files'], ['m2'])
# Let's also set up a subscription for the same query
sub = self.watchmanCommand('subscribe', root, 'scmsub', {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': {
'scm': {
'mergebase-with': 'TheMaster'}}})
self.watchmanCommand('flush-subscriptions', root, {'sync_timeout': 1000})
dat = self.getSubFatClocksOnly('scmsub', root=root)
# compare with the query results that we got
self.assertEqual(sub['clock']['scm'], res['clock']['scm'])
self.assertFileListsEqual(res['files'], dat[0]['files'])
mergeBase = res['clock']['scm']['mergebase']
# Ensure that a file that isn't tracked shows up
# as a delta in what we consider to be the common case.
# we're threading the merge-base result from the prior query
# through, so this should just end up looking like a normal
# since query.
self.touchRelative(root, 'w00t')
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': res['clock']})
self.assertEqual(res['clock']['scm']['mergebase'], mergeBase)
self.assertFileListsEqual(res['files'], ['w00t'])
# and check that subscription results are consistent with it
self.watchmanCommand('flush-subscriptions', root, {'sync_timeout': 1000})
dat = self.getSubFatClocksOnly('scmsub', root=root)
self.assertEqual(dat[0]['clock']['scm'], res['clock']['scm'])
self.assertFileListsEqual(res['files'], dat[0]['files'])
# Going back to the merge base, we should get a regular looking incremental
# list of the files as we would from a since query; we expect to see
# the removal of w00t and m2
os.unlink(os.path.join(root, 'w00t'))
self.watchmanCommand('flush-subscriptions', root, {'sync_timeout': 1000})
dat = self.getSubFatClocksOnly('scmsub', root=root)
self.assertFileListsEqual(['w00t'], dat[0]['files'])
self.hg(['co', '-C', 'TheMaster'], cwd=root)
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': res['clock']})
self.assertEqual(res['clock']['scm']['mergebase'], mergeBase)
self.assertFileListsEqual(res['files'], ['w00t', 'm2'])
self.watchmanCommand('flush-subscriptions', root, {'sync_timeout': 1000})
dat = self.getSubFatClocksOnly('scmsub', root=root)
self.assertEqual(dat[0]['clock']['scm'], res['clock']['scm'])
# we already observed the w00t update above, so we expect to see just the
# file(s) that changed in the update operation
self.assertFileListsEqual(['m2'], dat[0]['files'])
# Now we're going to move to another branch with a different mergebase.
self.hg(['co', '-C', 'feature1'], cwd=root)
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': res['clock']})
# We expect to observe the changed merged base
self.assertNotEqual(res['clock']['scm']['mergebase'], mergeBase)
# and only the file that changed since that new mergebase
self.assertFileListsEqual(res['files'], ['f1'])
# check again that subscription results are consistent with it.
self.watchmanCommand('flush-subscriptions', root, {'sync_timeout': 1000})
dat = self.getSubFatClocksOnly('scmsub', root=root)
self.assertEqual(dat[0]['clock']['scm'], res['clock']['scm'])
self.assertFileListsEqual(res['files'], dat[0]['files'])
# and to check whether our dirstate caching code is reasonable,
# run a query that should be able to hit the cache
clock = res['clock']
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': {
'scm': {
'mergebase-with': 'TheMaster'}}})
self.assertEqual(clock['scm'], res['clock']['scm'])
# Fresh instance queries return the complete set of changes (so there is
# no need to provide information on deleted files). In contrast, SCM
# aware queries must contain the deleted files in the result list. Check
# that the deleted file is part of the result set for feature3.
self.hg(['co', '-C', 'feature3'], cwd=root)
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': res['clock']})
self.assertFileListsEqual(res['files'], ['f1', 'car'])
res = self.watchmanCommand('query', root, {
'expression': ['not', ['anyof', ['name', '.hg'], ['dirname', '.hg']]],
'fields': ['name'],
'since': {
'scm': {
'mergebase': '',
'mergebase-with': 'TheMaster'}}})
self.assertFileListsEqual(res['files'], ['car'])
def getSubFatClocksOnly(self, subname, root):
dat = self.waitForSub(subname, root=root)
return [
item for item in dat if not isinstance(item['clock'], STRING_TYPES)
]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
PaddleNLP/legacy/dialogue_domain_classification/run_classifier.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import numpy as np
import multiprocessing
import sys
# sys.path.append("../models/classification/")
from nets import textcnn_net_multi_label
import paddle
import paddle.fluid as fluid
from utils import ArgumentGroup, print_arguments, DataProcesser, DataReader, ConfigReader
from utils import init_checkpoint, check_version, logger
import random
import codecs
import logging
import math
np.random.seed(0)
random.seed(0)
parser = argparse.ArgumentParser(__doc__)
DEV_COUNT = 1
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("init_checkpoint", str, None,
"Init checkpoint to resume training from.")
model_g.add_arg("checkpoints", str, "./checkpoints",
"Path to save checkpoints.")
model_g.add_arg("config_path", str, "./data/input/model.conf", "Model conf.")
model_g.add_arg("build_dict", bool, False, "Build dict.")
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("cpu_num", int, 3, "Number of Threads.")
train_g.add_arg("epoch", int, 100, "Number of epoches for training.")
train_g.add_arg("learning_rate", float, 0.1,
"Learning rate used to train with warmup.")
train_g.add_arg("save_steps", int, 1000,
"The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 100,
"The steps interval to evaluate model performance.")
train_g.add_arg("random_seed", int, 7, "random seed")
train_g.add_arg(
"threshold", float, 0.1,
"When the confidence exceeds the threshold, the corresponding label is given."
)
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
data_g = ArgumentGroup(parser, "data",
"Data paths, vocab paths and data processing options")
data_g.add_arg("data_dir", str, "./data/input/", "Path to training data.")
data_g.add_arg("save_dir", str, "./data/output/", "Path to save.")
data_g.add_arg("max_seq_len", int, 50,
"Tokens' number of the longest seqence allowed.")
data_g.add_arg("batch_size", int, 64,
"The total number of examples in one batch for training.")
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, False, "If set, use GPU for training.")
# run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("do_train", bool, True,
"Whether to perform evaluation on test data set.")
run_type_g.add_arg("do_eval", bool, True,
"Whether to perform evaluation on test data set.")
run_type_g.add_arg("do_test", bool, True,
"Whether to perform evaluation on test data set.")
args = parser.parse_args()
def get_score(pred_result, label, eval_phase):
"""[get precision recall and f-score]
Arguments:
pred_result {[type]} -- [pred labels]
label {[type]} -- [origin labels]
"""
tp = 0
total = 0
true_cnt = 0
pred_pos_num = 0
pos_num = 0
for i in range(len(pred_result)):
total += 1
pred_labels = []
actual_labels = []
for j in range(1, len(pred_result[0])): # the 0 one is background
if pred_result[i][j] == 1:
pred_labels.append(j)
if label[i][j] == 1:
actual_labels.append(j)
if len(pred_labels) > 0:
pred_pos_num += 1
if len(actual_labels) > 0:
pos_num += 1
if set(actual_labels).issubset(set(pred_labels)):
tp += 1
true_cnt += 1
elif len(pred_labels) == 0 and len(actual_labels) == 0:
true_cnt += 1
try:
precision = tp * 1.0 / pred_pos_num
recall = tp * 1.0 / pos_num
f1 = 2 * precision * recall / (recall + precision)
except Exception as e:
precision = 0
recall = 0
f1 = 0
acc = true_cnt * 1.0 / total
logger.info("tp, pred_pos_num, pos_num, total")
logger.info("%d, %d, %d, %d" % (tp, pred_pos_num, pos_num, total))
logger.info("%s result is : precision is %f, recall is %f, f1_score is %f, acc is %f" % (eval_phase, precision, \
recall, f1, acc))
def train(args, train_exe, build_res, place):
"""[train the net]
Arguments:
args {[type]} -- [description]
train_exe {[type]} -- [description]
compiled_prog{[type]} -- [description]
build_res {[type]} -- [description]
place {[type]} -- [description]
"""
global DEV_COUNT
compiled_prog = build_res["compiled_prog"]
cost = build_res["cost"]
prediction = build_res["prediction"]
pred_label = build_res["pred_label"]
label = build_res["label"]
fetch_list = [cost.name, prediction.name, pred_label.name, label.name]
train_data_loader = build_res["train_data_loader"]
train_prog = build_res["train_prog"]
steps = 0
time_begin = time.time()
test_exe = train_exe
logger.info("Begin training")
for i in range(args.epoch):
try:
for data in train_data_loader():
avg_cost_np, avg_pred_np, pred_label, label = train_exe.run(feed=data, program=compiled_prog, \
fetch_list=fetch_list)
steps += 1
if steps % int(args.skip_steps) == 0:
time_end = time.time()
used_time = time_end - time_begin
get_score(pred_label, label, eval_phase="Train")
logger.info('loss is {}'.format(avg_cost_np))
logger.info("epoch: %d, step: %d, speed: %f steps/s" %
(i, steps, args.skip_steps / used_time))
time_begin = time.time()
if steps % args.save_steps == 0:
save_path = os.path.join(args.checkpoints,
"step_" + str(steps))
fluid.io.save(train_prog, save_path)
logger.info("[save]step %d : save at %s" %
(steps, save_path))
if steps % args.validation_steps == 0:
if args.do_eval:
evaluate(args, test_exe, build_res, "eval")
if args.do_test:
evaluate(args, test_exe, build_res, "test")
except Exception as e:
logger.exception(str(e))
logger.error("Train error : %s" % str(e))
exit(1)
save_path = os.path.join(args.checkpoints, "step_" + str(steps))
fluid.io.save(train_prog, save_path)
logger.info("[save]step %d : save at %s" % (steps, save_path))
def evaluate(args,
test_exe,
build_res,
eval_phase,
save_result=False,
id2intent=None):
"""[evaluate on dev/test dataset]
Arguments:
args {[type]} -- [description]
test_exe {[type]} -- [description]
test_prog {[type]} -- [description]
build_res {[type]} -- [description]
place {[type]} -- [description]
eval_phase {[type]} -- [description]
Keyword Arguments:
threshold {float} -- [description] (default: {0.5})
save_result {bool} -- [description] (default: {False})
id2intent {[type]} -- [description] (default: {None})
"""
place = build_res["test_place"]
threshold = args.threshold
cost = build_res["cost"]
prediction = build_res["prediction"]
pred_label = build_res["pred_label"]
label = build_res["label"]
fetch_list = [cost.name, prediction.name, pred_label.name, label.name]
total_cost, total_acc, pred_prob_list, pred_label_list, label_list = [], [], [], [], []
if eval_phase == "eval":
test_prog = build_res["eval_compiled_prog"]
test_data_loader = build_res["eval_data_loader"]
elif eval_phase == "test":
test_prog = build_res["test_compiled_prog"]
test_data_loader = build_res["test_data_loader"]
else:
exit(1)
logger.info("-----------------------------------------------------------")
for data in test_data_loader():
        avg_cost_np, avg_pred_np, pred_label, label = test_exe.run(program=test_prog, fetch_list=fetch_list, feed=data, \
            return_numpy=True)
total_cost.append(avg_cost_np)
pred_prob_list.extend(avg_pred_np)
pred_label_list.extend(pred_label)
label_list.extend(label)
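    # optionally dump per-query predictions (intents scoring above the threshold) to args.save_dir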
if save_result:
logger.info("save result at : %s" % args.save_dir + "/" + eval_phase +
".rst")
save_dir = args.save_dir
if not os.path.exists(save_dir):
logger.warning("save dir not exists, and create it")
os.makedirs(save_dir)
fin = codecs.open(
os.path.join(args.data_dir, eval_phase + ".txt"),
"r",
encoding="utf8")
fout = codecs.open(
args.save_dir + "/" + eval_phase + ".rst", "w", encoding="utf8")
for line in pred_prob_list:
query = fin.readline().rsplit("\t", 1)[0]
res = []
for i in range(1, len(line)):
if line[i] > threshold:
#res.append(id2intent[i]+":"+str(line[i]))
res.append(id2intent[i])
if len(res) == 0:
res.append(id2intent[0])
fout.write("%s\t%s\n" % (query, "\2".join(sorted(res))))
fout.close()
fin.close()
logger.info("[%s] result: " % eval_phase)
get_score(pred_label_list, label_list, eval_phase)
logger.info('loss is {}'.format(sum(total_cost) * 1.0 / len(total_cost)))
logger.info("-----------------------------------------------------------")
def create_net(args,
flow_data,
class_dim,
dict_dim,
place,
model_name="textcnn_net",
is_infer=False):
"""[create network and loader]
Arguments:
flow_data {[type]} -- [description]
class_dim {[type]} -- [description]
dict_dim {[type]} -- [description]
place {[type]} -- [description]
Keyword Arguments:
model_name {str} -- [description] (default: {"textcnn_net"})
is_infer {bool} -- [description] (default: {False})
Returns:
[type] -- [description]
"""
    if model_name == "textcnn_net":
        model = textcnn_net_multi_label
    else:
        raise ValueError("unsupported model_name: %s" % model_name)
char_list = fluid.data(
name="char",
shape=[None, args.max_seq_len, 1],
dtype="int64",
lod_level=0)
label = fluid.data(
name="label", shape=[None, class_dim], dtype="float32",
lod_level=0) # label data
data_loader = fluid.io.DataLoader.from_generator(
feed_list=[char_list, label],
capacity=args.batch_size * 10,
iterable=True,
return_list=False)
output = model(
char_list,
label,
dict_dim,
emb_dim=flow_data["model"]["emb_dim"],
hid_dim=flow_data["model"]["hid_dim"],
hid_dim2=flow_data["model"]["hid_dim2"],
class_dim=class_dim,
win_sizes=flow_data["model"]["win_sizes"],
is_infer=is_infer,
threshold=args.threshold,
max_seq_len=args.max_seq_len)
if is_infer:
prediction = output
return [data_loader, prediction]
else:
avg_cost, prediction, pred_label, label = output[0], output[1], output[
2], output[3]
return [data_loader, avg_cost, prediction, pred_label, label]
def build_data_loader(args, char_dict, intent_dict):
"""[decorate samples for dataloader]
Arguments:
args {[type]} -- [description]
char_dict {[type]} -- [description]
intent_dict {[type]} -- [description]
Returns:
[type] -- [description]
"""
loader_res = {}
if args.do_train:
train_processor = DataReader(char_dict, intent_dict, args.max_seq_len)
train_data_generator = train_processor.prepare_data(
data_path=args.data_dir + "train.txt",
batch_size=args.batch_size,
mode='train')
loader_res["train_data_generator"] = train_data_generator
num_train_examples = train_processor._get_num_examples()
logger.info("Num train examples: %d" % num_train_examples)
logger.info("Num train steps: %d" % (math.ceil(num_train_examples * 1.0 / args.batch_size) * \
args.epoch // DEV_COUNT))
if math.ceil(num_train_examples * 1.0 /
args.batch_size) // DEV_COUNT <= 0:
            logger.error(
                "Number of train steps is less than or equal to 0, exiting")
exit(1)
if args.do_eval:
eval_processor = DataReader(char_dict, intent_dict, args.max_seq_len)
eval_data_generator = eval_processor.prepare_data(
data_path=args.data_dir + "eval.txt",
batch_size=args.batch_size,
mode='eval')
loader_res["eval_data_generator"] = eval_data_generator
num_eval_examples = eval_processor._get_num_examples()
logger.info("Num eval examples: %d" % num_eval_examples)
if args.do_test:
test_processor = DataReader(char_dict, intent_dict, args.max_seq_len)
test_data_generator = test_processor.prepare_data(
data_path=args.data_dir + "test.txt",
batch_size=args.batch_size,
mode='test')
loader_res["test_data_generator"] = test_data_generator
return loader_res
def build_graph(args, model_config, num_labels, dict_dim, place, test_place,
loader_res):
"""[build paddle graph]
Arguments:
args {[type]} -- [description]
model_config {[type]} -- [description]
num_labels {[type]} -- [description]
dict_dim {[type]} -- [description]
place {[type]} -- [description]
loader_res {[type]} -- [description]
Returns:
[type] -- [description]
"""
res = {}
cost, prediction, pred_label, label = None, None, None, None
train_prog = fluid.default_main_program()
startup_prog = fluid.default_startup_program()
eval_prog = train_prog.clone(for_test=True)
test_prog = train_prog.clone(for_test=True)
train_prog.random_seed = args.random_seed
startup_prog.random_seed = args.random_seed
if args.do_train:
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
train_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
dict_dim, place, model_name="textcnn_net")
train_data_loader.set_sample_list_generator(
loader_res['train_data_generator'], places=place)
res["train_data_loader"] = train_data_loader
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.exponential_decay(
learning_rate=args.learning_rate,
decay_steps=1000,
decay_rate=0.5,
staircase=True))
sgd_optimizer.minimize(cost)
if args.do_eval:
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
eval_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
dict_dim, test_place, model_name="textcnn_net")
eval_data_loader.set_sample_list_generator(
loader_res['eval_data_generator'], places=test_place)
res["eval_data_loader"] = eval_data_loader
if args.do_test:
with fluid.program_guard(test_prog, startup_prog):
with fluid.unique_name.guard():
test_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
dict_dim, test_place, model_name="textcnn_net")
test_data_loader.set_sample_list_generator(
loader_res['test_data_generator'], places=test_place)
res["test_data_loader"] = test_data_loader
res["cost"] = cost
res["prediction"] = prediction
res["label"] = label
res["pred_label"] = pred_label
res["train_prog"] = train_prog
res["eval_prog"] = eval_prog
res["test_prog"] = test_prog
return res
def main(args):
"""
Main Function
"""
global DEV_COUNT
startup_prog = fluid.default_startup_program()
random.seed(args.random_seed)
model_config = ConfigReader.read_conf(args.config_path)
if args.use_cuda:
test_place = fluid.cuda_places(0)
place = fluid.cuda_places()
DEV_COUNT = len(place)
else:
test_place = fluid.cpu_places(1)
os.environ['CPU_NUM'] = str(args.cpu_num)
place = fluid.cpu_places()
DEV_COUNT = args.cpu_num
logger.info("Dev Num is %s" % str(DEV_COUNT))
exe = fluid.Executor(place[0])
if args.do_train and args.build_dict:
DataProcesser.build_dict(args.data_dir + "train.txt", args.data_dir)
# read dict
char_dict = DataProcesser.read_dict(args.data_dir + "char.dict")
dict_dim = len(char_dict)
intent_dict = DataProcesser.read_dict(args.data_dir + "domain.dict")
id2intent = {}
for key, value in intent_dict.items():
id2intent[int(value)] = key
num_labels = len(intent_dict)
# build model
loader_res = build_data_loader(args, char_dict, intent_dict)
build_res = build_graph(args, model_config, num_labels, dict_dim, place,
test_place, loader_res)
build_res["place"] = place
build_res["test_place"] = test_place
if not (args.do_train or args.do_eval or args.do_test):
raise ValueError("For args `do_train`, `do_eval` and `do_test`, at "
"least one of them must be True.")
exe.run(startup_prog)
if args.init_checkpoint and args.init_checkpoint != "None":
try:
init_checkpoint(
exe, args.init_checkpoint, main_program=startup_prog)
logger.info("Load model from %s" % args.init_checkpoint)
except Exception as e:
logger.exception(str(e))
logger.error("Faild load model from %s [%s]" %
(args.init_checkpoint, str(e)))
build_strategy = fluid.compiler.BuildStrategy()
build_strategy.fuse_all_reduce_ops = False
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
# add compiled prog
if args.do_train:
compiled_prog = fluid.compiler.CompiledProgram(build_res["train_prog"]).with_data_parallel( \
loss_name=build_res["cost"].name, \
build_strategy=build_strategy, \
exec_strategy=exec_strategy)
build_res["compiled_prog"] = compiled_prog
if args.do_test:
test_compiled_prog = fluid.compiler.CompiledProgram(build_res[
"test_prog"])
build_res["test_compiled_prog"] = test_compiled_prog
if args.do_eval:
eval_compiled_prog = fluid.compiler.CompiledProgram(build_res[
"eval_prog"])
build_res["eval_compiled_prog"] = eval_compiled_prog
if args.do_train:
train(args, exe, build_res, place)
if args.do_eval:
evaluate(args, exe, build_res, "eval", \
save_result=True, id2intent=id2intent)
if args.do_test:
evaluate(args, exe, build_res, "test",\
save_result=True, id2intent=id2intent)
if __name__ == "__main__":
import paddle
paddle.enable_static()
logger.info("the paddle version is %s" % paddle.__version__)
check_version('1.6.0')
print_arguments(args)
main(args)
|
[] |
[] |
[
"CPU_NUM"
] |
[]
|
["CPU_NUM"]
|
python
| 1 | 0 | |
interactive/model_driver/run_setup.py
|
import os
import time
import subprocess
from django.conf import settings
from interactive.tools import *
from mechanism.reactions import reactions_are_valid
from shutil import rmtree
import json
if "MUSIC_BOX_BUILD_DIR" in os.environ:
mb_dir = os.path.join(os.environ['MUSIC_BOX_BUILD_DIR'])
interface_solo = False
else:
print(os.environ)
mb_dir = ''
interface_solo = True
out_path = os.path.join(mb_dir, 'output.csv')
error_path = os.path.join(mb_dir, 'error.json')
copy_path = os.path.join(settings.BASE_DIR, 'dashboard/static/past_run/past.csv')
config_path = os.path.join(settings.BASE_DIR, "dashboard/static/config/my_config.json")
old_path = os.path.join(settings.BASE_DIR, "dashboard/static/config/old_config.json")
complete_path = os.path.join(mb_dir, 'MODEL_RUN_COMPLETE')
config_dest = os.path.join(settings.BASE_DIR, 'dashboard/static/past_run/config.json')
config_folder_path = os.path.join(settings.BASE_DIR, "dashboard/static/config")
camp_folder_path = os.path.join(settings.BASE_DIR, "dashboard/static/config/camp_data")
def copyConfigFile(source, destination):
    with open(source, 'rb') as configFile:
        content = configFile.read()
    with open(destination, 'wb') as g:
        g.write(content)
reactions_path = os.path.join(settings.BASE_DIR, "dashboard/static/config/camp_data/reactions.json")
species_path = os.path.join(settings.BASE_DIR, "dashboard/static/config/camp_data/species.json")
def add_integrated_rates():
with open(reactions_path) as f:
r_data = json.loads(f.read())
with open(species_path) as h:
s_data = json.loads(h.read())
names_list = []
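    # append a pseudo-product species to every reaction so its integrated rate shows up in the model output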
reactions = r_data['pmc-data'][0]['reactions']
for r in reactions:
if 'reactants' in r:
reactants = [j for j in r['reactants']]
else:
reactants = ['null']
if 'products' in r:
products = [m for m in r['products']]
else:
products = ['null']
name = "myrate__" + '_'.join(reactants) + "->" + '_'.join(products)
if 'type' in r:
name = name + "__" + r['type']
if 'products' not in r:
r.update({'products': {name: {}}})
else:
r['products'].update({name: {}})
names_list.append(name)
for name in names_list:
s_data['pmc-data'].append({"name": name, "type": "CHEM_SPEC"})
r_data['pmc-data'][0].update({'reactions': reactions})
with open(reactions_path, 'w') as g:
json.dump(r_data, g)
with open(species_path, 'w') as i:
json.dump(s_data, i)
def create_file_list():
config = open_json('my_config.json')
filelist = []
configFolderContents = os.listdir(config_folder_path)
for configSection in config:
section = config[configSection]
for configItem in section:
if '.' in configItem:
filelist.append(configItem)
    # removing items from a list while iterating over it skips elements, so build a filtered copy instead
    filelist = [name for name in filelist if name in configFolderContents]
return filelist
def setup_run():
if interface_solo:
return {'model_running': False, 'error_message': 'Model not connected to interface.'}
if not reactions_are_valid():
return {'model_running': False, 'error_message': 'At least one reaction must be present for the model to run.'}
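    # clear artifacts left over from any previous run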
if os.path.isfile(complete_path):
os.remove(complete_path)
if os.path.isfile(out_path):
os.remove(out_path)
if os.path.isfile(error_path):
os.remove(error_path)
with open(reactions_path) as h:
reactions_data = json.load(h)
with open(species_path) as j:
species_data = json.load(j)
add_integrated_rates()
config = open_json('my_config.json')
newpath = os.path.join(mb_dir, 'mb_configuration')
if os.path.exists(newpath):
rmtree(newpath)
os.mkdir(newpath)
else:
os.mkdir(newpath)
filelist = create_file_list()
filelist.append('my_config.json')
print(filelist)
for f in filelist:
copyConfigFile(os.path.join(config_folder_path, f), os.path.join(newpath, f))
camp_path = os.path.join(mb_dir, 'camp_data')
if os.path.exists(camp_path):
rmtree(camp_path)
os.mkdir(camp_path)
for f in os.listdir(camp_folder_path):
copyConfigFile(os.path.join(camp_folder_path, f), os.path.join(camp_path, f))
time.sleep(0.1)
filelist.remove('my_config.json')
for f in filelist:
copyConfigFile(os.path.join('/build/mb_configuration', f), os.path.join('/build', f))
process = subprocess.Popen([r'./music_box', r'./mb_configuration/my_config.json'], cwd=mb_dir)
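    # restore the original CAMP files that add_integrated_rates() modified in place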
with open(reactions_path, 'w') as k:
json.dump(reactions_data, k)
with open(species_path, 'w') as l:
json.dump(species_data, l)
return {'model_running': True}
# copy initial config file on first model run
def setup_config_check():
copyConfigFile(config_path, old_path)
|
[] |
[] |
[
"MUSIC_BOX_BUILD_DIR"
] |
[]
|
["MUSIC_BOX_BUILD_DIR"]
|
python
| 1 | 0 | |
cmd/client.go
|
package cmd
import (
"fmt"
"net/http"
"os"
"time"
"github.com/exoscale/egoscale"
exov2 "github.com/exoscale/egoscale/v2"
)
// cliRoundTripper implements the http.RoundTripper interface and allows client
// request customization, such as HTTP headers injection. If provided with a
// non-nil next parameter, it will wrap around it when performing requests.
type cliRoundTripper struct {
next http.RoundTripper
reqHeaders http.Header
}
func newCLIRoundTripper(next http.RoundTripper, headers map[string]string) cliRoundTripper {
roundTripper := cliRoundTripper{
next: http.DefaultTransport,
reqHeaders: http.Header{},
}
if next != nil {
roundTripper.next = next
}
roundTripper.reqHeaders.Add("User-Agent", fmt.Sprintf("Exoscale-CLI/%s (%s) %s",
gVersion, gCommit, egoscale.UserAgent))
for k, v := range headers {
roundTripper.reqHeaders.Add(k, v)
}
return roundTripper
}
func (rt cliRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
for h := range rt.reqHeaders {
r.Header.Add(h, rt.reqHeaders.Get(h))
}
return rt.next.RoundTrip(r)
}
func buildClient() {
if ignoreClientBuild {
return
}
if cs != nil {
return
}
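	// inject the CLI User-Agent and per-account custom headers into every API request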
httpClient := &http.Client{Transport: newCLIRoundTripper(http.DefaultTransport, gCurrentAccount.CustomHeaders)}
cs = egoscale.NewClient(
gCurrentAccount.Endpoint,
gCurrentAccount.Key,
gCurrentAccount.APISecret(),
egoscale.WithHTTPClient(httpClient),
egoscale.WithoutV2Client())
// During the Exoscale API V1 -> V2 transition, we need to initialize the
// V2 client independently of the V1 client because of HTTP middleware
// (http.Transport) clashes.
// This can be removed once the only API used is V2.
clientExoV2, err := exov2.NewClient(
gCurrentAccount.Key,
gCurrentAccount.APISecret(),
exov2.ClientOptWithAPIEndpoint(gCurrentAccount.Endpoint),
exov2.ClientOptWithTimeout(5*time.Minute),
exov2.ClientOptWithHTTPClient(func() *http.Client {
return &http.Client{
Transport: newCLIRoundTripper(http.DefaultTransport, gCurrentAccount.CustomHeaders),
}
}()),
		exov2.ClientOptCond(func() bool {
			return os.Getenv("EXOSCALE_TRACE") != ""
		}, exov2.ClientOptWithTrace()),
)
if err != nil {
panic(fmt.Sprintf("unable to initialize Exoscale API V2 client: %v", err))
}
cs.Client = clientExoV2
csDNS = egoscale.NewClient(gCurrentAccount.DNSEndpoint,
gCurrentAccount.Key,
gCurrentAccount.APISecret())
csRunstatus = egoscale.NewClient(gCurrentAccount.RunstatusEndpoint,
gCurrentAccount.Key,
gCurrentAccount.APISecret())
}
|
[
"\"EXOSCALE_TRACE\""
] |
[] |
[
"EXOSCALE_TRACE"
] |
[]
|
["EXOSCALE_TRACE"]
|
go
| 1 | 0 |