filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
pkg/registry/endpoint/etcd/etcd.go | /*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic"
etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/cnaize/kubernetes/pkg/api"
)
// REST implements a RESTStorage for endpoints against etcd
type REST struct {
*etcdgeneric.Etcd
}
// NewStorage returns a RESTStorage object that will work against endpoints.
func NewStorage(h tools.EtcdHelper) *REST {
prefix := "/registry/services/endpoints"
return &REST{
&etcdgeneric.Etcd{
NewFunc: func() runtime.Object { return &api.Endpoints{} },
NewListFunc: func() runtime.Object { return &api.EndpointsList{} },
KeyRootFunc: func(ctx api.Context) string {
return etcdgeneric.NamespaceKeyRootFunc(ctx, prefix)
},
KeyFunc: func(ctx api.Context, name string) (string, error) {
return etcdgeneric.NamespaceKeyFunc(ctx, prefix, name)
},
ObjectNameFunc: func(obj runtime.Object) (string, error) {
return obj.(*api.Endpoints).Name, nil
},
PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher {
return endpoint.MatchEndpoints(label, field)
},
EndpointName: "endpoints",
CreateStrategy: endpoint.Strategy,
UpdateStrategy: endpoint.Strategy,
Helper: h,
},
}
}
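// Hypothetical usage sketch (added comment, not part of the original file):
// callers construct the storage from an existing EtcdHelper and register it
// under the "endpoints" resource; `h` and `storageMap` are assumed to exist
// in the caller.
//
//	storage := NewStorage(h)
//	storageMap["endpoints"] = storage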
| []
| []
| []
| [] | [] | go | null | null | null |
web/datasets/tasks.py | from logging import info
from pathlib import Path
import tika
from web.datasets.services import get_s3_client
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from dotenv import find_dotenv, load_dotenv
from dramatiq import actor, set_broker
from dramatiq.brokers.rabbitmq import RabbitmqBroker
from tika import parser
tika.initVM() # noqa
# This (ugly) block makes this module work both inside and outside Django
try:
from web.datasets.models import File
except ImproperlyConfigured:
import configurations
import os
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")
load_dotenv(find_dotenv())
configurations.setup()
from web.datasets.models import File
rabbitmq_broker = RabbitmqBroker(url=settings.BROKER_URL)
set_broker(rabbitmq_broker)
client = get_s3_client(settings)
@actor(max_retries=5)
def content_from_file(file_pk=None, path=None, keep_file=True):
if not any([file_pk, path]):
raise Exception("Ou `file_pk` ou `path` devem ser informados.")
a_file = None
if file_pk:
a_file = File.objects.get(pk=file_pk)
if a_file.content is not None:
return a_file.content
path = client.download_file(a_file.s3_file_path)
keep_file = False
if not Path(path).exists():
info(f"Arquivo {path} não encontrado.")
return
raw = parser.from_file(path)
if not keep_file:
Path(path).unlink()
if a_file:
a_file.content = raw["content"]
a_file.save()
return raw["content"]
@actor(max_retries=5)
def backup_file(file_id):
try:
file_obj = File.objects.get(pk=file_id, s3_url__isnull=True)
except File.DoesNotExist:
info(f"O arquivo ({file_id}) não existe ou já possui backup.")
return
model_name = file_obj.content_object._meta.model_name
relative_file_path = (
f"{model_name}/{file_obj.created_at.year}/"
f"{file_obj.created_at.month}/{file_obj.created_at.day}/"
)
s3_url, s3_file_path = client.upload_file(
file_obj.url, relative_file_path, prefix=file_obj.checksum
)
file_obj.s3_file_path = s3_file_path
file_obj.s3_url = s3_url
file_obj.save()
return s3_url
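# Hedged usage sketch (added for illustration, not part of the original
# module): these actors are normally enqueued through dramatiq rather than
# called directly; the primary keys below are placeholders.
#
#   content_from_file.send(file_pk=42)
#   backup_file.send(123)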
| []
| []
| []
| [] | [] | python | 0 | 0 | |
backend_project/asgi.py | """
ASGI config for backend_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend_project.settings')
application = get_asgi_application()
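# Minimal serving sketch (an assumption, not taken from the project docs): an
# ASGI server such as uvicorn can load the `application` object defined above.
#
#   uvicorn backend_project.asgi:application --port 8000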
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/server/main.go | package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/spinnaker/internal"
)
var cliPort = flag.Int("port", 8080, "Ignored if PORT is set as environmental variable")
func main() {
flag.Parse()
http.HandleFunc("/log", internal.LogEvent)
port := 0
envPort := os.Getenv("PORT")
if envPort == "" {
port = *cliPort
} else {
var err error
if port, err = strconv.Atoi(envPort); err != nil {
log.Printf("Cannot parse port from environment variable (value %v): %v", envPort, err)
port = *cliPort
}
}
log.Println("Listening on port", port)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", port), nil))
}
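// Usage sketch (added comment, assuming the file lives under ./cmd/server):
// the PORT environment variable, when set and parseable, overrides the -port
// flag.
//
//	PORT=9090 go run ./cmd/server
//	go run ./cmd/server -port 9090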
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
dvc/testing/fixtures.py | import os
import subprocess
import pytest
__all__ = [
"make_tmp_dir",
"tmp_dir",
"scm",
"dvc",
"make_cloud",
"make_local",
"cloud",
"local_cloud",
"make_remote",
"remote",
"local_remote",
"workspace",
"make_workspace",
"local_workspace",
"docker",
"docker_compose",
"docker_compose_project_name",
"docker_services",
]
CACHE = {} # type: ignore
@pytest.fixture(scope="session")
def make_tmp_dir(tmp_path_factory, request, worker_id):
def make(
name, *, scm=False, dvc=False, subdir=False
): # pylint: disable=W0621
from shutil import copytree, ignore_patterns
from scmrepo.git import Git
from dvc.repo import Repo
from .tmp_dir import TmpDir
cache = CACHE.get((scm, dvc, subdir))
if not cache:
cache = tmp_path_factory.mktemp("dvc-test-cache" + worker_id)
TmpDir(cache).init(scm=scm, dvc=dvc, subdir=subdir)
CACHE[(scm, dvc, subdir)] = os.fspath(cache)
path = tmp_path_factory.mktemp(name) if isinstance(name, str) else name
# ignore sqlite files from .dvc/tmp. We might not be closing the cache
# connection resulting in PermissionErrors in Windows.
ignore = ignore_patterns("cache.db*")
copytree(cache, path, dirs_exist_ok=True, ignore=ignore)
new_dir = TmpDir(path)
str_path = os.fspath(new_dir)
if dvc:
new_dir.dvc = Repo(str_path)
if scm:
new_dir.scm = (
new_dir.dvc.scm if hasattr(new_dir, "dvc") else Git(str_path)
)
request.addfinalizer(new_dir.close)
return new_dir
return make
@pytest.fixture
def tmp_dir(tmp_path, make_tmp_dir, request, monkeypatch):
monkeypatch.chdir(tmp_path)
fixtures = request.fixturenames
return make_tmp_dir(tmp_path, scm="scm" in fixtures, dvc="dvc" in fixtures)
@pytest.fixture
def scm(tmp_dir):
return tmp_dir.scm
@pytest.fixture
def dvc(tmp_dir):
with tmp_dir.dvc as _dvc:
yield _dvc
@pytest.fixture
def make_local(make_tmp_dir):
def _make_local():
return make_tmp_dir("local-cloud")
return _make_local
@pytest.fixture
def make_cloud(request):
def _make_cloud(typ):
return request.getfixturevalue(f"make_{typ}")()
return _make_cloud
@pytest.fixture
def cloud(make_cloud, request):
typ = getattr(request, "param", "local")
return make_cloud(typ)
@pytest.fixture
def local_cloud(make_cloud):
return make_cloud("local")
@pytest.fixture
def make_remote(tmp_dir, dvc, make_cloud):
def _make_remote(name, typ="local", **kwargs):
cloud = make_cloud(typ) # pylint: disable=W0621
tmp_dir.add_remote(name=name, config=cloud.config, **kwargs)
return cloud
return _make_remote
@pytest.fixture
def remote(make_remote, request):
typ = getattr(request, "param", "local")
return make_remote("upstream", typ=typ)
@pytest.fixture
def local_remote(make_remote):
return make_remote("upstream", typ="local")
@pytest.fixture
def make_workspace(tmp_dir, dvc, make_cloud):
def _make_workspace(name, typ="local"):
from dvc.odbmgr import ODBManager
cloud = make_cloud(typ) # pylint: disable=W0621
tmp_dir.add_remote(name=name, config=cloud.config, default=False)
tmp_dir.add_remote(
name=f"{name}-cache", url="remote://workspace/cache", default=False
)
scheme = getattr(cloud, "scheme", "local")
if scheme != "http":
with dvc.config.edit() as conf:
conf["cache"][scheme] = f"{name}-cache"
dvc.odb = ODBManager(dvc)
return cloud
return _make_workspace
@pytest.fixture
def workspace(make_workspace, request):
typ = getattr(request, "param", "local")
return make_workspace("workspace", typ=typ)
@pytest.fixture
def local_workspace(make_workspace):
return make_workspace("workspace", typ="local")
@pytest.fixture(scope="session")
def docker():
# See https://travis-ci.community/t/docker-linux-containers-on-windows/301
if os.environ.get("CI") and os.name == "nt":
pytest.skip("disabled for Windows on Github Actions")
try:
subprocess.check_output("docker ps", shell=True)
except (subprocess.CalledProcessError, OSError):
pytest.skip("no docker installed")
@pytest.fixture(scope="session")
def docker_compose(docker):
try:
subprocess.check_output("docker-compose version", shell=True)
except (subprocess.CalledProcessError, OSError):
pytest.skip("no docker-compose installed")
@pytest.fixture(scope="session")
def docker_compose_project_name():
return "pytest-dvc-test"
@pytest.fixture(scope="session")
def docker_services(
docker_compose_file, docker_compose_project_name, tmp_path_factory
):
# overriding `docker_services` fixture from `pytest_docker` plugin to
# only launch docker images once.
from filelock import FileLock
from pytest_docker.plugin import DockerComposeExecutor, Services
executor = DockerComposeExecutor(
docker_compose_file, docker_compose_project_name
)
# making sure we don't accidentally launch docker-compose in parallel,
# as it might result in network conflicts. Inspired by:
# https://github.com/pytest-dev/pytest-xdist#making-session-scoped-fixtures-execute-only-once
lockfile = tmp_path_factory.getbasetemp().parent / "docker-compose.lock"
with FileLock(str(lockfile)): # pylint:disable=abstract-class-instantiated
executor.execute("up --build -d")
return Services(executor)
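# Hedged usage sketch (illustrative comment, not part of the fixtures module):
# a test pulls in the fixtures above simply by naming them; the file name and
# contents are placeholders.
#
#   def test_add(tmp_dir, dvc):
#       tmp_dir.gen("foo.txt", "foo")
#       dvc.add("foo.txt")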
| []
| []
| [
"CI"
]
| [] | ["CI"] | python | 1 | 0 | |
vendor/github.com/gophercloud/gophercloud/acceptance/clients/clients.go | // Package clients contains functions for creating OpenStack service clients
// for use in acceptance tests. It also manages the required environment
// variables to run the tests.
package clients
import (
"fmt"
"os"
"strings"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/blockstorage/noauth"
)
// AcceptanceTestChoices contains image and flavor selections for use by the acceptance tests.
type AcceptanceTestChoices struct {
// ImageID contains the ID of a valid image.
ImageID string
// FlavorID contains the ID of a valid flavor.
FlavorID string
// FlavorIDResize contains the ID of a different flavor available on the same OpenStack installation, that is distinct
// from FlavorID.
FlavorIDResize string
// FloatingIPPool contains the name of the pool from where to obtain floating IPs.
FloatingIPPoolName string
// NetworkName is the name of a network to launch the instance on.
NetworkName string
// ExternalNetworkID is the network ID of the external network.
ExternalNetworkID string
// ShareNetworkID is the Manila Share network ID
ShareNetworkID string
// DBDatastoreType is the datastore type for DB tests.
DBDatastoreType string
// DBDatastoreTypeID is the datastore type version for DB tests.
DBDatastoreVersion string
}
// AcceptanceTestChoicesFromEnv populates an AcceptanceTestChoices struct from environment variables.
// If any required state is missing, an `error` will be returned that enumerates the missing properties.
func AcceptanceTestChoicesFromEnv() (*AcceptanceTestChoices, error) {
imageID := os.Getenv("OS_IMAGE_ID")
flavorID := os.Getenv("OS_FLAVOR_ID")
flavorIDResize := os.Getenv("OS_FLAVOR_ID_RESIZE")
networkName := os.Getenv("OS_NETWORK_NAME")
floatingIPPoolName := os.Getenv("OS_POOL_NAME")
externalNetworkID := os.Getenv("OS_EXTGW_ID")
shareNetworkID := os.Getenv("OS_SHARE_NETWORK_ID")
dbDatastoreType := os.Getenv("OS_DB_DATASTORE_TYPE")
dbDatastoreVersion := os.Getenv("OS_DB_DATASTORE_VERSION")
missing := make([]string, 0, 3)
if imageID == "" {
missing = append(missing, "OS_IMAGE_ID")
}
if flavorID == "" {
missing = append(missing, "OS_FLAVOR_ID")
}
if flavorIDResize == "" {
missing = append(missing, "OS_FLAVOR_ID_RESIZE")
}
if floatingIPPoolName == "" {
missing = append(missing, "OS_POOL_NAME")
}
if externalNetworkID == "" {
missing = append(missing, "OS_EXTGW_ID")
}
if networkName == "" {
networkName = "private"
}
if shareNetworkID == "" {
missing = append(missing, "OS_SHARE_NETWORK_ID")
}
notDistinct := ""
if flavorID == flavorIDResize {
notDistinct = "OS_FLAVOR_ID and OS_FLAVOR_ID_RESIZE must be distinct."
}
if len(missing) > 0 || notDistinct != "" {
text := "You're missing some important setup:\n"
if len(missing) > 0 {
text += " * These environment variables must be provided: " + strings.Join(missing, ", ") + "\n"
}
if notDistinct != "" {
text += " * " + notDistinct + "\n"
}
return nil, fmt.Errorf(text)
}
return &AcceptanceTestChoices{
ImageID: imageID,
FlavorID: flavorID,
FlavorIDResize: flavorIDResize,
FloatingIPPoolName: floatingIPPoolName,
NetworkName: networkName,
ExternalNetworkID: externalNetworkID,
ShareNetworkID: shareNetworkID,
DBDatastoreType: dbDatastoreType,
DBDatastoreVersion: dbDatastoreVersion,
}, nil
}
// NewBlockStorageV1Client returns a *ServiceClient for making calls
// to the OpenStack Block Storage v1 API. An error will be returned
// if authentication or client creation was not possible.
func NewBlockStorageV1Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewBlockStorageV1(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewBlockStorageV2Client returns a *ServiceClient for making calls
// to the OpenStack Block Storage v2 API. An error will be returned
// if authentication or client creation was not possible.
func NewBlockStorageV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewBlockStorageV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewBlockStorageV2NoAuthClient returns a noauth *ServiceClient for
// making calls to the OpenStack Block Storage v2 API. An error will be
// returned if client creation was not possible.
func NewBlockStorageV2NoAuthClient() (*gophercloud.ServiceClient, error) {
client, err := noauth.NewClient(gophercloud.AuthOptions{
Username: os.Getenv("OS_USERNAME"),
TenantName: os.Getenv("OS_TENANT_NAME"),
})
if err != nil {
return nil, err
}
return noauth.NewBlockStorageV2(client, noauth.EndpointOpts{
CinderEndpoint: os.Getenv("CINDER_ENDPOINT"),
})
}
// NewComputeV2Client returns a *ServiceClient for making calls
// to the OpenStack Compute v2 API. An error will be returned
// if authentication or client creation was not possible.
func NewComputeV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewComputeV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewDBV1Client returns a *ServiceClient for making calls
// to the OpenStack Database v1 API. An error will be returned
// if authentication or client creation was not possible.
func NewDBV1Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewDBV1(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewDNSV2Client returns a *ServiceClient for making calls
// to the OpenStack DNS v2 API. An error will be returned
// if authentication or client creation was not possible.
func NewDNSV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewDNSV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewIdentityV2Client returns a *ServiceClient for making calls
// to the OpenStack Identity v2 API. An error will be returned
// if authentication or client creation was not possible.
func NewIdentityV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewIdentityV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewIdentityV2AdminClient returns a *ServiceClient for making calls
// to the Admin Endpoint of the OpenStack Identity v2 API. An error
// will be returned if authentication or client creation was not possible.
func NewIdentityV2AdminClient() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewIdentityV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
Availability: gophercloud.AvailabilityAdmin,
})
}
// NewIdentityV2UnauthenticatedClient returns an unauthenticated *ServiceClient
// for the OpenStack Identity v2 API. An error will be returned if
// authentication or client creation was not possible.
func NewIdentityV2UnauthenticatedClient() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.NewClient(ao.IdentityEndpoint)
if err != nil {
return nil, err
}
return openstack.NewIdentityV2(client, gophercloud.EndpointOpts{})
}
// NewIdentityV3Client returns a *ServiceClient for making calls
// to the OpenStack Identity v3 API. An error will be returned
// if authentication or client creation was not possible.
func NewIdentityV3Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewIdentityV3(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewIdentityV3UnauthenticatedClient returns an unauthenticated *ServiceClient
// for the OpenStack Identity v3 API. An error will be returned if
// authentication or client creation was not possible.
func NewIdentityV3UnauthenticatedClient() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.NewClient(ao.IdentityEndpoint)
if err != nil {
return nil, err
}
return openstack.NewIdentityV3(client, gophercloud.EndpointOpts{})
}
// NewImageServiceV2Client returns a *ServiceClient for making calls to the
// OpenStack Image v2 API. An error will be returned if authentication or
// client creation was not possible.
func NewImageServiceV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewImageServiceV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewNetworkV2Client returns a *ServiceClient for making calls to the
// OpenStack Networking v2 API. An error will be returned if authentication
// or client creation was not possible.
func NewNetworkV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewNetworkV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewObjectStorageV1Client returns a *ServiceClient for making calls to the
// OpenStack Object Storage v1 API. An error will be returned if authentication
// or client creation was not possible.
func NewObjectStorageV1Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
// NewSharedFileSystemV2Client returns a *ServiceClient for making calls
// to the OpenStack Shared File System v2 API. An error will be returned
// if authentication or client creation was not possible.
func NewSharedFileSystemV2Client() (*gophercloud.ServiceClient, error) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
return nil, err
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
return nil, err
}
return openstack.NewSharedFileSystemV2(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
}
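// Usage sketch (added comment, an assumed typical acceptance-test setup): the
// OS_* variables listed above are exported before building the clients.
//
//	choices, err := AcceptanceTestChoicesFromEnv()
//	if err != nil {
//		// e.g. OS_IMAGE_ID or OS_FLAVOR_ID is missing
//	}
//	computeClient, err := NewComputeV2Client()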
| [
"\"OS_IMAGE_ID\"",
"\"OS_FLAVOR_ID\"",
"\"OS_FLAVOR_ID_RESIZE\"",
"\"OS_NETWORK_NAME\"",
"\"OS_POOL_NAME\"",
"\"OS_EXTGW_ID\"",
"\"OS_SHARE_NETWORK_ID\"",
"\"OS_DB_DATASTORE_TYPE\"",
"\"OS_DB_DATASTORE_VERSION\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_USERNAME\"",
"\"OS_TENANT_NAME\"",
"\"CINDER_ENDPOINT\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\"",
"\"OS_REGION_NAME\""
]
| []
| [
"OS_FLAVOR_ID",
"OS_REGION_NAME",
"OS_USERNAME",
"OS_TENANT_NAME",
"OS_NETWORK_NAME",
"CINDER_ENDPOINT",
"OS_POOL_NAME",
"OS_EXTGW_ID",
"OS_SHARE_NETWORK_ID",
"OS_DB_DATASTORE_TYPE",
"OS_IMAGE_ID",
"OS_DB_DATASTORE_VERSION",
"OS_FLAVOR_ID_RESIZE"
]
| [] | ["OS_FLAVOR_ID", "OS_REGION_NAME", "OS_USERNAME", "OS_TENANT_NAME", "OS_NETWORK_NAME", "CINDER_ENDPOINT", "OS_POOL_NAME", "OS_EXTGW_ID", "OS_SHARE_NETWORK_ID", "OS_DB_DATASTORE_TYPE", "OS_IMAGE_ID", "OS_DB_DATASTORE_VERSION", "OS_FLAVOR_ID_RESIZE"] | go | 13 | 0 | |
locust/runners.py | # -*- coding: utf-8 -*-
import logging
import random
import socket
import sys
import traceback
from typing import Type, List
import warnings
from uuid import uuid4
from time import time
import gevent
import greenlet
import psutil
from gevent.pool import Group
from . import User
from .log import greenlet_exception_logger
from .rpc import Message, rpc
from .stats import RequestStats, setup_distributed_stats_event_listeners
from .exception import RPCError
from .user.task import LOCUST_STATE_STOPPING
logger = logging.getLogger(__name__)
STATE_INIT, STATE_SPAWNING, STATE_RUNNING, STATE_CLEANUP, STATE_STOPPING, STATE_STOPPED, STATE_MISSING = [
"ready",
"spawning",
"running",
"cleanup",
"stopping",
"stopped",
"missing",
]
WORKER_REPORT_INTERVAL = 3.0
CPU_MONITOR_INTERVAL = 5.0
HEARTBEAT_INTERVAL = 1
HEARTBEAT_LIVENESS = 3
FALLBACK_INTERVAL = 5
greenlet_exception_handler = greenlet_exception_logger(logger)
class Runner:
"""
Orchestrates the load test by starting and stopping the users.
Use one of the :meth:`create_local_runner <locust.env.Environment.create_local_runner>`,
:meth:`create_master_runner <locust.env.Environment.create_master_runner>` or
:meth:`create_worker_runner <locust.env.Environment.create_worker_runner>` methods on
the :class:`Environment <locust.env.Environment>` instance to create a runner of the
desired type.
"""
def __init__(self, environment):
self.environment = environment
self.user_greenlets = Group()
self.greenlet = Group()
self.state = STATE_INIT
self.spawning_greenlet = None
self.shape_greenlet = None
self.shape_last_state = None
self.current_cpu_usage = 0
self.cpu_warning_emitted = False
self.greenlet.spawn(self.monitor_cpu).link_exception(greenlet_exception_handler)
self.exceptions = {}
self.target_user_count = None
# set up event listeners for recording requests
def on_request_success(request_type, name, response_time, response_length, **kwargs):
self.stats.log_request(request_type, name, response_time, response_length)
def on_request_failure(request_type, name, response_time, response_length, exception, **kwargs):
self.stats.log_request(request_type, name, response_time, response_length)
self.stats.log_error(request_type, name, exception)
self.environment.events.request_success.add_listener(on_request_success)
self.environment.events.request_failure.add_listener(on_request_failure)
self.connection_broken = False
# register listener that resets stats when spawning is complete
def on_spawning_complete(user_count):
self.update_state(STATE_RUNNING)
if environment.reset_stats:
logger.info("Resetting stats\n")
self.stats.reset_all()
self.environment.events.spawning_complete.add_listener(on_spawning_complete)
def __del__(self):
# don't leave any stray greenlets if runner is removed
if self.greenlet and len(self.greenlet) > 0:
self.greenlet.kill(block=False)
@property
def user_classes(self):
return self.environment.user_classes
@property
def stats(self) -> RequestStats:
return self.environment.stats
@property
def errors(self):
return self.stats.errors
@property
def user_count(self):
"""
:returns: Number of currently running users
"""
return len(self.user_greenlets)
def update_state(self, new_state):
"""
Updates the current state
"""
# I (cyberwiz) commented out this logging, because it is too noisy even for debug level
# Uncomment it if you are specifically debugging state transitions
# logger.debug("Updating state to '%s', old state was '%s'" % (new_state, self.state))
self.state = new_state
def cpu_log_warning(self):
"""Called at the end of the test to repeat the warning & return the status"""
if self.cpu_warning_emitted:
logger.warning(
"CPU usage was too high at some point during the test! See https://docs.locust.io/en/stable/running-locust-distributed.html for how to distribute the load over multiple CPU cores or machines"
)
return True
return False
def weight_users(self, amount) -> List[Type[User]]:
"""
Distributes the amount of users for each WebLocust-class according to its weight
returns a list "bucket" with the weighted users
"""
bucket = []
weight_sum = sum([user.weight for user in self.user_classes])
residuals = {}
for user in self.user_classes:
if self.environment.host is not None:
user.host = self.environment.host
# create users depending on weight
percent = user.weight / float(weight_sum)
num_users = int(round(amount * percent))
bucket.extend([user for x in range(num_users)])
# used to keep track of the amount of rounding was done if we need
# to add/remove some instances from bucket
residuals[user] = amount * percent - round(amount * percent)
if len(bucket) < amount:
# We got too few User classes in the bucket, so we need to create a few extra users,
# and we do this by iterating over each of the User classes - starting with the one
# where the residual from the rounding was the largest - and creating one of each until
# we get the correct amount
for user in [l for l, r in sorted(residuals.items(), key=lambda x: x[1], reverse=True)][
: amount - len(bucket)
]:
bucket.append(user)
elif len(bucket) > amount:
# We've got too many users due to rounding errors so we need to remove some
for user in [l for l, r in sorted(residuals.items(), key=lambda x: x[1])][: len(bucket) - amount]:
bucket.remove(user)
return bucket
def spawn_users(self, spawn_count, spawn_rate, wait=False):
bucket = self.weight_users(spawn_count)
spawn_count = len(bucket)
if self.state == STATE_INIT or self.state == STATE_STOPPED:
self.update_state(STATE_SPAWNING)
existing_count = len(self.user_greenlets)
logger.info(
"Spawning %i users at the rate %g users/s (%i users already running)..."
% (spawn_count, spawn_rate, existing_count)
)
occurrence_count = dict([(l.__name__, 0) for l in self.user_classes])
def spawn():
sleep_time = 1.0 / spawn_rate
while True:
if not bucket:
logger.info(
"All users spawned: %s (%i total running)"
% (
", ".join(["%s: %d" % (name, count) for name, count in occurrence_count.items()]),
len(self.user_greenlets),
)
)
self.environment.events.spawning_complete.fire(user_count=len(self.user_greenlets))
return
user_class = bucket.pop(random.randint(0, len(bucket) - 1))
occurrence_count[user_class.__name__] += 1
new_user = user_class(self.environment)
new_user.start(self.user_greenlets)
if len(self.user_greenlets) % 10 == 0:
logger.debug("%i users spawned" % len(self.user_greenlets))
if bucket:
gevent.sleep(sleep_time)
spawn()
if wait:
self.user_greenlets.join()
logger.info("All users stopped\n")
def stop_users(self, user_count, stop_rate=None):
"""
Stop `user_count` weighted users at a rate of `stop_rate`
"""
if user_count == 0 or stop_rate == 0:
return
bucket = self.weight_users(user_count)
user_count = len(bucket)
to_stop = []
for user_greenlet in self.user_greenlets:
try:
user = user_greenlet.args[0]
except IndexError:
logger.error(
"While stopping users, we encountered a user that didnt have proper args %s", user_greenlet
)
continue
for user_class in bucket:
if isinstance(user, user_class):
to_stop.append(user)
bucket.remove(user_class)
break
if not to_stop:
return
if stop_rate is None or stop_rate >= user_count:
sleep_time = 0
logger.info("Stopping %i users" % (user_count))
else:
sleep_time = 1.0 / stop_rate
logger.info("Stopping %i users at rate of %g users/s" % (user_count, stop_rate))
async_calls_to_stop = Group()
stop_group = Group()
while True:
user_to_stop: User = to_stop.pop(random.randint(0, len(to_stop) - 1))
logger.debug("Stopping %s" % user_to_stop._greenlet.name)
if user_to_stop._greenlet is greenlet.getcurrent():
# User called runner.quit(), so don't block waiting for the kill to finish
user_to_stop._group.killone(user_to_stop._greenlet, block=False)
elif self.environment.stop_timeout:
async_calls_to_stop.add(gevent.spawn_later(0, User.stop, user_to_stop, force=False))
stop_group.add(user_to_stop._greenlet)
else:
async_calls_to_stop.add(gevent.spawn_later(0, User.stop, user_to_stop, force=True))
if to_stop:
gevent.sleep(sleep_time)
else:
break
async_calls_to_stop.join()
if not stop_group.join(timeout=self.environment.stop_timeout):
logger.info(
"Not all users finished their tasks & terminated in %s seconds. Stopping them..."
% self.environment.stop_timeout
)
stop_group.kill(block=True)
logger.info("%i Users have been stopped, %g still running" % (user_count, len(self.user_greenlets)))
def monitor_cpu(self):
process = psutil.Process()
while True:
self.current_cpu_usage = process.cpu_percent()
if self.current_cpu_usage > 90 and not self.cpu_warning_emitted:
logging.warning(
"CPU usage above 90%! This may constrain your throughput and may even give inconsistent response time measurements! See https://docs.locust.io/en/stable/running-locust-distributed.html for how to distribute the load over multiple CPU cores or machines"
)
self.cpu_warning_emitted = True
gevent.sleep(CPU_MONITOR_INTERVAL)
def start(self, user_count, spawn_rate, wait=False):
"""
Start running a load test
:param user_count: Total number of users to start
:param spawn_rate: Number of users to spawn per second
:param wait: If True calls to this method will block until all users are spawned.
If False (the default), a greenlet that spawns the users will be
started and the call to this method will return immediately.
"""
if self.state != STATE_RUNNING and self.state != STATE_SPAWNING:
self.stats.clear_all()
self.exceptions = {}
self.cpu_warning_emitted = False
self.worker_cpu_warning_emitted = False
self.target_user_count = user_count
if self.state != STATE_INIT and self.state != STATE_STOPPED:
logger.debug(
"Updating running test with %d users, %.2f spawn rate and wait=%r" % (user_count, spawn_rate, wait)
)
self.update_state(STATE_SPAWNING)
if self.user_count > user_count:
# Stop some users
stop_count = self.user_count - user_count
self.stop_users(stop_count, spawn_rate)
elif self.user_count < user_count:
# Spawn some users
spawn_count = user_count - self.user_count
self.spawn_users(spawn_count=spawn_count, spawn_rate=spawn_rate)
else:
self.environment.events.spawning_complete.fire(user_count=self.user_count)
else:
self.spawn_rate = spawn_rate
self.spawn_users(user_count, spawn_rate=spawn_rate, wait=wait)
def start_shape(self):
if self.shape_greenlet:
logger.info("There is an ongoing shape test running. Editing is disabled")
return
logger.info("Shape test starting. User count and spawn rate are ignored for this type of load test")
self.update_state(STATE_INIT)
self.shape_greenlet = self.greenlet.spawn(self.shape_worker)
self.shape_greenlet.link_exception(greenlet_exception_handler)
self.environment.shape_class.reset_time()
def shape_worker(self):
logger.info("Shape worker starting")
while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING:
new_state = self.environment.shape_class.tick()
if new_state is None:
logger.info("Shape test stopping")
if self.environment.parsed_options and self.environment.parsed_options.headless:
self.quit()
else:
self.stop()
elif self.shape_last_state == new_state:
gevent.sleep(1)
else:
user_count, spawn_rate = new_state
logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate))
self.start(user_count=user_count, spawn_rate=spawn_rate)
self.shape_last_state = new_state
def stop(self):
"""
Stop a running load test by stopping all running users
"""
logger.debug("Stopping all users")
self.update_state(STATE_CLEANUP)
# if we are currently spawning users we need to kill the spawning greenlet first
if self.spawning_greenlet and not self.spawning_greenlet.ready():
self.spawning_greenlet.kill(block=True)
self.stop_users(self.user_count)
self.update_state(STATE_STOPPED)
self.cpu_log_warning()
def quit(self):
"""
Stop any running load test and kill all greenlets for the runner
"""
self.stop()
self.greenlet.kill(block=True)
def log_exception(self, node_id, msg, formatted_tb):
key = hash(formatted_tb)
row = self.exceptions.setdefault(key, {"count": 0, "msg": msg, "traceback": formatted_tb, "nodes": set()})
row["count"] += 1
row["nodes"].add(node_id)
self.exceptions[key] = row
class LocalRunner(Runner):
"""
Runner for running single process load test
"""
def __init__(self, environment):
"""
:param environment: Environment instance
"""
super().__init__(environment)
# register listener that logs the exception for the local runner
def on_user_error(user_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.log_exception("local", str(exception), formatted_tb)
self.environment.events.user_error.add_listener(on_user_error)
def start(self, user_count, spawn_rate, wait=False):
self.target_user_count = user_count
if spawn_rate > 100:
logger.warning(
"Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
)
if self.state != STATE_RUNNING and self.state != STATE_SPAWNING:
# if we're not already running we'll fire the test_start event
self.environment.events.test_start.fire(environment=self.environment)
if self.spawning_greenlet:
# kill existing spawning_greenlet before we start a new one
self.spawning_greenlet.kill(block=True)
self.spawning_greenlet = self.greenlet.spawn(
lambda: super(LocalRunner, self).start(user_count, spawn_rate, wait=wait)
)
self.spawning_greenlet.link_exception(greenlet_exception_handler)
def stop(self):
if self.state == STATE_STOPPED:
return
super().stop()
self.environment.events.test_stop.fire(environment=self.environment)
class DistributedRunner(Runner):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
setup_distributed_stats_event_listeners(self.environment.events, self.stats)
class WorkerNode:
def __init__(self, id, state=STATE_INIT, heartbeat_liveness=HEARTBEAT_LIVENESS):
self.id = id
self.state = state
self.user_count = 0
self.heartbeat = heartbeat_liveness
self.cpu_usage = 0
self.cpu_warning_emitted = False
class MasterRunner(DistributedRunner):
"""
Runner used to run distributed load tests across multiple processes and/or machines.
MasterRunner doesn't spawn any user greenlets itself. Instead it expects
:class:`WorkerRunners <WorkerRunner>` to connect to it, which it will then direct
to start and stop user greenlets. Stats sent back from the
:class:`WorkerRunners <WorkerRunner>` will be aggregated.
"""
def __init__(self, environment, master_bind_host, master_bind_port):
"""
:param environment: Environment instance
:param master_bind_host: Host/interface to use for incoming worker connections
:param master_bind_port: Port to use for incoming worker connections
"""
super().__init__(environment)
self.worker_cpu_warning_emitted = False
self.master_bind_host = master_bind_host
self.master_bind_port = master_bind_port
class WorkerNodesDict(dict):
def get_by_state(self, state):
return [c for c in self.values() if c.state == state]
@property
def all(self):
return self.values()
@property
def ready(self):
return self.get_by_state(STATE_INIT)
@property
def spawning(self):
return self.get_by_state(STATE_SPAWNING)
@property
def running(self):
return self.get_by_state(STATE_RUNNING)
@property
def missing(self):
return self.get_by_state(STATE_MISSING)
self.clients = WorkerNodesDict()
try:
self.server = rpc.Server(master_bind_host, master_bind_port)
except RPCError as e:
if e.args[0] == "Socket bind failure: Address already in use":
port_string = f"{master_bind_host}:{master_bind_port}" if master_bind_host != "*" else str(master_bind_port)
logger.error(
f"The Locust master port ({port_string}) was busy. Close any applications using that port - perhaps an old instance of Locust master is still running? ({e.args[0]})"
)
sys.exit(1)
else:
raise
self.greenlet.spawn(self.heartbeat_worker).link_exception(greenlet_exception_handler)
self.greenlet.spawn(self.client_listener).link_exception(greenlet_exception_handler)
# listener that gathers info on how many users the worker has spawned
def on_worker_report(client_id, data):
if client_id not in self.clients:
logger.info("Discarded report from unrecognized worker %s", client_id)
return
self.clients[client_id].user_count = data["user_count"]
self.environment.events.worker_report.add_listener(on_worker_report)
# register listener that sends quit message to worker nodes
def on_quitting(environment, **kw):
self.quit()
self.environment.events.quitting.add_listener(on_quitting)
@property
def user_count(self):
return sum([c.user_count for c in self.clients.values()])
def cpu_log_warning(self):
warning_emitted = Runner.cpu_log_warning(self)
if self.worker_cpu_warning_emitted:
logger.warning("CPU usage threshold was exceeded on workers during the test!")
warning_emitted = True
return warning_emitted
def start(self, user_count, spawn_rate):
self.target_user_count = user_count
num_workers = len(self.clients.ready) + len(self.clients.running) + len(self.clients.spawning)
if not num_workers:
logger.warning(
"You are running in distributed mode but have no worker servers connected. "
"Please connect workers prior to swarming."
)
return
self.spawn_rate = spawn_rate
worker_num_users = user_count // (num_workers or 1)
worker_spawn_rate = float(spawn_rate) / (num_workers or 1)
remaining = user_count % num_workers
logger.info(
"Sending spawn jobs of %d users and %.2f spawn rate to %d ready clients"
% (worker_num_users, worker_spawn_rate, num_workers)
)
if worker_spawn_rate > 100:
logger.warning(
"Your selected spawn rate is very high (>100/worker), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
)
if self.state != STATE_RUNNING and self.state != STATE_SPAWNING:
self.stats.clear_all()
self.exceptions = {}
self.environment.events.test_start.fire(environment=self.environment)
if self.environment.shape_class:
self.environment.shape_class.reset_time()
for client in self.clients.ready + self.clients.running + self.clients.spawning:
data = {
"spawn_rate": worker_spawn_rate,
"num_users": worker_num_users,
"host": self.environment.host,
"stop_timeout": self.environment.stop_timeout,
}
if remaining > 0:
data["num_users"] += 1
remaining -= 1
logger.debug("Sending spawn message to client %s" % (client.id))
self.server.send_to_client(Message("spawn", data, client.id))
self.update_state(STATE_SPAWNING)
def stop(self):
if self.state not in [STATE_INIT, STATE_STOPPED, STATE_STOPPING]:
logger.debug("Stopping...")
self.update_state(STATE_STOPPING)
if self.environment.shape_class:
self.shape_last_state = None
for client in self.clients.all:
logger.debug("Sending stop message to client %s" % (client.id))
self.server.send_to_client(Message("stop", None, client.id))
self.environment.events.test_stop.fire(environment=self.environment)
def quit(self):
self.stop()
logger.debug("Quitting...")
for client in self.clients.all:
logger.debug("Sending quit message to client %s" % (client.id))
self.server.send_to_client(Message("quit", None, client.id))
gevent.sleep(0.5) # wait for final stats report from all workers
self.greenlet.kill(block=True)
def check_stopped(self):
if (
not self.state == STATE_INIT
and not self.state == STATE_STOPPED
and all(map(lambda x: x.state != STATE_RUNNING and x.state != STATE_SPAWNING, self.clients.all))
):
self.update_state(STATE_STOPPED)
def heartbeat_worker(self):
while True:
gevent.sleep(HEARTBEAT_INTERVAL)
if self.connection_broken:
self.reset_connection()
continue
for client in self.clients.all:
if client.heartbeat < 0 and client.state != STATE_MISSING:
logger.info("Worker %s failed to send heartbeat, setting state to missing." % str(client.id))
client.state = STATE_MISSING
client.user_count = 0
if self.worker_count <= 0:
logger.info("The last worker went missing, stopping test.")
self.stop()
self.check_stopped()
else:
client.heartbeat -= 1
def reset_connection(self):
logger.info("Reset connection to worker")
try:
self.server.close()
self.server = rpc.Server(self.master_bind_host, self.master_bind_port)
except RPCError as e:
logger.error("Temporary failure when resetting connection: %s, will retry later." % (e))
def client_listener(self):
while True:
try:
client_id, msg = self.server.recv_from_client()
except RPCError as e:
logger.error("RPCError found when receiving from client: %s" % (e))
self.connection_broken = True
gevent.sleep(FALLBACK_INTERVAL)
continue
self.connection_broken = False
msg.node_id = client_id
if msg.type == "client_ready":
id = msg.node_id
self.clients[id] = WorkerNode(id, heartbeat_liveness=HEARTBEAT_LIVENESS)
logger.info(
"Client %r reported as ready. Currently %i clients ready to swarm."
% (id, len(self.clients.ready + self.clients.running + self.clients.spawning))
)
if self.state == STATE_RUNNING or self.state == STATE_SPAWNING:
# balance the load distribution when new client joins
self.start(self.target_user_count, self.spawn_rate)
# emit a warning if the worker's clock seems to be out of sync with our clock
# if abs(time() - msg.data["time"]) > 5.0:
# warnings.warn("The worker node's clock seem to be out of sync. For the statistics to be correct the different locust servers need to have synchronized clocks.")
elif msg.type == "client_stopped":
del self.clients[msg.node_id]
logger.info("Removing %s client from running clients" % (msg.node_id))
elif msg.type == "heartbeat":
if msg.node_id in self.clients:
c = self.clients[msg.node_id]
c.heartbeat = HEARTBEAT_LIVENESS
client_state = msg.data["state"]
if c.state == STATE_MISSING:
logger.info(
"Worker %s self-healed with heartbeat, setting state to %s." % (str(c.id), client_state)
)
user_count = msg.data.get("count")
if user_count:
c.user_count = user_count
c.state = client_state
c.cpu_usage = msg.data["current_cpu_usage"]
if not c.cpu_warning_emitted and c.cpu_usage > 90:
self.worker_cpu_warning_emitted = True # used to fail the test in the end
c.cpu_warning_emitted = True # used to suppress logging for this node
logger.warning(
"Worker %s exceeded cpu threshold (will only log this once per worker)" % (msg.node_id)
)
elif msg.type == "stats":
self.environment.events.worker_report.fire(client_id=msg.node_id, data=msg.data)
elif msg.type == "spawning":
self.clients[msg.node_id].state = STATE_SPAWNING
elif msg.type == "spawning_complete":
self.clients[msg.node_id].state = STATE_RUNNING
self.clients[msg.node_id].user_count = msg.data["count"]
if len(self.clients.spawning) == 0:
count = sum(c.user_count for c in self.clients.values())
self.environment.events.spawning_complete.fire(user_count=count)
elif msg.type == "quit":
if msg.node_id in self.clients:
del self.clients[msg.node_id]
logger.info(
"Client %r quit. Currently %i clients connected." % (msg.node_id, len(self.clients.ready))
)
if self.worker_count - len(self.clients.missing) <= 0:
logger.info("The last worker quit, stopping test.")
self.stop()
if self.environment.parsed_options and self.environment.parsed_options.headless:
self.quit()
elif msg.type == "exception":
self.log_exception(msg.node_id, msg.data["msg"], msg.data["traceback"])
self.check_stopped()
@property
def worker_count(self):
return len(self.clients.ready) + len(self.clients.spawning) + len(self.clients.running)
class WorkerRunner(DistributedRunner):
"""
Runner used to run distributed load tests across multiple processes and/or machines.
WorkerRunner connects to a :class:`MasterRunner` from which it'll receive
instructions to start and stop user greenlets. The WorkerRunner will periodically
take the stats generated by the running users and send them back to the :class:`MasterRunner`.
"""
def __init__(self, environment, master_host, master_port):
"""
:param environment: Environment instance
:param master_host: Host/IP to use for connection to the master
:param master_port: Port to use for connecting to the master
"""
super().__init__(environment)
self.worker_state = STATE_INIT
self.client_id = socket.gethostname() + "_" + uuid4().hex
self.master_host = master_host
self.master_port = master_port
self.client = rpc.Client(master_host, master_port, self.client_id)
self.greenlet.spawn(self.heartbeat).link_exception(greenlet_exception_handler)
self.greenlet.spawn(self.worker).link_exception(greenlet_exception_handler)
self.client.send(Message("client_ready", None, self.client_id))
self.greenlet.spawn(self.stats_reporter).link_exception(greenlet_exception_handler)
# register listener for when all users have spawned, and report it to the master node
def on_spawning_complete(user_count):
self.client.send(Message("spawning_complete", {"count": user_count}, self.client_id))
self.worker_state = STATE_RUNNING
self.environment.events.spawning_complete.add_listener(on_spawning_complete)
# register listener that adds the current number of spawned users to the report that is sent to the master node
def on_report_to_master(client_id, data):
data["user_count"] = self.user_count
self.environment.events.report_to_master.add_listener(on_report_to_master)
# register listener that sends quit message to master
def on_quitting(environment, **kw):
self.client.send(Message("quit", None, self.client_id))
self.environment.events.quitting.add_listener(on_quitting)
# register listener that sends user exceptions to master
def on_user_error(user_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.client.send(Message("exception", {"msg": str(exception), "traceback": formatted_tb}, self.client_id))
self.environment.events.user_error.add_listener(on_user_error)
def heartbeat(self):
while True:
try:
self.client.send(
Message(
"heartbeat",
{
"state": self.worker_state,
"current_cpu_usage": self.current_cpu_usage,
"count": self.user_count,
},
self.client_id,
)
)
except RPCError as e:
logger.error("RPCError found when sending heartbeat: %s" % (e))
self.reset_connection()
gevent.sleep(HEARTBEAT_INTERVAL)
def reset_connection(self):
logger.info("Reset connection to master")
try:
self.client.close()
self.client = rpc.Client(self.master_host, self.master_port, self.client_id)
except RPCError as e:
logger.error("Temporary failure when resetting connection: %s, will retry later." % (e))
def worker(self):
while True:
try:
msg = self.client.recv()
except RPCError as e:
logger.error("RPCError found when receiving from master: %s" % (e))
continue
if msg.type == "spawn":
self.worker_state = STATE_SPAWNING
self.client.send(Message("spawning", None, self.client_id))
job = msg.data
self.spawn_rate = job["spawn_rate"]
self.target_user_count = job["num_users"]
self.environment.host = job["host"]
self.environment.stop_timeout = job["stop_timeout"]
if self.spawning_greenlet:
# kill existing spawning greenlet before we launch new one
self.spawning_greenlet.kill(block=True)
self.spawning_greenlet = self.greenlet.spawn(
lambda: self.start(user_count=job["num_users"], spawn_rate=job["spawn_rate"])
)
self.spawning_greenlet.link_exception(greenlet_exception_handler)
elif msg.type == "stop":
self.stop()
self.client.send(Message("client_stopped", None, self.client_id))
self.client.send(Message("client_ready", None, self.client_id))
self.worker_state = STATE_INIT
elif msg.type == "quit":
logger.info("Got quit message from master, shutting down...")
self.stop()
self._send_stats() # send a final report, in case there were any samples not yet reported
self.greenlet.kill(block=True)
def stats_reporter(self):
while True:
try:
self._send_stats()
except RPCError as e:
logger.error("Temporary connection lost to master server: %s, will retry later." % (e))
gevent.sleep(WORKER_REPORT_INTERVAL)
def _send_stats(self):
data = {}
self.environment.events.report_to_master.fire(client_id=self.client_id, data=data)
self.client.send(Message("stats", data, self.client_id))
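# Hedged usage sketch (added comment, not part of this module): runners are
# normally created through locust's Environment API; `MyUser` is a placeholder
# User subclass.
#
#   from locust.env import Environment
#   env = Environment(user_classes=[MyUser])
#   runner = env.create_local_runner()
#   runner.start(user_count=10, spawn_rate=2)
#   runner.greenlet.join()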
| []
| []
| []
| [] | [] | python | null | null | null |
demo/main.go | package main
import (
"html/template"
"os"
"github.com/gin-contrib/static"
"github.com/gin-gonic/gin"
"github.com/yuriizinets/go-ssc"
)
func funcmap() template.FuncMap {
return ssc.Funcs()
}
func main() {
g := gin.Default()
g.GET("/", func(c *gin.Context) {
ssc.RenderPage(c.Writer, &PageIndex{})
})
g.Use(static.Serve("/static/", static.LocalFile("./static", true)))
g.POST("/SSA/:Component/:Action", func(c *gin.Context) {
ssc.HandleSSA(
c.Writer,
template.Must(template.New(c.Param("Component")).Funcs(funcmap()).ParseGlob("*.html")),
c.Param("Component"),
c.PostForm("State"),
c.Param("Action"),
c.PostForm("Args"),
[]ssc.Component{
&ComponentCounter{},
&ComponentSampleBinding{},
&ComponentSampleParent{},
&ComponentSampleChild{},
},
)
})
addr := "localhost:25025"
if os.Getenv("PORT") != "" {
addr = ":" + os.Getenv("PORT")
}
g.Run(addr)
}
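// Usage sketch (added comment, an assumption about how the demo is started):
// the server listens on localhost:25025 unless PORT is set.
//
//	PORT=8080 go run ./demo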
| [
"\"PORT\"",
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
src/runtime/virtcontainers/pkg/rootless/rootless.go | // Copyright (c) 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2015-2019 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rootless
import (
"context"
"crypto/rand"
"fmt"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
var (
// isRootless states whether execution is rootless or not
// If nil, rootless is auto-detected
isRootless *bool
// lock for the initRootless and isRootless variables
rLock sync.Mutex
// XDG_RUNTIME_DIR defines the base directory relative to
// which user-specific non-essential runtime files are stored.
rootlessDir = os.Getenv("XDG_RUNTIME_DIR")
rootlessLog = logrus.WithFields(logrus.Fields{
"source": "rootless",
})
// IsRootless is declared this way for mocking in unit tests
IsRootless = isRootlessFunc
)
func SetRootless(rootless bool) {
isRootless = &rootless
}
// SetLogger sets up a logger for the rootless pkg
func SetLogger(ctx context.Context, logger *logrus.Entry) {
fields := rootlessLog.Data
rootlessLog = logger.WithFields(fields)
}
// isRootlessFunc states whether kata is being run as root or not
func isRootlessFunc() bool {
rLock.Lock()
defer rLock.Unlock()
// auto-detect if nil
if isRootless == nil {
SetRootless(true)
// --rootless and --systemd-cgroup options must be honoured
// but with the current implementation this is not possible
// https://github.com/kata-containers/runtime/issues/2412
if os.Geteuid() != 0 {
return true
}
if userns.RunningInUserNS() {
return true
}
SetRootless(false)
}
return *isRootless
}
// GetRootlessDir returns the path to the location for rootless
// container and sandbox storage
func GetRootlessDir() string {
return rootlessDir
}
// Creates a new persistent network namespace and returns an object
// representing that namespace, without switching to it
func NewNS() (ns.NetNS, error) {
nsRunDir := filepath.Join(GetRootlessDir(), "netns")
b := make([]byte, 16)
_, err := rand.Reader.Read(b)
if err != nil {
return nil, fmt.Errorf("failed to generate random netns name: %v", err)
}
// Create the directory for mounting network namespaces
// This needs to be a shared mountpoint in case it is mounted in to
// other namespaces (containers)
err = utils.MkdirAllWithInheritedOwner(nsRunDir, 0755)
if err != nil {
return nil, err
}
nsName := fmt.Sprintf("net-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
// create an empty file at the mount point
nsPath := filepath.Join(nsRunDir, nsName)
mountPointFd, err := os.Create(nsPath)
if err != nil {
return nil, err
}
if err := mountPointFd.Close(); err != nil {
return nil, err
}
// Ensure the mount point is cleaned up on errors; if the namespace
// was successfully mounted this will have no effect because the file
// is in-use
defer func() {
_ = os.RemoveAll(nsPath)
}()
var wg sync.WaitGroup
wg.Add(1)
// do namespace work in a dedicated goroutine, so that we can safely
// Lock/Unlock OSThread without upsetting the lock/unlock state of
// the caller of this function
go (func() {
defer wg.Done()
runtime.LockOSThread()
// Don't unlock. By not unlocking, golang will kill the OS thread when the
// goroutine is done (for go1.10+)
threadNsPath := getCurrentThreadNetNSPath()
var origNS ns.NetNS
origNS, err = ns.GetNS(threadNsPath)
if err != nil {
rootlessLog.Warnf("cannot open current network namespace %s: %q", threadNsPath, err)
return
}
defer func() {
if err := origNS.Close(); err != nil {
rootlessLog.Errorf("unable to close namespace: %q", err)
}
}()
// create a new netns on the current thread
err = unix.Unshare(unix.CLONE_NEWNET)
if err != nil {
rootlessLog.Warnf("cannot create a new network namespace: %q", err)
return
}
// Put this thread back to the orig ns, since it might get reused (pre go1.10)
defer func() {
if err := origNS.Set(); err != nil {
if IsRootless() && strings.Contains(err.Error(), "operation not permitted") {
// When running in rootless mode it will fail to re-join
// the network namespace owned by root on the host.
return
}
rootlessLog.Warnf("unable to reset namespace: %q", err)
}
}()
// bind mount the netns from the current thread (from /proc) onto the
// mount point. This causes the namespace to persist, even when there
// are no threads in the ns.
err = unix.Mount(threadNsPath, nsPath, "none", unix.MS_BIND, "")
if err != nil {
err = fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err)
}
})()
wg.Wait()
if err != nil {
unix.Unmount(nsPath, unix.MNT_DETACH)
return nil, fmt.Errorf("failed to create namespace: %v", err)
}
return ns.GetNS(nsPath)
}
// getCurrentThreadNetNSPath copied from pkg/ns
func getCurrentThreadNetNSPath() string {
// /proc/self/ns/net returns the namespace of the main thread, not
// of whatever thread this goroutine is running on. Make sure we
// use the thread's net namespace since the thread is switching around
return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
}
| [
"\"XDG_RUNTIME_DIR\""
]
| []
| [
"XDG_RUNTIME_DIR"
]
| [] | ["XDG_RUNTIME_DIR"] | go | 1 | 0 | |
rain_api_core/egress_util.py | import logging
import hmac
from hashlib import sha256
import os
import urllib
from datetime import datetime
log = logging.getLogger(__name__)
# This warning is stupid
# pylint: disable=logging-fstring-interpolation
def prepend_bucketname(name):
prefix = os.getenv('BUCKETNAME_PREFIX', "gsfc-ngap-{}-".format(os.getenv('MATURITY', 'DEV')[0:1].lower()))
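# e.g. with BUCKETNAME_PREFIX unset and MATURITY unset (default 'DEV'), the prefix is "gsfc-ngap-d-"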
return "{}{}".format(prefix, name)
def hmacsha256(key, string):
return hmac.new(key, string.encode('utf-8'), sha256)
def get_presigned_url(session, bucket_name, object_name, region_name, expire_seconds, user_id, method='GET'):
timez = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
datez = timez[:8]
hostname = "{0}.s3{1}.amazonaws.com".format(bucket_name, "."+region_name if region_name != "us-east-1" else "")
cred = session['Credentials']['AccessKeyId']
secret = session['Credentials']['SecretAccessKey']
token = session['Credentials']['SessionToken']
aws4_request = "/".join([datez, region_name, "s3", "aws4_request"])
cred_string = "{0}/{1}".format(cred, aws4_request)
# Canonical Query String Parts
parts = ["A-userid={0}".format(user_id),
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
"X-Amz-Credential="+urllib.parse.quote_plus(cred_string),
"X-Amz-Date="+timez,
"X-Amz-Expires={0}".format(expire_seconds),
"X-Amz-Security-Token="+urllib.parse.quote_plus(token),
"X-Amz-SignedHeaders=host"]
can_query_string = "&".join(parts)
# Canonical Request
can_req = method + "\n/" + object_name + "\n" + can_query_string + "\nhost:" + hostname + "\n\nhost\nUNSIGNED-PAYLOAD"
can_req_hash = sha256(can_req.encode('utf-8')).hexdigest()
# String to Sign
stringtosign = "\n".join(["AWS4-HMAC-SHA256", timez, aws4_request, can_req_hash])
# Signing Key
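# SigV4 key derivation chain: secret -> date key -> region key -> service ("s3") key -> signing key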
StepOne = hmacsha256( "AWS4{0}".format(secret).encode('utf-8'), datez).digest()
StepTwo = hmacsha256( StepOne, region_name ).digest()
StepThree = hmacsha256( StepTwo, "s3").digest()
SigningKey = hmacsha256( StepThree, "aws4_request").digest()
# Final Signature
Signature = hmacsha256(SigningKey, stringtosign).hexdigest()
# Dump URL
url = "https://" + hostname + "/" + object_name + "?" + can_query_string + "&X-Amz-Signature=" + Signature
return url
def get_bucket_dynamic_path(path_list, b_map):
# Old and REVERSE formats have no 'MAP'. In either case, we don't want it fouling our dict.
if 'MAP' in b_map:
map_dict = b_map['MAP']
else:
map_dict = b_map
mapping = []
log.debug("Pathparts is {0}".format(", ".join(path_list)))
# walk the bucket map to see if this path is valid
for path_part in path_list:
# Check if we hit a leaf of the YAML tree
if (mapping and isinstance(map_dict, str)) or 'bucket' in map_dict:
customheaders = {}
if isinstance(map_dict, dict) and 'bucket' in map_dict:
bucketname = map_dict['bucket']
if 'headers' in map_dict:
customheaders = map_dict['headers']
else:
bucketname = map_dict
log.debug(f'mapping: {mapping}')
# Pop mapping off path_list
for _ in mapping:
path_list.pop(0)
# Join the remaining bits together to form object_name
object_name = "/".join(path_list)
bucket_path = "/".join(mapping)
log.info("Bucket mapping was {0}, object was {1}".format(bucket_path, object_name))
return prepend_bucketname(bucketname), bucket_path, object_name, customheaders
if path_part in map_dict:
map_dict = map_dict[path_part]
mapping.append(path_part)
log.debug("Found {0}, Mapping is now {1}".format(path_part, "/".join(mapping)))
else:
log.warning("Could not find {0} in bucketmap".format(path_part))
log.debug('said bucketmap: {}'.format(map_dict))
return False, False, False, {}
# what? No path?
return False, False, False, {}
def process_varargs(varargs: list, b_map: dict):
"""
wrapper around process_request that returns legacy values to preserve backward compatibility
:param varargs: a list with the path to the file requested.
:param b_map: bucket map
:return: path, bucket, object_name
"""
log.warning('Deprecated process_varargs() called.')
path, bucket, object_name, _ = process_request(varargs, b_map)
return path, bucket, object_name
def process_request(varargs, b_map):
varargs = varargs.split("/")
# Make sure we got at least 1 path, and 1 file name:
if len(varargs) < 2:
return "/".join(varargs), None, None, []
# Watch for ASF-ish reverse URL mapping formats:
if len(varargs) == 3:
if os.getenv('USE_REVERSE_BUCKET_MAP', 'FALSE').lower() == 'true':
varargs[0], varargs[1] = varargs[1], varargs[0]
# Look up the bucket from path parts
bucket, path, object_name, headers = get_bucket_dynamic_path(varargs, b_map)
# If we didn't figure out the bucket, we don't know the path/object_name
if not bucket:
object_name = varargs.pop(-1)
path = "/".join(varargs)
return path, bucket, object_name, headers
def bucket_prefix_match(bucket_check, bucket_map, object_name=""):
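# e.g. bucket_prefix_match('data-bucket', 'data-bucket/browse', 'browse/img.png') -> True (illustrative names)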
log.debug(f"bucket_prefix_match(): checking if {bucket_check} matches {bucket_map} w/ optional obj '{object_name}'")
if bucket_check == bucket_map.split('/')[0] and object_name.startswith("/".join(bucket_map.split('/')[1:])):
log.debug(f"Prefixed Bucket Map matched: s3://{bucket_check}/{object_name} => {bucket_map}")
return True
return False
# Sort public/private buckets such that object-prefixes are processed FIRST
def get_sorted_bucket_list(b_map, bucket_group):
if bucket_group not in b_map:
# But why?!
log.warning(f"Bucket map does not contain bucket group '{bucket_group}'")
return []
# b_map[bucket_group] SHOULD be a dict, but list actually works too.
if isinstance(b_map[bucket_group], dict):
return sorted(list(b_map[bucket_group].keys()), key=lambda e: e.count("/"), reverse=True )
if isinstance(b_map[bucket_group], list):
return sorted(list(b_map[bucket_group]), key=lambda e: e.count("/"), reverse=True )
# Something went wrong.
return []
def check_private_bucket(bucket, b_map, object_name=""):
log.debug('check_private_buckets(): bucket: {}'.format(bucket))
# Check for PRIVATE_BUCKETS in the bucket map:
if 'PRIVATE_BUCKETS' in b_map:
# Prioritize prefixed buckets first, the deeper the better!
sorted_buckets = get_sorted_bucket_list(b_map, 'PRIVATE_BUCKETS')
log.debug(f"Sorted PRIVATE buckets are {sorted_buckets}")
for priv_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(priv_bucket), object_name):
# This bucket is PRIVATE, return group!
return b_map['PRIVATE_BUCKETS'][priv_bucket]
return False
def check_public_bucket(bucket, b_map, object_name=""):
# Check for PUBLIC_BUCKETS in bucket map file
if 'PUBLIC_BUCKETS' in b_map:
sorted_buckets = get_sorted_bucket_list(b_map, 'PUBLIC_BUCKETS')
log.debug(f"Sorted PUBLIC buckets are {sorted_buckets}")
for pub_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(pub_bucket), object_name):
# This bucket is public!
log.debug("found a public, we'll take it")
return True
# Did not find this in public bucket list
log.debug('we did not find a public bucket for {}'.format(bucket))
return False
| []
| []
| [
"BUCKETNAME_PREFIX",
"USE_REVERSE_BUCKET_MAP",
"MATURITY"
]
| [] | ["BUCKETNAME_PREFIX", "USE_REVERSE_BUCKET_MAP", "MATURITY"] | python | 3 | 0 | |
apis/core.oam.dev/v1beta1/core_types.go | /*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela-core-api/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela-core-api/apis/core.oam.dev/common"
)
// A WorkloadDefinitionSpec defines the desired state of a WorkloadDefinition.
type WorkloadDefinitionSpec struct {
// Reference to the CustomResourceDefinition that defines this workload kind.
Reference common.DefinitionReference `json:"definitionRef"`
// ChildResourceKinds are the list of GVK of the child resources this workload generates
ChildResourceKinds []common.ChildResourceKind `json:"childResourceKinds,omitempty"`
// RevisionLabel indicates which label for underlying resources(e.g. pods) of this workload
// can be used by trait to create resource selectors(e.g. label selector for pods).
// +optional
RevisionLabel string `json:"revisionLabel,omitempty"`
// PodSpecPath indicates where/if this workload has K8s podSpec field
// if a workload has a podSpec, traits can make many assumptions about fields such as port, env, and volume.
// +optional
PodSpecPath string `json:"podSpecPath,omitempty"`
// Status defines the custom health policy and status message for workload
// +optional
Status *common.Status `json:"status,omitempty"`
// Schematic defines the data format and template of the encapsulation of the workload
// +optional
Schematic *common.Schematic `json:"schematic,omitempty"`
// Extension is used for extension needs by OAM platform builders
// +optional
// +kubebuilder:pruning:PreserveUnknownFields
Extension *runtime.RawExtension `json:"extension,omitempty"`
}
// WorkloadDefinitionStatus is the status of WorkloadDefinition
type WorkloadDefinitionStatus struct {
condition.ConditionedStatus `json:",inline"`
}
// +kubebuilder:object:root=true
// A WorkloadDefinition registers a kind of Kubernetes custom resource as a
// valid OAM workload kind by referencing its CustomResourceDefinition. The CRD
// is used to validate the schema of the workload when it is embedded in an OAM
// Component.
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=workload
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="DEFINITION-NAME",type=string,JSONPath=".spec.definitionRef.name"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type WorkloadDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec WorkloadDefinitionSpec `json:"spec,omitempty"`
Status WorkloadDefinitionStatus `json:"status,omitempty"`
}
// SetConditions set condition for WorkloadDefinition
func (wd *WorkloadDefinition) SetConditions(c ...condition.Condition) {
wd.Status.SetConditions(c...)
}
// GetCondition gets condition from WorkloadDefinition
func (wd *WorkloadDefinition) GetCondition(conditionType condition.ConditionType) condition.Condition {
return wd.Status.GetCondition(conditionType)
}
// +kubebuilder:object:root=true
// WorkloadDefinitionList contains a list of WorkloadDefinition.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type WorkloadDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []WorkloadDefinition `json:"items"`
}
// A TraitDefinitionSpec defines the desired state of a TraitDefinition.
type TraitDefinitionSpec struct {
// Reference to the CustomResourceDefinition that defines this trait kind.
Reference common.DefinitionReference `json:"definitionRef,omitempty"`
// Revision indicates whether a trait is aware of component revision
// +optional
RevisionEnabled bool `json:"revisionEnabled,omitempty"`
// WorkloadRefPath indicates where/if a trait accepts a workloadRef object
// +optional
WorkloadRefPath string `json:"workloadRefPath,omitempty"`
// PodDisruptive specifies whether using the trait will cause the pod to restart or not.
// +optional
PodDisruptive bool `json:"podDisruptive,omitempty"`
// AppliesToWorkloads specifies the list of workload kinds this trait
// applies to. Workload kinds are specified in kind.group/version format,
// e.g. server.core.oam.dev/v1alpha2. Traits that omit this field apply to
// all workload kinds.
// +optional
AppliesToWorkloads []string `json:"appliesToWorkloads,omitempty"`
// ConflictsWith specifies the list of traits(CRD name, Definition name, CRD group)
// which could not apply to the same workloads with this trait.
// Traits that omit this field can work with any other traits.
// Example rules:
// "service" # Trait definition name
// "services.k8s.io" # API resource/crd name
// "*.networking.k8s.io" # API group
// "labelSelector:foo=bar" # label selector
// labelSelector format: https://pkg.go.dev/k8s.io/apimachinery/pkg/labels#Parse
// +optional
ConflictsWith []string `json:"conflictsWith,omitempty"`
// Schematic defines the data format and template of the encapsulation of the trait
// +optional
Schematic *common.Schematic `json:"schematic,omitempty"`
// Status defines the custom health policy and status message for trait
// +optional
Status *common.Status `json:"status,omitempty"`
// Extension is used for extension needs by OAM platform builders
// +optional
// +kubebuilder:pruning:PreserveUnknownFields
Extension *runtime.RawExtension `json:"extension,omitempty"`
// ManageWorkload defines whether the trait is responsible for creating the workload
// +optional
ManageWorkload bool `json:"manageWorkload,omitempty"`
// SkipRevisionAffect defines whether an update to this trait skips generating a new application revision
// +optional
SkipRevisionAffect bool `json:"skipRevisionAffect,omitempty"`
}
// TraitDefinitionStatus is the status of TraitDefinition
type TraitDefinitionStatus struct {
// ConditionedStatus reflects the observed status of a resource
condition.ConditionedStatus `json:",inline"`
// ConfigMapRef refer to a ConfigMap which contains OpenAPI V3 JSON schema of Component parameters.
ConfigMapRef string `json:"configMapRef,omitempty"`
// LatestRevision of the component definition
// +optional
LatestRevision *common.Revision `json:"latestRevision,omitempty"`
}
// +kubebuilder:object:root=true
// A TraitDefinition registers a kind of Kubernetes custom resource as a valid
// OAM trait kind by referencing its CustomResourceDefinition. The CRD is used
// to validate the schema of the trait when it is embedded in an OAM
// ApplicationConfiguration.
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=trait
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="APPLIES-TO",type=string,JSONPath=".spec.appliesToWorkloads"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TraitDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec TraitDefinitionSpec `json:"spec,omitempty"`
Status TraitDefinitionStatus `json:"status,omitempty"`
}
// SetConditions set condition for TraitDefinition
func (td *TraitDefinition) SetConditions(c ...condition.Condition) {
td.Status.SetConditions(c...)
}
// GetCondition gets condition from TraitDefinition
func (td *TraitDefinition) GetCondition(conditionType condition.ConditionType) condition.Condition {
return td.Status.GetCondition(conditionType)
}
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// TraitDefinitionList contains a list of TraitDefinition.
type TraitDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []TraitDefinition `json:"items"`
}
// A ScopeDefinitionSpec defines the desired state of a ScopeDefinition.
type ScopeDefinitionSpec struct {
// Reference to the CustomResourceDefinition that defines this scope kind.
Reference common.DefinitionReference `json:"definitionRef"`
// WorkloadRefsPath indicates if/where a scope accepts workloadRef objects
WorkloadRefsPath string `json:"workloadRefsPath,omitempty"`
// AllowComponentOverlap specifies whether an OAM component may exist in
// multiple instances of this kind of scope.
AllowComponentOverlap bool `json:"allowComponentOverlap"`
// Extension is used for extension needs by OAM platform builders
// +optional
// +kubebuilder:pruning:PreserveUnknownFields
Extension *runtime.RawExtension `json:"extension,omitempty"`
}
// +kubebuilder:object:root=true
// A ScopeDefinition registers a kind of Kubernetes custom resource as a valid
// OAM scope kind by referencing its CustomResourceDefinition. The CRD is used
// to validate the schema of the scope when it is embedded in an OAM
// ApplicationConfiguration.
// +kubebuilder:printcolumn:JSONPath=".spec.definitionRef.name",name=DEFINITION-NAME,type=string
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=scope
// +kubebuilder:storageversion
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScopeDefinitionSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ScopeDefinitionList contains a list of ScopeDefinition.
type ScopeDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ScopeDefinition `json:"items"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// A ResourceTracker represents a tracker for tracking cross-namespace resources
// +kubebuilder:resource:scope=Cluster,categories={oam},shortName=tracker
type ResourceTracker struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Status ResourceTrackerStatus `json:"status,omitempty"`
}
// ResourceTrackerStatus define the status of resourceTracker
type ResourceTrackerStatus struct {
TrackedResources []corev1.ObjectReference `json:"trackedResources,omitempty"`
}
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceTrackerList contains a list of ResourceTracker
type ResourceTrackerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ResourceTracker `json:"items"`
}
| []
| []
| []
| [] | [] | go | null | null | null |
monitor-agent/archive_mysql_tool/funcs/config.go | package funcs
import (
"sync"
"log"
"os"
"io/ioutil"
"strings"
"encoding/json"
)
type MysqlConfig struct {
Type string `json:"type"`
Server string `json:"server"`
Port string `json:"port"`
User string `json:"user"`
Password string `json:"password"`
DataBase string `json:"database"`
DatabasePrefix string `json:"database_prefix"`
MaxOpen int `json:"maxOpen"`
MaxIdle int `json:"maxIdle"`
Timeout int `json:"timeout"`
}
type PrometheusConfig struct {
Server string `json:"server"`
Port int `json:"port"`
MaxHttpOpen int `json:"max_http_open"`
MaxHttpIdle int `json:"max_http_idle"`
HttpIdleTimeout int `json:"http_idle_timeout"`
QueryStep int `json:"query_step"`
IgnoreTags []string `json:"ignore_tags"`
}
type MonitorConfig struct {
Mysql MysqlConfig `json:"mysql"`
}
type TransConfig struct {
MaxUnitSpeed int `json:"max_unit_speed"`
FiveMinStartDay int64 `json:"five_min_start_day"`
}
type HttpConfig struct {
Enable bool `json:"enable"`
Port int `json:"port"`
}
type GlobalConfig struct {
Enable string `json:"enable"`
Mysql MysqlConfig `json:"mysql"`
Prometheus PrometheusConfig `json:"prometheus"`
Monitor MonitorConfig `json:"monitor"`
Trans TransConfig `json:"trans"`
Http HttpConfig `json:"http"`
}
var (
config *GlobalConfig
lock = new(sync.RWMutex)
)
func Config() *GlobalConfig {
lock.RLock()
defer lock.RUnlock()
return config
}
func InitConfig(cfg string) error {
if cfg == "" {
log.Println("use -c to specify configuration file")
}
_, err := os.Stat(cfg)
if os.IsNotExist(err) {
log.Println("config file not found")
return err
}
b, err := ioutil.ReadFile(cfg)
if err != nil {
log.Printf("read file %s error %v \n", cfg, err)
return err
}
configContent := strings.TrimSpace(string(b))
var c GlobalConfig
err = json.Unmarshal([]byte(configContent), &c)
if err != nil {
log.Println("parse config file:", cfg, "fail:", err)
return err
}
lock.Lock()
config = &c
log.Println("read config file:", cfg, "successfully")
lock.Unlock()
hostIp = "127.0.0.1"
if os.Getenv("MONITOR_HOST_IP") != "" {
hostIp = os.Getenv("MONITOR_HOST_IP")
}
return nil
}
| [
"\"MONITOR_HOST_IP\"",
"\"MONITOR_HOST_IP\""
]
| []
| [
"MONITOR_HOST_IP"
]
| [] | ["MONITOR_HOST_IP"] | go | 1 | 0 | |
cmd/config.go | package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/spf13/cobra"
)
const kubensConfigFile = ".kubens.yaml"
// configCmd represents the config command
var configCmd = &cobra.Command{
Use: "config",
Short: "modify kubens configuration",
Long: `
config command allows configuration of ~/.kubens.yaml for running kubens`,
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
}
var configAddCmd = &cobra.Command{
Use: "add",
Short: "add config to ~/.kubens.yaml",
Long: `
Adds config to ~/.kubens.yaml`,
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
}
var configViewCmd = &cobra.Command{
Use: "view",
Short: "view ~/.kubens.yaml",
Long: `
Display the contents of ~/.kubens.yaml`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Fprintln(os.Stderr, "Contents of ~/.kubens.yaml")
configFile, err := ioutil.ReadFile(filepath.Join(os.Getenv("HOME"), kubensConfigFile))
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
fmt.Print(string(configFile))
},
}
func init() {
RootCmd.AddCommand(configCmd)
configCmd.AddCommand(
configAddCmd,
configViewCmd,
)
configAddCmd.AddCommand(
netpolConfigCmd,
)
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
src/olympia/versions/tests/test_views.py | # -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.test.utils import override_settings
from django.utils.http import urlquote
import mock
import pytest
from pyquery import PyQuery
from olympia import amo
from olympia.access import acl
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon
from olympia.amo.templatetags.jinja_helpers import user_media_url
from olympia.amo.tests import TestCase, addon_factory, version_factory
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlencode, urlparams
from olympia.files.models import File
from olympia.users.models import UserProfile
from olympia.versions import views
class TestViews(TestCase):
def setUp(self):
super(TestViews, self).setUp()
self.addon = addon_factory(
slug=u'my-addôn', file_kw={'size': 1024},
version_kw={'version': '1.0'})
self.version = self.addon.current_version
self.addon.current_version.update(created=self.days_ago(3))
self.url_list = reverse('addons.versions', args=[self.addon.slug])
self.url_detail = reverse(
'addons.versions',
args=[self.addon.slug, self.addon.current_version.version])
@mock.patch.object(views, 'PER_PAGE', 1)
def test_version_detail(self):
version = version_factory(addon=self.addon, version='2.0')
version.update(created=self.days_ago(2))
version = version_factory(addon=self.addon, version='2.1')
version.update(created=self.days_ago(1))
urls = [(v.version, reverse('addons.versions',
args=[self.addon.slug, v.version]))
for v in self.addon.versions.all()]
version, url = urls[0]
assert version == '2.1'
response = self.client.get(url, follow=True)
self.assert3xx(
response, self.url_list + '?page=1#version-%s' % version)
version, url = urls[1]
assert version == '2.0'
response = self.client.get(url, follow=True)
self.assert3xx(
response, self.url_list + '?page=2#version-%s' % version)
version, url = urls[2]
assert version == '1.0'
response = self.client.get(url, follow=True)
self.assert3xx(
response, self.url_list + '?page=3#version-%s' % version)
# We are overriding this here for now till
# https://github.com/mozilla/addons-server/issues/8602 is fixed.
@override_settings(CACHES={'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': os.environ.get('MEMCACHE_LOCATION', 'localhost:11211')
}})
def test_version_detail_cache_key_normalized(self):
"""Test regression with memcached cache-key.
https://github.com/mozilla/addons-server/issues/8622
"""
url = reverse(
'addons.versions', args=[self.addon.slug, u'Âûáèðàåì âåðñèþ 23.0'])
response = self.client.get(url, follow=True)
assert response.status_code == 404
def test_version_detail_404(self):
bad_pk = self.addon.current_version.pk + 42
response = self.client.get(reverse('addons.versions',
args=[self.addon.slug, bad_pk]))
assert response.status_code == 404
bad_pk = u'lolé'
response = self.client.get(reverse('addons.versions',
args=[self.addon.slug, bad_pk]))
assert response.status_code == 404
def get_content(self):
response = self.client.get(self.url_list)
assert response.status_code == 200
return PyQuery(response.content)
@pytest.mark.xfail(reason='Temporarily hidden, #5431')
def test_version_source(self):
self.addon.update(view_source=True)
assert len(self.get_content()('a.source-code')) == 1
def test_version_no_source_one(self):
self.addon.update(view_source=False)
assert len(self.get_content()('a.source-code')) == 0
def test_version_addon_not_public(self):
self.addon.update(view_source=True, status=amo.STATUS_NULL)
response = self.client.get(self.url_list)
assert response.status_code == 404
def test_version_link(self):
version = self.addon.current_version.version
doc = self.get_content()
link = doc('.version h3 > a').attr('href')
assert link == self.url_detail
assert doc('.version').attr('id') == 'version-%s' % version
def test_version_list_button_shows_download_anyway(self):
first_version = self.addon.current_version
first_version.update(created=self.days_ago(1))
first_file = first_version.files.all()[0]
second_version = version_factory(addon=self.addon, version='2.0')
second_file = second_version.files.all()[0]
doc = self.get_content()
links = doc('.download-anyway a')
assert links
assert links[0].attrib['href'] == second_file.get_url_path(
'version-history', attachment=True)
assert links[1].attrib['href'] == first_file.get_url_path(
'version-history', attachment=True)
def test_version_list_doesnt_show_unreviewed_versions_public_addon(self):
version = self.addon.current_version.version
version_factory(
addon=self.addon, file_kw={'status': amo.STATUS_AWAITING_REVIEW},
version='2.1')
doc = self.get_content()
assert len(doc('.version')) == 1
assert doc('.version').attr('id') == 'version-%s' % version
def test_version_list_does_show_unreviewed_versions_unreviewed_addon(self):
version = self.addon.current_version.version
file_ = self.addon.current_version.files.all()[0]
file_.update(status=amo.STATUS_AWAITING_REVIEW)
doc = self.get_content()
assert len(doc('.version')) == 1
assert doc('.version').attr('id') == 'version-%s' % version
def test_version_list_for_unlisted_addon_returns_404(self):
"""Unlisted addons are not listed and have no version list."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url_list).status_code == 404
def test_version_detail_does_not_return_unlisted_versions(self):
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url_detail)
assert response.status_code == 404
def test_version_list_file_size_uses_binary_prefix(self):
response = self.client.get(self.url_list)
assert '1.0 KiB' in response.content
def test_version_list_no_compat_displayed_if_not_necessary(self):
doc = self.get_content()
compat_info = doc('.compat').text()
assert compat_info
assert 'Firefox 4.0.99 and later' in compat_info
self.addon.update(type=amo.ADDON_DICT)
doc = self.get_content()
compat_info = doc('.compat').text()
assert not compat_info
def test_version_update_info(self):
self.version.releasenotes = {
'en-US': u'Fix for an important bug',
'fr': u'Quelque chose en français.\n\nQuelque chose d\'autre.'
}
self.version.save()
file_ = self.version.files.all()[0]
file_.update(platform=amo.PLATFORM_WIN.id)
# Copy the file to create a new one attached to the same version.
# This tests https://github.com/mozilla/addons-server/issues/8950
file_.pk = None
file_.platform = amo.PLATFORM_MAC.id
file_.save()
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
# pyquery is annoying to use with XML and namespaces. Use the HTML
# parser, but do check that xmlns attribute is present (required by
# Firefox for the notes to be shown properly).
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == 'Fix for an important bug'
# Test update info in another language.
with self.activate(locale='fr'):
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
assert '<br/>' in response.content, (
'Should be using XHTML self-closing tags!')
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == (
u"Quelque chose en français.<br/><br/>Quelque chose d'autre.")
def test_version_update_info_legacy_redirect(self):
response = self.client.get('/versions/updateInfo/%s' % self.version.id,
follow=True)
url = reverse('addons.versions.update_info',
args=(self.version.addon.slug, self.version.version))
self.assert3xx(response, url, 301)
def test_version_update_info_legacy_redirect_deleted(self):
self.version.delete()
response = self.client.get(
'/en-US/firefox/versions/updateInfo/%s' % self.version.id)
assert response.status_code == 404
def test_version_update_info_no_unlisted(self):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 404
class TestDownloadsBase(TestCase):
fixtures = ['base/addon_5299_gcal', 'base/users']
def setUp(self):
super(TestDownloadsBase, self).setUp()
self.addon = Addon.objects.get(id=5299)
self.file = File.objects.get(id=33046)
self.file_url = reverse('downloads.file', args=[self.file.id])
self.latest_url = reverse('downloads.latest', args=[self.addon.slug])
def assert_served_by_host(self, response, host, file_=None):
if not file_:
file_ = self.file
assert response.status_code == 302
assert response.url == (
urlparams('%s%s/%s' % (
host, self.addon.id, urlquote(file_.filename)
), filehash=file_.hash))
assert response['X-Target-Digest'] == file_.hash
def assert_served_internally(self, response, guarded=True):
assert response.status_code == 200
file_path = (self.file.guarded_file_path if guarded else
self.file.file_path)
assert response[settings.XSENDFILE_HEADER] == file_path
def assert_served_locally(self, response, file_=None, attachment=False):
path = user_media_url('addons')
if attachment:
path += '_attachments/'
self.assert_served_by_host(response, path, file_)
def assert_served_by_cdn(self, response, file_=None):
assert response.url.startswith(settings.MEDIA_URL)
assert response.url.startswith('http')
self.assert_served_by_host(response, user_media_url('addons'), file_)
class TestDownloadsUnlistedVersions(TestDownloadsBase):
def setUp(self):
super(TestDownloadsUnlistedVersions, self).setUp()
self.make_addon_unlisted(self.addon)
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_returns_404(self):
"""File downloading isn't allowed for unlisted addons."""
assert self.client.get(self.file_url).status_code == 404
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: True)
def test_download_for_unlisted_addon_owner(self):
"""File downloading is allowed for addon owners."""
self.assert_served_internally(self.client.get(self.file_url), False)
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_reviewer(self):
"""File downloading isn't allowed for reviewers."""
assert self.client.get(self.file_url).status_code == 404
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_unlisted_reviewer(self):
"""File downloading is allowed for unlisted reviewers."""
self.assert_served_internally(self.client.get(self.file_url), False)
assert self.client.get(self.latest_url).status_code == 404
class TestDownloads(TestDownloadsBase):
def test_file_404(self):
r = self.client.get(reverse('downloads.file', args=[234]))
assert r.status_code == 404
def test_public(self):
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_PUBLIC
self.assert_served_by_cdn(self.client.get(self.file_url))
def test_public_addon_unreviewed_file(self):
self.file.status = amo.STATUS_AWAITING_REVIEW
self.file.save()
self.assert_served_locally(self.client.get(self.file_url))
def test_unreviewed_addon(self):
self.addon.status = amo.STATUS_PENDING
self.addon.save()
self.assert_served_locally(self.client.get(self.file_url))
def test_type_attachment(self):
self.assert_served_by_cdn(self.client.get(self.file_url))
url = reverse('downloads.file', args=[self.file.id, 'attachment'])
self.assert_served_locally(self.client.get(url), attachment=True)
def test_nonbrowser_app(self):
url = self.file_url.replace('firefox', 'thunderbird')
self.assert_served_locally(self.client.get(url), attachment=True)
def test_trailing_filename(self):
url = self.file_url + self.file.filename
self.assert_served_by_cdn(self.client.get(url))
def test_null_datestatuschanged(self):
self.file.update(datestatuschanged=None)
self.assert_served_locally(self.client.get(self.file_url))
def test_unicode_url(self):
self.file.update(filename=u'图像浏览器-0.5-fx.xpi')
self.assert_served_by_cdn(self.client.get(self.file_url))
class TestDisabledFileDownloads(TestDownloadsBase):
def test_admin_disabled_404(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.get(self.file_url).status_code == 404
def test_user_disabled_404(self):
self.addon.update(disabled_by_user=True)
assert self.client.get(self.file_url).status_code == 404
def test_file_disabled_anon_404(self):
self.file.update(status=amo.STATUS_DISABLED)
assert self.client.get(self.file_url).status_code == 404
def test_file_disabled_unprivileged_404(self):
assert self.client.login(email='[email protected]')
self.file.update(status=amo.STATUS_DISABLED)
assert self.client.get(self.file_url).status_code == 404
def test_file_disabled_ok_for_author(self):
self.file.update(status=amo.STATUS_DISABLED)
assert self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
def test_file_disabled_ok_for_reviewer(self):
self.file.update(status=amo.STATUS_DISABLED)
self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
def test_file_disabled_ok_for_admin(self):
self.file.update(status=amo.STATUS_DISABLED)
self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
def test_admin_disabled_ok_for_author(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
def test_admin_disabled_ok_for_admin(self):
self.addon.update(status=amo.STATUS_DISABLED)
self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
def test_user_disabled_ok_for_author(self):
self.addon.update(disabled_by_user=True)
assert self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
def test_user_disabled_ok_for_admin(self):
self.addon.update(disabled_by_user=True)
self.client.login(email='[email protected]')
self.assert_served_internally(self.client.get(self.file_url))
class TestUnlistedDisabledFileDownloads(TestDisabledFileDownloads):
def setUp(self):
super(TestDisabledFileDownloads, self).setUp()
self.make_addon_unlisted(self.addon)
self.grant_permission(
UserProfile.objects.get(email='[email protected]'),
'Addons:ReviewUnlisted')
class TestDownloadsLatest(TestDownloadsBase):
def setUp(self):
super(TestDownloadsLatest, self).setUp()
self.platform = 5
def test_404(self):
url = reverse('downloads.latest', args=[123])
assert self.client.get(url).status_code == 404
def test_type_none(self):
r = self.client.get(self.latest_url)
assert r.status_code == 302
url = '%s?%s' % (self.file.filename,
urlencode({'filehash': self.file.hash}))
assert r['Location'].endswith(url), r['Location']
def test_success(self):
assert self.addon.current_version
self.assert_served_by_cdn(self.client.get(self.latest_url))
def test_platform(self):
# We still match PLATFORM_ALL.
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 5})
self.assert_served_by_cdn(self.client.get(url))
# And now we match the platform in the url.
self.file.platform = self.platform
self.file.save()
self.assert_served_by_cdn(self.client.get(url))
# But we can't match platform=3.
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 3})
assert self.client.get(url).status_code == 404
def test_type(self):
url = reverse('downloads.latest', kwargs={'addon_id': self.addon.slug,
'type': 'attachment'})
self.assert_served_locally(self.client.get(url), attachment=True)
def test_platform_and_type(self):
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 5,
'type': 'attachment'})
self.assert_served_locally(self.client.get(url), attachment=True)
def test_trailing_filename(self):
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 5,
'type': 'attachment'})
url += self.file.filename
self.assert_served_locally(self.client.get(url), attachment=True)
def test_platform_multiple_objects(self):
f = File.objects.create(platform=3, version=self.file.version,
filename='unst.xpi', status=self.file.status)
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 3})
self.assert_served_locally(self.client.get(url), file_=f)
@override_settings(XSENDFILE=True)
class TestDownloadSource(TestCase):
fixtures = ['base/addon_3615', 'base/admin']
def setUp(self):
super(TestDownloadSource, self).setUp()
self.addon = Addon.objects.get(pk=3615)
# Make sure non-ascii is ok.
self.addon.update(slug=u'crosswarpex-확장')
self.version = self.addon.current_version
tdir = temp.gettempdir()
self.source_file = temp.NamedTemporaryFile(suffix=".zip", dir=tdir)
self.source_file.write('a' * (2 ** 21))
self.source_file.seek(0)
self.version.source = DjangoFile(self.source_file)
self.version.save()
self.filename = os.path.basename(self.version.source.path)
self.user = UserProfile.objects.get(email="[email protected]")
self.group = Group.objects.create(
name='Editors BinarySource',
rules='Editors:BinarySource'
)
self.url = reverse('downloads.source', args=(self.version.pk, ))
def test_owner_should_be_allowed(self):
self.client.login(email=self.user.email)
response = self.client.get(self.url)
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER]
assert 'Content-Disposition' in response
filename = self.filename
if not isinstance(filename, unicode):
filename = filename.decode('utf8')
assert filename in response['Content-Disposition'].decode('utf8')
path = self.version.source.path
if not isinstance(path, unicode):
path = path.decode('utf8')
assert response[settings.XSENDFILE_HEADER].decode('utf8') == path
def test_anonymous_should_not_be_allowed(self):
response = self.client.get(self.url)
assert response.status_code == 404
def test_deleted_version(self):
self.version.delete()
GroupUser.objects.create(user=self.user, group=self.group)
self.client.login(email=self.user.email)
response = self.client.get(self.url)
assert response.status_code == 404
def test_group_binarysource_should_be_allowed(self):
GroupUser.objects.create(user=self.user, group=self.group)
self.client.login(email=self.user.email)
response = self.client.get(self.url)
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER]
assert 'Content-Disposition' in response
filename = self.filename
if not isinstance(filename, unicode):
filename = filename.decode('utf8')
assert filename in response['Content-Disposition'].decode('utf8')
path = self.version.source.path
if not isinstance(path, unicode):
path = path.decode('utf8')
assert response[settings.XSENDFILE_HEADER].decode('utf8') == path
def test_no_source_should_go_in_404(self):
self.version.source = None
self.version.save()
response = self.client.get(self.url)
assert response.status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_returns_404(self):
"""File downloading isn't allowed for unlisted addons."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: True)
def test_download_for_unlisted_addon_owner(self):
"""File downloading is allowed for addon owners."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 200
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_reviewer(self):
"""File downloading isn't allowed for reviewers."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_unlisted_reviewer(self):
"""File downloading is allowed for unlisted reviewers."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 200
| []
| []
| [
"MEMCACHE_LOCATION"
]
| [] | ["MEMCACHE_LOCATION"] | python | 1 | 0 | |
packages.py | """helper for setup.py"""
import os
import sys
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Natural Language :: English',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
PYTHON_REQUIRES = '>=3.7'
# features in packages used by pyNastran
# numpy
# - 1.12 min for 3.6
# - 1.13: adds axis support to unique
# - 1.14: adds encoding support to savetxt (unused)
# - 1.14: adds proper writing of np.savetxt for open file objects
# (used for unicode savetxt using with statement) in Python 3.6
# - 1.15: min for Python 3.7? I guess 1.14 is fine for a requirement...
# - 1.19.4 : buggy
# scipy:
# - 0.18.1: fixed kdtree used by nodal equivalencing; min for Python 2.7
# - 0.19: min for Python 3.6
# - 0.19: min for Python 3.7?; last 0.x release
# matplotlib:
# - 1.5: min for Python 2.7; last 1.x release
# - 2.0: adds C0/C1 colors (use colors from default colormap);
# min for Python 3.6
# - 2.1: adds plt.subplots support (untested?)
# - 2.2: min for Python 3.7
# the packages that change requirements based on python version
REQS = {
'3.7' : {
'numpy' : ('1.14', '>=1.14,!=1.19.4'),
'scipy' : ('1.0', '>=1.0'),
'matplotlib' : ('2.2', '>=2.2'), # 2.2.4 adds Python 3.7 support
},
'3.8' : { # TODO: not updated
'numpy' : ('1.14', '>=1.14,!=1.19.4'),
'scipy' : ('1.0', '>=1.0'),
'matplotlib' : ('2.2', '>=2.2'), # 2.2.4 adds Python 3.7 support
},
}
def check_python_version():
"""verifies the python version"""
imajor, minor1, minor2 = sys.version_info[:3]
if sys.version_info < (3, 7, 0): # 3.7.4 used
sys.exit('Upgrade your Python to 3.7+; version=(%s.%s.%s)' % (
imajor, minor1, minor2))
def int_version(name, version):
"""splits the version string into a list of integers"""
sversion = version.split('-')[0]
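# e.g. int_version('numpy', '1.19.4') -> [1, 19, 4]; int_version('matplotlib', '3.1rc1') -> [3, 1]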
#numpy
#scipy
#matplotlib
#qtpy
#vtk
#cpylog
#pyNastran
if 'rc' not in name:
# it's gotta be something...
# matplotlib3.1rc1
sversion = sversion.split('rc')[0]
try:
return [int(val) for val in sversion.split('.')]
except ValueError:
raise SyntaxError('cannot determine version for %s %s' % (name, sversion))
def str_version(version):
"""converts a sequence of integers to a version string"""
return '.'.join(str(versioni) for versioni in version)
def get_package_requirements(is_gui: bool=True, add_vtk_qt: bool=True,
python_version: str=None, bdist: bool=False):
"""
Gets the requirements for setup.py
Parameters
----------
is_gui: bool; default=True
add matplotlib, qtpy, pillow, imageio
not vtk or pyqt/pyside because it's harder to install
python_version: str; default=None -> sys.version_info
allows us to get dynamic requirements
bdist: bool; default=False
loosen the requirements on numpy, scipy, etc.
"""
if python_version is None:
python_version = '%s.%s' % sys.version_info[:2]
if python_version not in REQS:
python_version = '3.7'
vreqs = REQS[python_version]
all_reqs = {}
#is_dev = (
#'TRAVIS' in os.environ or
#'APPVEYOR' in os.environ or
#'READTHEDOCS' in os.environ
#)
is_continuous_integration = (
'TRAVIS' in os.environ or
'TRAVIS_PYTHON_VERSION' in os.environ or
'GITHUB_ACTOR' in os.environ
)
#user_name = getpass.getuser()
#user_name not in ['travis']
is_rtd = 'READTHEDOCS' in os.environ
#if is_dev or is_gui:
#try:
#import vtk
#vtk_version = '.'.join(vtk.VTK_VERSION.split('.'))
#all_reqs['vtk'] = vtk_version
#if vtk_version < '7.0.0':
#print("vtk.VTK_VERSION = %r < '7.0.0'" % vtk.VTK_VERSION)
#install_requires.append('vtk >= 7.0.0')
#except ImportError:
#install_requires.append('vtk >= 7.0.0') # 8.x used
install_requires = []
if is_rtd:
install_requires.append('numpy')
else:
version_check, required_version = vreqs['numpy']
if bdist:
all_reqs['numpy'] = required_version
install_requires.append('numpy %s' % required_version) # 1.18.1 used
else:
found_numpy = False
try:
import numpy as np
found_numpy = True
except RuntimeError:
raise RuntimeError('numpy=1.19.4 is buggy; install a different version')
except ImportError:
all_reqs['numpy'] = required_version
install_requires.append('numpy %s' % required_version) # 1.18.1 used
if found_numpy:
sver = np.lib.NumpyVersion(np.__version__)
iver = int_version('numpy', sver.version)
all_reqs['numpy'] = sver.version
iversion_check = int_version('numpy', version_check)
#srequired_version = int_version('numpy', required_version)
#print('numpy %r %r' % (sver, iversion_check))
if iver < iversion_check:
print("numpy.__version__ = %r < %s" % (np.__version__, version_check))
install_requires.append('numpy %s' % required_version)
all_reqs['numpy'] = version_check
install_requires.append('numpy %s' % required_version)
if is_rtd:
install_requires.append('scipy')
else:
version_check, required_version = vreqs['scipy']
if bdist:
all_reqs['scipy'] = required_version
install_requires.append('scipy %s' % required_version) # 1.4.1 used
else:
try:
import scipy
sver = scipy.version.short_version
iver = int_version('scipy', sver)
all_reqs['scipy'] = sver
iversion_check = int_version('scipy', version_check)
#srequired_version = int_version('scipy', required_version)
#print('scipy %r %r' % (sver, iversion_check))
#print(iver, iversion_check)
if iver < iversion_check:
print("scipy.version.short_version = %r < %r" % (
scipy.version.short_version, version_check))
all_reqs['scipy'] = required_version
install_requires.append('scipy %s' % required_version)
except ImportError:
all_reqs['scipy'] = required_version
install_requires.append('scipy %s' % required_version) # 1.4.1 used
if is_gui:
version_check, required_version = vreqs['matplotlib']
if bdist:
all_reqs['matplotlib'] = required_version
install_requires.append('matplotlib %s' % required_version) # 3.2.0 used
else:
try:
import matplotlib
iver = int_version('matplotlib', matplotlib.__version__)
all_reqs['matplotlib'] = str_version(iver)
iversion_check = int_version('matplotlib', version_check)
if iver < iversion_check:
print("matplotlib.__version__ = %r < %r" % (matplotlib.__version__, version_check))
#matplotlib.__version__, str_version(iversion_check)))
all_reqs['matplotlib'] = required_version
install_requires.append('matplotlib %s' % required_version)
except ImportError:
all_reqs['matplotlib'] = required_version
install_requires.append('matplotlib %s' % required_version) # 3.2.0 used
required_version_str = '1.4.0'
if bdist:
all_reqs['cpylog'] = f'>= {required_version_str}'
install_requires.append(f'cpylog >= {required_version_str}') # 1.3.1 used
else:
try:
import cpylog
iver = int_version('cpylog', cpylog.__version__)
all_reqs['cpylog'] = str_version(iver)
if iver < [1, 4, 0]:
print(f"cpylog.__version__ = {cpylog.__version__!r} != {required_version_str!r}")
all_reqs['cpylog'] = f'>= {required_version_str}'
install_requires.append(f'cpylog >= {required_version_str}')
except ImportError:
all_reqs['cpylog'] = f'>= {required_version_str}'
install_requires.append(f'cpylog >= {required_version_str}') # 1.3.1 used
if not is_rtd:
# nptyping, typish
# -----------------------------------------------------------
# actual requirement somewhere between 1.6.0 and 1.9.1
# 1.5.3 fails
# 1.6.0 installs (does it work?)
# 1.7.0 according to nptyping
# 1.9.1 installs
required_version_str = '1.7.0'
try:
import typish
iver = int_version('typish', typish.__version__)
all_reqs['typish'] = str_version(iver)
if iver < [1, 7, 0]:
print(f"typish.__version__ = {typish.__version__!r} != {required_version_str!r}")
all_reqs['typish'] = f'>= {required_version_str}'
install_requires.append(f'typish >= {required_version_str}')
except ImportError:
all_reqs['typish'] = f'>= {required_version_str}'
install_requires.append(f'typish >= {required_version_str}') # 1.3.1 used
# -----------------------------------------------------------
required_version_str = '>= 1.0.1, !=1.1.0'
try:
import nptyping
iver = int_version('nptyping', nptyping.__version__)
all_reqs['nptyping'] = str_version(iver)
if iver < [1, 0, 1] or iver == [1, 1, 0]:
print(f"nptyping.__version__ = {nptyping.__version__!r} not {required_version_str!r}")
all_reqs['nptyping'] = required_version_str
install_requires.append(f'nptyping {required_version_str}')
except ImportError:
all_reqs['nptyping'] = '>= 1.0.1'
install_requires.append('nptyping >= 1.0.1, !=1.1.0') # 1.0.1 used
if bdist:
all_reqs['docopt-ng'] = '>= 0.7.2'
install_requires.append('docopt-ng >= 0.7.2') # 0.7.2 used
else:
try:
import docopt
iver = int_version('docopt', docopt.__version__)
all_reqs['docopt-ng'] = str_version(iver)
if iver < [0, 7, 2]:
print("docopt.__version__ = %r < '0.7.2'" % docopt.__version__)
all_reqs['docopt-ng'] = '>= 0.7.2'
install_requires.append('docopt-ng >= 0.7.2')
except ImportError:
all_reqs['docopt-ng'] = '>= 0.7.2'
install_requires.append('docopt-ng >= 0.7.2') # 0.7.2 used
if is_rtd:
pass
elif is_gui:
try:
import qtpy
iver = int_version('qtpy', qtpy.__version__)
all_reqs['qtpy'] = str_version(iver)
if iver < [1, 4, 0]:
print("qtpy.__version__ = %r < '1.4.0'" % qtpy.__version__)
all_reqs['qtpy'] = '>= 1.4.0'
install_requires.append('qtpy >= 1.4.0')
#except ImportError: # also PythonQtError
except: # ImportError, PythonQtError
# how do you import PythonQtError?
all_reqs['qtpy'] = '>= 1.4.0'
install_requires.append('qtpy >= 1.4.0') # 1.9.0 used
try:
import PIL
iver = int_version('pillow', PIL.__version__)
all_reqs['pillow'] = str_version(iver)
# at least 5.2.0, but not 7.1.0
if iver > [7, 1, 0]:
pass
#print("pillow.__version__ = %r > '5.2.0', !='7.1.0" % PIL.__version__)
#all_reqs['pillow'] = 'pillow >=5.2.0, !7.1.0'
#install_requires.append('pillow >= 5.2.0, !=7.1.0')
elif iver == [7, 1, 0]:
print("pillow.__version__ = %r > '5.2.0', !='7.1.0" % PIL.__version__)
all_reqs['pillow'] = 'pillow >=5.2.0, !7.1.0'
install_requires.append('pillow >= 5.2.0, !=7.1.0')
elif iver < [5, 2, 0]:
print("pillow.__version__ = %r > '5.2.0', !=7.1.0" % PIL.__version__)
all_reqs['pillow'] = 'pillow >=5.2.0, !=7.1.0'
install_requires.append('pillow >= 5.2.0, !=7.1.0')
except ImportError:
#print('missing pillow...')
all_reqs['pillow'] = 'pillow >=5.2.0, !=7.1.0'
install_requires.append('pillow >= 5.2.0, !=7.1.0') # 7.1.1 used
try:
import imageio
if imageio.__version__ < '2.2.0':
print("imageio.version = %r < '2.2.0'" % imageio.__version__)
all_reqs['imageio'] = '>= 2.2.0'
install_requires.append('imageio >= 2.2.0')
else:
all_reqs['imageio'] = imageio.__version__
except ImportError:
all_reqs['imageio'] = '>= 2.2.0'
install_requires.append('imageio >= 2.2.0') # 2.6.1 used
#is_windows = 'nt' in os.name
if is_continuous_integration:
#install_requires.append('coverage>=4.4.2')
#install_requires.append('python-coveralls>=2.9')
#install_requires.append('coveralls>=1.7')
install_requires.append('codecov')
#install_requires.append('coverage')
#print(all_reqs)
print('install_requires =', install_requires)
return all_reqs, install_requires
def update_version_file():
"""
Creates the version.py file with the github string
to lock down the version when the user the following
on the dev version:
>>> python setup.py install
instead of:
>>> python setup.py develop
This is handy
"""
import pyNastran
if 'install' not in sys.argv:
return
if 'dev' in pyNastran.__version__:
return
pkg_path = pyNastran.__path__[0]
init_filename = os.path.join(pkg_path, '__init__.py')
version_filename = os.path.join(pkg_path, 'version.py')
with open(version_filename, 'w') as version_file:
version_file.write(f'__version__ = {pyNastran.__version__!r}\n')
version_file.write(f'__releaseDate__ = {pyNastran.__releaseDate__!r}\n')
with open(init_filename, 'r') as init_file:
data = init_file.read()
data2 = data.replace('is_installed = False', 'is_installed = True')
with open(init_filename, 'w') as init_file_out:
data = init_file_out.write(data2)
#__version__ = '1.3.0+%s' % revision
def cat_files(*filenames, encoding='utf8', sep='\n'):
"""Get the long description from the relevant file"""
# thanks to harold!
buf = []
for filename in filenames:
with open(filename, encoding=encoding) as file_obj:
buf.append(file_obj.read())
return sep.join(buf)
LONG_DESCRIPTION = cat_files('README.rst')
#assert '\r' not in LONG_DESCRIPTION, LONG_DESCRIPTION
#for i, line in enumerate(LONG_DESCRIPTION.split('\n')):
#print(f'%4i: %s' % (i, line))
# print(LONG_DESCRIPTION)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pizzabot.py | import os
import random
import re
import asyncio
import discord
import requests
import pytz
from hockey import game_search, scores
from datetime import datetime, timedelta
from discord.ext import commands
# import youtube_dl
owners = [193721090755788801]
client = commands.Bot(command_prefix = '!', case_insensitive=True, owner_ids = set(owners))
# token = open("token.txt", "r").read() # concealing token # NOT NEEDED FOR HEROKU
male = 'first_name_male.txt' # male name file
female = 'first_name_female.txt' # female name file
last = 'last_name.txt' # last name name file
bslist = 'bs.txt' # bullshit list
gamelist = 'games.txt' # game list
#FUNCTIONS
def make_pizza(ingredients):
pizzatoppings = 'pizzatoppings.txt' # pizza toppings file
topping_list = random_topping(pizzatoppings, ingredients) # creating list for toppings
your_pizza = "" # creating string for output
your_pizza = ", ".join(topping_list[0:-1:]) # forming a string with everything but last ingredient because grammar
if ingredients == 1: # 1 ingredient
return f"You should order a pizza with {topping_list[0]}."
elif ingredients == 2: # 2 ingredients
your_pizza = your_pizza + " and " + topping_list[-1] # adding back last ingredient
return f"You should order a pizza with {your_pizza}."
else: # for ingredients between 3 and 7
your_pizza = your_pizza + ", and " + topping_list[-1] # adding back last ingredient
return f"You should order a pizza with {your_pizza}."
def random_name(fname):
lines = open(fname).read().splitlines()
return random.choice(lines)
def random_topping(fname, ingredients):
toppings = open(fname).read().splitlines()
return random.sample(toppings, ingredients)
#BOT CONNECTING
@client.event # event decorator/wrapper
async def on_ready():
activity = discord.Activity(name="Pizza Cook", type=discord.ActivityType.watching)
await client.change_presence(status=discord.Status.online, activity=activity)
print(f"DING! {client.user} has emerged from the oven.")
#EVENTS
@client.event
async def on_message(message):
if re.findall(r"(?i)\bpizzas?\b", message.content.lower()): # adds reaction whenever "pizza" is mentioned
await message.add_reaction("\U0001F355")
await client.process_commands(message)
#COMMANDS
@client.command(brief='This will display and add to happy hour games.')
async def game(ctx, action, *args):
gamename = " ".join(args)
if action == "add":
with open(gamelist, 'a') as file:
file.write(f'{gamename}\n')
await ctx.send(f"```Added {gamename} to your list of Happy Hour games```")
elif action == "remove":
with open(gamelist, 'r') as ofile:
lines = ofile.readlines()
with open(gamelist, 'w') as nfile:
for line in lines:
if line.strip("\n") != gamename:
nfile.write(line)
await ctx.send(f"```Removed {gamename} from your list of Happy Hour games```")
elif action == "random":
result = open(gamelist).read().splitlines()
await ctx.send(f'I choose...\n```{random.choice(result)}```')
elif action == "list":
games = open(gamelist).read()
await ctx.send(f"```{games}```")
else:
await ctx.send("Please use a valid action:\n```!game add gamename\n!game remove gamename\n!game random\n!game list```")
@client.command(brief="Random first or full name for males or females.")
async def name(ctx, style="rand", gender="rand"):
style_opt = ('first', 'full')
gender_opt = ('male', 'female')
if style == "rand": style = random.choice(style_opt)
if gender == "rand": gender = random.choice(gender_opt)
style, gender = style.lower(), gender.lower()
if style not in style_opt or gender not in gender_opt:
await ctx.send("```Must be in this format:\n!name [first|full|rand] [male|female|rand]\nYou may also just use !name for all random.```")
elif style == 'first':
if gender == 'male':
await ctx.send(f"```{random_name(male)}```")
else:
await ctx.send(f"```{random_name(female)}```")
else:
if gender == 'male':
await ctx.send(f"```{random_name(male)} {random_name(last)}```")
else:
await ctx.send(f"```{random_name(female)} {random_name(last)}```")
@client.command()
async def football(ctx):
await ctx.send("https://sportsdata.usatoday.com/football/nfl/scores")
@client.command(brief='Gives a pizza with random toppings.')
async def toppings(ctx, ingredients=""):
try:
if ingredients == "": ingredients = random.randint(1,3)
ingredients = int(ingredients)
if ingredients > 7 or ingredients < 1:
raise Exception
await ctx.send(make_pizza(ingredients))
except ValueError:
await ctx.send("You must use only numbers.")
except Exception:
await ctx.send("You must only use a single number between 1 and 7.")
@client.command(brief='Selects a random choice from those supplied.')
async def rand(ctx, *args):
if 'forza' == args[0].lower():
forza_class = ('D', 'C', 'B', 'A', 'S1', 'S2', 'X')
forza_race = ('Road', 'Dirt', 'Cross Country', 'Street', 'Drag')
forza_class_choice = random.choices(forza_class, weights=[55, 160, 210, 210, 160, 125, 75], k=1)[0]
forza_race_choice = random.choices(forza_race, weights=[2475, 2475, 2475, 2475, 100], k=1)[0]
if len(args) == 2 and args[1].upper() in forza_class:
forza_class_choice = args[1].upper()
await ctx.send(f'I choose...\n```{forza_race_choice} Racing with \'{forza_class_choice}\' ranked vehicles.```')
else:
choices = " ".join(args)
choices = choices.split(',')
choices = [i for i in choices if i != '']
try:
if len(choices) < 2:
await ctx.send('You must give me at least 2 choices.')
else:
result = random.choice(choices).strip()
await ctx.send(f'I choose...\n```{result}```')
except IndexError:
await ctx.send('You didn\'t provide any choices.')
@client.command(brief='Multiple random choices from those supplied.')
async def mrand(ctx, count=None, *args):
try:
count = int(count)
except ValueError:
await ctx.send("You must only use numbers for your choices, for example:\n```!mrand 2 first, second, third```")
return
except TypeError:
await ctx.send("You must provide a count and choices, for example:\n```!mrand 2 first, second, third```")
return
choices = "".join(args)
choices = choices.replace(', ', ',')
choices = choices.split(',')
choices = [i for i in choices if i != '']
try:
if len(choices) <= count:
await ctx.send(f'You must provide more choices ({len(choices)} given) than your desired results ({count} given).')
else:
result_list = random.sample(choices, count)
result = '\n'
for i in result_list:
result += i + '\n'
await ctx.send(f'I choose...\n```{result}```')
except IndexError:
await ctx.send('You didn\'t provide any choices.')
except ValueError:
await ctx.send('You didn\'t provide enough choices.')
@client.command(brief='Really bad random, who knows what it\'s doing?')
async def srand(ctx, *args):
result = open(bslist).read().splitlines()
await ctx.send(f'I choose pizz... I mean:\n```{random.choice(result)}```')
@client.command(brief='Gives hockey scores for yesterday/today/tomorrow')
async def hockey(ctx, *, args=None):
argtuple = None
if args: argtuple = tuple(args.split(' '))
pst = pytz.timezone('America/Los_Angeles')
modopt = ["0", "+1", "-1", "today", "tomorrow", "yesterday"]
if argtuple == None:
mod = "0"
teams_req = None
elif argtuple[0].lower() in modopt:
mod = argtuple[0]
teams_req = argtuple[1:]
else:
mod = "0"
teams_req = argtuple
if mod == "-1" or mod.lower() == "yesterday": mod = -1
elif mod == "+1" or mod.lower() == "tomorrow": mod = 1
else: mod = 0
date = datetime.date(datetime.now(pst) + timedelta(days=mod)).strftime("%Y-%m-%d")
url = f"https://statsapi.web.nhl.com/api/v1/schedule?startDate={date}&endDate={date}&expand=schedule.linescore"
response = requests.get(url, headers={"Accept": "application/json"})
data = response.json()
game_count = data['totalGames']
if game_count == 0:
date = datetime.strptime(date, "%Y-%m-%d").strftime("%-m/%-d")
await ctx.send(f'There are no games scheduled on {date}!')
elif teams_req:
date = datetime.strptime(date, "%Y-%m-%d").strftime("%-m/%-d")
games = data['dates'][0]['games']
games_req = game_search(games, game_count, teams_req)
msg = scores(games, game_count, games_req)
if msg == "":
teamsout = "\n"
for i in teams_req: teamsout += i + '\n'
msg = f"There were no games matching your search criteria on {date}:\n```{teamsout}```"
await ctx.send(msg)
else:
games_req = [i for i in range(game_count)]
games = data['dates'][0]['games']
await ctx.send(scores(games, game_count, games_req))
@client.command(brief='DnD style dice roller.')
async def roll(ctx, *args):
dice_input = " ".join(args)
valid_input = re.search(
r"^[1-9][0-9]{0,2}[dD][1-9][0-9]{0,2}( ?[\+\-][0-9]+)?( [dD][lL])?( [dD][hH])?$|^(dndstats)$", dice_input)
if valid_input != None:
options = re.split(r'[dD\+\-]', dice_input)
dice_mod = re.findall(r'[\+\-]', dice_input)
if 'stats' in options:
stats = []
def dndstats():
rollcount = 0
roll_out = ""
while rollcount < 6:
roll_total = 0
badrolls = []
while roll_total < 8:
roll = [random.randint(1, 6) for val in range(4)]
lowroll = min(roll)
roll.remove(lowroll)
roll_total = sum(roll)
if roll_total < 8:
badrolls.append(roll_total)
stats.append(roll_total)
roll_full = f"[{roll[0]}, {roll[1]}, {roll[2]}, ~~{lowroll}~~]"
if badrolls:
roll_out += f'{roll_full} -- TOTAL: {roll_total} -- BADROLLS: {badrolls}\n'
else:
roll_out += f'{roll_full} -- TOTAL: {roll_total}\n'
rollcount += 1
roll_out += f'\nStats: {stats}'
return roll_out
await ctx.send(f'{dndstats()}')
else:
roll = [random.randint(1, int(options[1])) for val in range(int(options[0]))]
matches = ['l','h']
if all(any(i in j for j in options) for i in matches):
roll.remove(min(roll))
roll.remove(max(roll))
elif 'l' in options:
roll.remove(min(roll))
elif 'h' in options:
roll.remove(max(roll))
if "+" in dice_mod:
roll_total = sum(roll) + int(options[2])
elif "-" in dice_mod:
roll_total = sum(roll) - int(options[2])
else:
roll_total = sum(roll)
options.insert(2, 0) # for printing purposes
dice_mod.append("+") # for printing purposes
await ctx.send(f"{roll} {dice_mod[0]}{options[2]} -- TOTAL: {roll_total}")
@client.command(brief='Reminds in minutes or \'tomorrow\' with message')
async def remindme(ctx, rtime=None, *, args='You requested a reminder.'):
try:
if rtime.lower() == 'tomorrow':
await asyncio.sleep(86400)
elif int(rtime) <= 1440 and int(rtime) > 0:
rtime = int(rtime) * 60
await asyncio.sleep(rtime)
else:
await ctx.send('Reminders must be between 1 minute and a full day.')
return
await ctx.send(f'{ctx.author.mention}:\n{args}')
except ValueError:
await ctx.send('You must use any of these formats:\n```!remindme [minutes] [message]\n!remindme tomorrow [message]```')
except AttributeError:
await ctx.send('You must enter a valid remind time.')
#HIDDEN OWNER COMMANDS
# @client.command(hidden=True)
# @commands.is_owner()
# async def say(ctx, *, args):
# await ctx.send(args)
@client.command(hidden=True)
@commands.is_owner()
async def say(ctx, channel, *, args):
try:
channel = await commands.TextChannelConverter().convert(ctx, channel)
message = args
await channel.send(message)
except commands.errors.ChannelNotFound:
await ctx.send('You must send to an active chat channel. EX:\n!say general hi')
except Exception as e:
await ctx.send(f'Something went wrong: {e}')
# client.run(token) # NOT NEEDED FOR HEROKU
client.run(os.environ['BOT_TOKEN']) | []
| []
| [
"BOT_TOKEN"
]
| [] | ["BOT_TOKEN"] | python | 1 | 0 | |
pkg/network/multus.go | package network
import (
"os"
"path/filepath"
"github.com/openshift/cluster-network-operator/pkg/render"
"github.com/pkg/errors"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// renderMultusConfig returns the manifests of Multus
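// The manifest templates are populated below from the RELEASE_VERSION, MULTUS_IMAGE,
// CNI_PLUGINS_SUPPORTED_IMAGE and CNI_PLUGINS_UNSUPPORTED_IMAGE environment variables,
// which are assumed to be set in the operator's environment.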
func renderMultusConfig(manifestDir string, useDHCP bool) ([]*uns.Unstructured, error) {
objs := []*uns.Unstructured{}
// render the manifests on disk
data := render.MakeRenderData()
data.Data["ReleaseVersion"] = os.Getenv("RELEASE_VERSION")
data.Data["MultusImage"] = os.Getenv("MULTUS_IMAGE")
data.Data["CNIPluginsSupportedImage"] = os.Getenv("CNI_PLUGINS_SUPPORTED_IMAGE")
data.Data["CNIPluginsUnsupportedImage"] = os.Getenv("CNI_PLUGINS_UNSUPPORTED_IMAGE")
data.Data["RenderDHCP"] = useDHCP
manifests, err := render.RenderDir(filepath.Join(manifestDir, "network/multus"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render multus manifests")
}
objs = append(objs, manifests...)
return objs, nil
}
| [
"\"RELEASE_VERSION\"",
"\"MULTUS_IMAGE\"",
"\"CNI_PLUGINS_SUPPORTED_IMAGE\"",
"\"CNI_PLUGINS_UNSUPPORTED_IMAGE\""
]
| []
| [
"CNI_PLUGINS_UNSUPPORTED_IMAGE",
"MULTUS_IMAGE",
"RELEASE_VERSION",
"CNI_PLUGINS_SUPPORTED_IMAGE"
]
| [] | ["CNI_PLUGINS_UNSUPPORTED_IMAGE", "MULTUS_IMAGE", "RELEASE_VERSION", "CNI_PLUGINS_SUPPORTED_IMAGE"] | go | 4 | 0 | |
stats_sharpness.py | import glob
import logging
import os
import sys
import warnings
import numpy as np
import tensorflow as tf
from scipy import signal, stats
from timbral_models import (filter_audio_highpass, tf_filter_audio_highpass,
tf_timbral_sharpness, timbral_sharpness,
timbral_util)
try:
import manage_gpus as gpl
at_ircam = True
print("manage_gpus detected. IRCAM computer here.")
except ImportError:
gpl = None
at_ircam = False
max_num_threads = 6
print("manage_gpus was not found. Assuming it is not an IRCAM computer.")
except Exception as inst:
print("Unexpected error while importing manage_gpus. Exiting.")
print(type(inst)) # the exception instance
raise inst
if not at_ircam:
# tf.profiler.experimental.server.start(6009)
pass
if gpl:
try:
gpu_id_locked = gpl.get_gpu_lock(gpu_device_id=-1, soft=True)
print("GPU {} successfully locked !".format(gpu_id_locked))
except gpl.NoGpuManager:
print(
"no gpu manager available - will use all available GPUs", file=sys.stderr
)
except gpl.NoGpuAvailable:
print(
"there is no GPU available for locking, continuing with CPU",
file=sys.stderr,
)
comp_device = "/cpu:0"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
warnings.filterwarnings("ignore")
data_dir = "/data/anasynth_nonbp/lavault/data_for_pickle_augmented_general_noc/data_hd_curated"
tt = 128 * 128
fps = glob.glob(os.path.join(data_dir, "**/*sd*.wav"), recursive=True)
error = []
timbral_util.print_blue(
"Preparing the test. This may take a while. Please wait....")
grad = True
ll = len(fps)
for i, fname in enumerate(fps):
audio_samples, fs = timbral_util.file_read(
fname, 0, phase_correction=False)
audio_samples_t = tf.convert_to_tensor(
[audio_samples[:tt]], dtype=tf.float32)
audio_samples_t = tf.expand_dims(audio_samples_t, -1)
acm_score = np.array(timbral_sharpness(
fname, dev_output=False, take_first=tt))
tf_score_2 = tf_timbral_sharpness(
audio_samples_t, fs=fs, dev_output=False,
)
error.append(tf_score_2.numpy())
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("{} / {} :: {:.2f}%".format(i+1, ll, ((i+1)/ll) * 100))
sys.stdout.flush()
print("mean sharpness :: {} , std :: {}, min :: {}, max :: {}".format(
np.mean(error), np.std(error), min(error), max(error)))
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
tables/tests/common.py | """Utilities for PyTables' test suites."""
import os
import re
import sys
import locale
import platform
import tempfile
from pathlib import Path
from time import perf_counter as clock
from packaging.version import Version
import unittest
import numexpr as ne
import numpy as np
import tables as tb
from tables.req_versions import min_blosc_bitshuffle_version
hdf5_version = Version(tb.hdf5_version)
blosc_version = Version(tb.which_lib_version("blosc")[1])
verbose = os.environ.get("VERBOSE", "FALSE") == "TRUE"
"""Show detailed output of the testing process."""
heavy = False
"""Run all tests even when they take long to complete."""
show_memory = False
"""Show the progress of memory consumption."""
def parse_argv(argv):
global verbose, heavy
if 'verbose' in argv:
verbose = True
argv.remove('verbose')
if 'silent' in argv: # take care of old flag, just in case
verbose = False
argv.remove('silent')
if '--heavy' in argv:
heavy = True
argv.remove('--heavy')
return argv
zlib_avail = tb.which_lib_version("zlib") is not None
lzo_avail = tb.which_lib_version("lzo") is not None
bzip2_avail = tb.which_lib_version("bzip2") is not None
blosc_avail = tb.which_lib_version("blosc") is not None
def print_heavy(heavy):
if heavy:
print("""Performing the complete test suite!""")
else:
print("""\
Performing only a light (yet comprehensive) subset of the test suite.
If you want a more complete test, try passing the --heavy flag to this script
(or set the 'heavy' parameter in case you are using tables.test() call).
The whole suite will take more than 4 hours to complete on a relatively
modern CPU and around 512 MB of main memory.""")
print('-=' * 38)
def print_versions():
"""Print all the versions of software that PyTables relies on."""
print('-=' * 38)
print("PyTables version: %s" % tb.__version__)
print("HDF5 version: %s" % tb.which_lib_version("hdf5")[1])
print("NumPy version: %s" % np.__version__)
tinfo = tb.which_lib_version("zlib")
if ne.use_vml:
# Get only the main version number and strip out all the rest
vml_version = ne.get_vml_version()
vml_version = re.findall("[0-9.]+", vml_version)[0]
vml_avail = "using VML/MKL %s" % vml_version
else:
vml_avail = "not using Intel's VML/MKL"
print(f"Numexpr version: {ne.__version__} ({vml_avail})")
if tinfo is not None:
print(f"Zlib version: {tinfo[1]} (in Python interpreter)")
tinfo = tb.which_lib_version("lzo")
if tinfo is not None:
print("LZO version: {} ({})".format(tinfo[1], tinfo[2]))
tinfo = tb.which_lib_version("bzip2")
if tinfo is not None:
print("BZIP2 version: {} ({})".format(tinfo[1], tinfo[2]))
tinfo = tb.which_lib_version("blosc")
if tinfo is not None:
blosc_date = tinfo[2].split()[1]
print("Blosc version: {} ({})".format(tinfo[1], blosc_date))
blosc_cinfo = tb.blosc_get_complib_info()
blosc_cinfo = [
"{} ({})".format(k, v[1]) for k, v in sorted(blosc_cinfo.items())
]
print("Blosc compressors: %s" % ', '.join(blosc_cinfo))
blosc_finfo = ['shuffle']
if Version(tinfo[1]) >= tb.req_versions.min_blosc_bitshuffle_version:
blosc_finfo.append('bitshuffle')
print("Blosc filters: %s" % ', '.join(blosc_finfo))
try:
from Cython import __version__ as cython_version
print('Cython version: %s' % cython_version)
except Exception:
pass
print('Python version: %s' % sys.version)
print('Platform: %s' % platform.platform())
# if os.name == 'posix':
# (sysname, nodename, release, version, machine) = os.uname()
# print('Platform: %s-%s' % (sys.platform, machine))
print('Byte-ordering: %s' % sys.byteorder)
print('Detected cores: %s' % tb.utils.detect_number_of_cores())
print('Default encoding: %s' % sys.getdefaultencoding())
print('Default FS encoding: %s' % sys.getfilesystemencoding())
print('Default locale: (%s, %s)' % locale.getdefaultlocale())
print('-=' * 38)
# This should improve readability when tests are run by CI tools
sys.stdout.flush()
def test_filename(filename):
from pkg_resources import resource_filename
return resource_filename('tables.tests', filename)
def verbosePrint(string, nonl=False):
"""Print out the `string` if verbose output is enabled."""
if not verbose:
return
if nonl:
print(string, end=' ')
else:
print(string)
def allequal(a, b, flavor="numpy"):
"""Checks if two numerical objects are equal."""
# print("a-->", repr(a))
# print("b-->", repr(b))
if not hasattr(b, "shape"):
# Scalar case
return a == b
if ((not hasattr(a, "shape") or a.shape == ()) and
(not hasattr(b, "shape") or b.shape == ())):
return a == b
if a.shape != b.shape:
if verbose:
print("Shape is not equal:", a.shape, "!=", b.shape)
return 0
# Way to check the type equality without byteorder considerations
if hasattr(b, "dtype") and a.dtype.str[1:] != b.dtype.str[1:]:
if verbose:
print("dtype is not equal:", a.dtype, "!=", b.dtype)
return 0
# Rank-0 case
if len(a.shape) == 0:
if a[()] == b[()]:
return 1
else:
if verbose:
print("Shape is not equal:", a.shape, "!=", b.shape)
return 0
# null arrays
if a.size == 0: # len(a) is not correct for generic shapes
if b.size == 0:
return 1
else:
if verbose:
print("length is not equal")
print("len(a.data) ==>", len(a.data))
print("len(b.data) ==>", len(b.data))
return 0
# Multidimensional case
result = (a == b)
result = np.all(result)
if not result and verbose:
print("Some of the elements in arrays are not equal")
return result
def areArraysEqual(arr1, arr2):
"""Are both `arr1` and `arr2` equal arrays?
Arguments can be regular NumPy arrays, chararray arrays or
structured arrays (including structured record arrays). They are
checked for type and value equality.
"""
t1 = type(arr1)
t2 = type(arr2)
if not ((hasattr(arr1, 'dtype') and arr1.dtype == arr2.dtype) or
issubclass(t1, t2) or issubclass(t2, t1)):
return False
return np.all(arr1 == arr2)
class PyTablesTestCase(unittest.TestCase):
def tearDown(self):
super().tearDown()
for key in self.__dict__:
if self.__dict__[key].__class__.__name__ != 'instancemethod':
self.__dict__[key] = None
def _getName(self):
"""Get the name of this test case."""
return self.id().split('.')[-2]
def _getMethodName(self):
"""Get the name of the method currently running in the test case."""
return self.id().split('.')[-1]
def _verboseHeader(self):
"""Print a nice header for the current test method if verbose."""
if verbose:
name = self._getName()
methodName = self._getMethodName()
title = f"Running {name}.{methodName}"
print('{}\n{}'.format(title, '-' * len(title)))
def _checkEqualityGroup(self, node1, node2, hardlink=False):
if verbose:
print("Group 1:", node1)
print("Group 2:", node2)
if hardlink:
self.assertTrue(
node1._v_pathname != node2._v_pathname,
"node1 and node2 have the same pathnames.")
else:
self.assertTrue(
node1._v_pathname == node2._v_pathname,
"node1 and node2 does not have the same pathnames.")
self.assertTrue(
node1._v_children == node2._v_children,
"node1 and node2 does not have the same children.")
def _checkEqualityLeaf(self, node1, node2, hardlink=False):
if verbose:
print("Leaf 1:", node1)
print("Leaf 2:", node2)
if hardlink:
self.assertTrue(
node1._v_pathname != node2._v_pathname,
"node1 and node2 have the same pathnames.")
else:
self.assertTrue(
node1._v_pathname == node2._v_pathname,
"node1 and node2 does not have the same pathnames.")
self.assertTrue(
areArraysEqual(node1[:], node2[:]),
"node1 and node2 does not have the same values.")
class TestFileMixin:
h5fname = None
open_kwargs = {}
def setUp(self):
super().setUp()
self.h5file = tb.open_file(
self.h5fname, title=self._getName(), **self.open_kwargs)
def tearDown(self):
"""Close ``h5file``."""
self.h5file.close()
super().tearDown()
class TempFileMixin:
open_mode = 'w'
open_kwargs = {}
def _getTempFileName(self):
return tempfile.mktemp(prefix=self._getName(), suffix='.h5')
def setUp(self):
"""Set ``h5file`` and ``h5fname`` instance attributes.
* ``h5fname``: the name of the temporary HDF5 file.
* ``h5file``: the writable, empty, temporary HDF5 file.
"""
super().setUp()
self.h5fname = self._getTempFileName()
self.h5file = tb.open_file(
self.h5fname, self.open_mode, title=self._getName(),
**self.open_kwargs)
def tearDown(self):
"""Close ``h5file`` and remove ``h5fname``."""
self.h5file.close()
self.h5file = None
Path(self.h5fname).unlink() # comment this for debug only
super().tearDown()
def _reopen(self, mode='r', **kwargs):
"""Reopen ``h5file`` in the specified ``mode``.
Returns a true or false value depending on whether the file was
reopenend or not. If not, nothing is changed.
"""
self.h5file.close()
self.h5file = tb.open_file(self.h5fname, mode, **kwargs)
return True
class ShowMemTime(PyTablesTestCase):
tref = clock()
"""Test for showing memory and time consumption."""
def test00(self):
"""Showing memory and time consumption."""
# Obtain memory info (only for Linux 2.6.x)
for line in Path("/proc/self/status").read_text().splitlines():
if line.startswith("VmSize:"):
vmsize = int(line.split()[1])
elif line.startswith("VmRSS:"):
vmrss = int(line.split()[1])
elif line.startswith("VmData:"):
vmdata = int(line.split()[1])
elif line.startswith("VmStk:"):
vmstk = int(line.split()[1])
elif line.startswith("VmExe:"):
vmexe = int(line.split()[1])
elif line.startswith("VmLib:"):
vmlib = int(line.split()[1])
print("\nWallClock time:", clock() - self.tref)
print("Memory usage: ******* %s *******" % self._getName())
print(f"VmSize: {vmsize:>7} kB\tVmRSS: {vmrss:>7} kB")
print(f"VmData: {vmdata:>7} kB\tVmStk: {vmstk:>7} kB")
print(f"VmExe: {vmexe:>7} kB\tVmLib: {vmlib:>7} kB")
| []
| []
| [
"VERBOSE"
]
| [] | ["VERBOSE"] | python | 1 | 0 | |
DeepAnalogs/AnEnDataset.py | # "`-''-/").___..--''"`-._
# (`6_ 6 ) `-. ( ).`-.__.`) WE ARE ...
# (_Y_.)' ._ ) `._ `. ``-..-' PENN STATE!
# _ ..`--'_..-_/ /--'_.' ,'
# (il),-'' (li),' ((!.-'
#
# Author: Weiming Hu <[email protected]>
# Geoinformatics and Earth Observation Laboratory (http://geolab.psu.edu)
# Department of Geography and Institute for CyberScience
# The Pennsylvania State University
#
# This file defines a customized dataset for perfect analogs.
#
import os
import sys
import glob
import torch
import pickle
import random
import warnings
import itertools
import numpy as np
import bottleneck as bn
from tqdm import tqdm
from torch.utils.data import Dataset
from DeepAnalogs.AnEnDict import AnEnDict
# Global variables read from the environment and used when printing
NROWS = int(os.getenv('DA_MAX_ROWS', 20))
NCOLS = int(os.getenv('DA_MAX_COLS', 15))
NSTATIONS = int(os.getenv('DA_MAX_STATIONS', 50))
class AnEnDataset(Dataset):
"""
AnEnDataset is an abstract class for generating triplet samples for training the embedding network.
This class only prepares the triplet sample indices of forecasts to avoid copying forecast values which
might be very expensive in both memory and computation.
are actually indexed.
This class contains a pointer to the input forecasts and sorted members but this could be potentially dangerous
if they are changed after this class and the corresponding indices have been created. So make sure they ARE NOT
CHANGED after this class is created.
The BEST PRACTICE is to remove them (del *) from the outer environment once you have created an object of this
class to ensure that forecasts can only be accessed from objects of this class.
"""
def __init__(self, forecasts, sorted_members, num_analogs,
margin=np.nan, positive_predictand_index=None,
triplet_sample_prob=1, triplet_sample_method='fitness',
forecast_data_key='Data', to_tensor=True, disable_pbar=False, tqdm=tqdm,
fitness_num_negative=1, add_lead_time_index=False):
"""
Initialize an AnEnDataset
:param forecasts: An AnEnDict for forecasts
:param sorted_members: A dictionary for sorted members
:param num_analogs: The number of analogs to extract from search entries
:param margin: The distance margin while creating the triplet. If the positive distance plus the margin is
still smaller than the negative distance, this triplet is considered too easy and will be ignored.
:param positive_predictand_index: If the positivity of the predictand is of concern, set this to the
index that points to the predictand that should be positive in the key `aligned_obs` from the sorted members.
:param triplet_sample_prob: The sample probability for whether to include a given triplet sample
:param triplet_sample_method: The sample method
:param forecast_data_key: The forecast key to use for querying data values
:param to_tensor: Whether to convert results to tensors
:param disable_pbar: Whether to disable the progress bar
:param tqdm: A tqdm progress bar
:param fitness_num_negative: If the sample method is `fitness`, this argument specifies the number of
negative candidates to select for each positive candidate. The selection will be sampling without replacement
to ensure that a particular negative candidate is only selected once for a particular positive candidate.
:param add_lead_time_index: Whether to add lead time index in the results of __get_item__
"""
# Sanity checks
assert isinstance(forecasts, AnEnDict), 'Forecasts must be an object of AnEnDict!'
assert isinstance(sorted_members, dict), 'Sorted members must be a dictionary!'
expected_dict_keys = ['index', 'distance', 'anchor_times_index', 'search_times_index']
assert all([key in sorted_members.keys() for key in expected_dict_keys]), \
'{} are required in sorted members'.format(expected_dict_keys)
assert num_analogs <= sorted_members['index'].shape[3], 'Not enough search entries to select analogs from!'
if positive_predictand_index is not None:
assert 0 <= positive_predictand_index < sorted_members['aligned_obs'].shape[0]
# Decide the triplet selection method
if triplet_sample_method == 'fitness':
select_func = self._select_fitness
elif triplet_sample_method == 'sequential':
select_func = self._select_sequential
else:
raise Exception('Unknown selection method {}!'.format(triplet_sample_method))
# These variables will be used inside the for loops
num_stations = sorted_members['index'].shape[0]
num_lead_times = sorted_members['index'].shape[2]
self.num_total_entries = sorted_members['index'].shape[3]
# Initialization
self.forecasts = forecasts
self.sorted_members = sorted_members
self.num_analogs = num_analogs
self.margin = margin
self.positive_predictand_index = positive_predictand_index
self.triplet_sample_prob = triplet_sample_prob
self.triplet_sample_method = triplet_sample_method
self.forecast_data_key = forecast_data_key
self.to_tensor = to_tensor
self.fitness_num_negative = fitness_num_negative
self.add_lead_time_index = add_lead_time_index
self.tqdm = tqdm
self.disable_pbar = disable_pbar
self.samples = []
self.anchor_sample_times = []
self.positive_sample_times = []
self.negative_sample_times = []
# Create index samples
#
# Each sample is a length-of-5 list containing the following information:
# - the station index
# - the lead time index
# - the anchor time index
# - the positive candidate time index
# - the negative candidate time index
#
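# For illustration only (made-up indices), one sample could look like
#     [12, 3, 105, 87, 310]
# i.e. station 12, lead time 3, anchor forecast time index 105, with forecast time
# index 87 as the positive candidate and 310 as the negative candidate.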
print('Generating triplet samples ...')
with self.tqdm(total=num_stations * num_lead_times, disable=self.disable_pbar, leave=True) as pbar:
for station_index in range(num_stations):
for lead_time_index in range(num_lead_times):
for anchor_index, anchor_time_index in enumerate(sorted_members['anchor_times_index']):
# If the predictand should be positive, exclude NaN and non-positive cases
if positive_predictand_index is not None:
o = sorted_members['aligned_obs'][
positive_predictand_index, station_index, anchor_time_index, lead_time_index]
if np.isnan(o) or o <= 0:
continue
# Generate triplets for this [station, lead time, anchor] from all possible search entries
select_func(station_index, lead_time_index, anchor_index, anchor_time_index)
# Update the progress bar
pbar.update(1)
def save(self, dirname):
self.save_samples('{}/samples.pkl'.format(dirname))
self.save_forecasts('{}/forecasts.pkl'.format(dirname))
self.save_sorted_members('{}/sorted_members.pkl'.format(dirname))
def save_samples(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.samples, f)
def save_forecasts(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.forecasts, f)
def save_sorted_members(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.sorted_members, f)
def _select_sequential(self, station_index, lead_time_index, anchor_index, anchor_time_index):
"""
Sequential selection is defined as follows:
1. Each search entry within the number of analogs from the start will be positive and the entry at the number
of analogs is the negative.
2. The entry at the number of the analogs is the positive and all entries left are considered negative compared
to the positive entry.
"""
for analog_index in range(self.num_analogs):
positive_candidate_index = analog_index
negative_candidate_index = self.num_analogs
self._check_and_add(station_index, anchor_index, lead_time_index,
positive_candidate_index, negative_candidate_index, anchor_time_index)
for index_offset in range(1, self.num_total_entries - self.num_analogs):
positive_candidate_index = self.num_analogs
negative_candidate_index = positive_candidate_index + index_offset
self._check_and_add(station_index, anchor_index, lead_time_index,
positive_candidate_index, negative_candidate_index, anchor_time_index)
def _select_fitness(self, station_index, lead_time_index, anchor_index, anchor_time_index):
"""
Fitness selection is defined as follows:
1. Search entries within the number of analogs are positive and all remaining entries are considered negative.
2. For each positive entry, several negative entries can be selected to form a triplet.
3. Negative entries are selected with a probability that is proportional to its normalized fitness.
"""
# Get the distance for all negative entries
distance = self.sorted_members['distance'][station_index, anchor_index, lead_time_index, self.num_analogs:]
if all(np.isnan(distance)):
warnings.warn('All NANs found sorted_members["distance"][{}, {}, {}, {}:]'.format(station_index, anchor_index, lead_time_index, self.num_analogs))
return
# Inverse distances
distance_inverse = bn.nansum(distance) - distance
# Replace NAN with 0
distance_inverse[np.isnan(distance_inverse)] = 0
for analog_index in range(self.num_analogs):
positive_candidate_index = analog_index
# Normalize the inverse distance to initialize fitness
fitness = distance_inverse / bn.nansum(distance_inverse)
for repetition in range(self.fitness_num_negative):
# Calculate cumulative sum
fitness_cumsum = np.cumsum(fitness)
# Decide on the negative candidate
negative_candidate_index = np.digitize(random.random(), fitness_cumsum)
# Remove this negative candidate from future selection
fitness[negative_candidate_index] = 0
# Rescale the fitness to [0, 1]
fitness = fitness / bn.nansum(fitness)
self._check_and_add(station_index, anchor_index, lead_time_index, positive_candidate_index,
negative_candidate_index + self.num_analogs, anchor_time_index)
def _check_and_add(self, station_index, anchor_index, lead_time_index,
positive_candidate_index, negative_candidate_index, anchor_time_index):
"""
Checks validity of the specified triplet and then add the valid triplet to the sample list.
:param station_index: The station index
:param anchor_index: The anchor index to be used with sorted members
:param lead_time_index: The lead time index
:param positive_candidate_index: The positive search entry index
:param negative_candidate_index: The negative search entry index
:param anchor_time_index: The anchor index to be used with forecasts
"""
# Check for the probability of random sampling
if random.random() > self.triplet_sample_prob:
return
# This is the distance between the positive candidate and the anchor
d_p = self.sorted_members['distance'][station_index, anchor_index, lead_time_index, positive_candidate_index]
# This is the distance between the negative candidate and the anchor
d_n = self.sorted_members['distance'][station_index, anchor_index, lead_time_index, negative_candidate_index]
# Distances should both be valid and they should be different. Otherwise, skip this pair.
if np.isnan(d_p) or np.isnan(d_n) or d_p == d_n:
return
# The comparison must not be negative
if d_p > d_n:
raise Exception('I found a distance pair that is not sorted! This is fatal!')
if d_p + self.margin < d_n:
# This triplet is considered too easy, skip it
return
# This is the index of the positive candidate
i_p = self.sorted_members['index'][station_index, anchor_index, lead_time_index, positive_candidate_index]
# This is the index of the negative candidate
i_n = self.sorted_members['index'][station_index, anchor_index, lead_time_index, negative_candidate_index]
# Construct a triplet
triplet = [station_index, lead_time_index, anchor_time_index, i_p, i_n]
self.samples.append(triplet)
current_lead_time = self.forecasts['FLTs'][lead_time_index]
self.positive_sample_times.append(self.forecasts['Times'][i_p] + current_lead_time)
self.negative_sample_times.append(self.forecasts['Times'][i_n] + current_lead_time)
self.anchor_sample_times.append(self.forecasts['Times'][anchor_time_index] + current_lead_time)
def _get_summary(self):
"""
Generates a list of messages as a summary for the dataset.
:return: A list of messages
"""
msg = [
'*************** A Customized Dataset for AnEn ***************',
'Class name: {}'.format(type(self).__name__),
'Number of analogs: {}'.format(self.num_analogs),
'Triplet margin: {}'.format(self.margin),
'Predictand being positive: {}'.format('No' if self.positive_predictand_index is None
else 'Yes (index: {})'.format(self.positive_predictand_index)),
'Triplet sample probability: {}'.format(self.triplet_sample_prob),
'Triplet sample method: {}'.format(self.triplet_sample_method),
]
if self.triplet_sample_method == 'fitness':
msg.append('Number of negative candidates (fitness selection): {}'.format(self.fitness_num_negative))
msg.extend([
'Forecast data key: {}'.format(self.forecast_data_key),
'Convert to tensor: {}'.format(self.to_tensor),
'Number of total triplets: {}'.format(len(self)),
'Add lead time index for one hot coding: {}'.format(self.add_lead_time_index),
'********************** End of messages **********************',
])
return msg
def __len__(self):
return len(self.samples)
def __str__(self):
return '\n'.join(self._get_summary())
def __getitem__(self, index):
"""
Returns the triplet forecasts [anchor, positive, negative]. This operation actually copies the data values.
:param index: A sample index
:return: The returned sample is a list of 3 arrays, an anchor, a positive, and a negative forecast. All three
forecasts have the exact same dimensions. The dimensions are [parameters, 1 station, 1 lead time].
"""
assert isinstance(index, int), "Only support indexing using a single integer!"
# Extract the triplet sample
triplet = self.samples[index]
# Get forecast values at a single station and a single lead time
anchor = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[2], triplet[1]]
positive = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[3], triplet[1]]
negative = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[4], triplet[1]]
# Fix dimensions to be [parameters, 1 station, 1 lead time]
anchor = np.expand_dims(anchor, (1, 2))
positive = np.expand_dims(positive, (1, 2))
negative = np.expand_dims(negative, (1, 2))
if self.to_tensor:
anchor = torch.tensor(anchor, dtype=torch.float)
positive = torch.tensor(positive, dtype=torch.float)
negative = torch.tensor(negative, dtype=torch.float)
ret = [anchor, positive, negative]
if self.add_lead_time_index:
lead_time_index = triplet[1]
if self.to_tensor:
lead_time_index = torch.tensor(lead_time_index, dtype=torch.long)
ret.append(lead_time_index)
return ret
class AnEnDatasetWithTimeWindow(AnEnDataset):
"""
AnEnDatasetWithTimeWindow is inherited from AnEnDataset. Instead of generating forecast triplets at a single
lead time, this dataset prepares the triplets from nearby forecast lead times to form a small time window. This
behavior mirrors how Analog Ensembles are typically generated with a small lead time window.
"""
def __init__(self, lead_time_radius, **kw):
"""
Initialize an AnEnDatasetWithTimeWindow class
:param lead_time_radius: The radius of lead times to include. The lead time window at lead time t will be
[t - lead_time_radius, t + lead_time_radius].
:param kw: Additional arguments to `AnEnDataset`
"""
super().__init__(**kw)
self.lead_time_radius = lead_time_radius
num_lead_times = self.forecasts[self.forecast_data_key].shape[3]
# Calculate a mask for which samples to keep or to remove
keep_samples = [True if self.samples[sample_index][1] - lead_time_radius >= 0 and
self.samples[sample_index][1] + lead_time_radius < num_lead_times
else False for sample_index in range(len(self))]
# Copy samples and times
self.samples = list(itertools.compress(self.samples, keep_samples))
self.anchor_sample_times = list(itertools.compress(self.anchor_sample_times, keep_samples))
self.positive_sample_times = list(itertools.compress(self.positive_sample_times, keep_samples))
self.negative_sample_times = list(itertools.compress(self.negative_sample_times, keep_samples))
def __str__(self):
msg = super()._get_summary()
msg.insert(-1, 'Lead time radius: {}'.format(self.lead_time_radius))
return '\n'.join(msg)
def __getitem__(self, index):
assert isinstance(index, int), "Only support indexing using a single integer!"
# Extract the triplet sample
triplet = self.samples[index]
# Determine the start and the end indices for the lead time window
#
# No need to check for lead time overflow because lead times at the boundary has already been removed
flt_left = triplet[1] - self.lead_time_radius
flt_right = triplet[1] + self.lead_time_radius + 1
# Get forecast values at a single station and from a lead time window
anchor = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[2], flt_left:flt_right]
positive = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[3], flt_left:flt_right]
negative = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[4], flt_left:flt_right]
# Fix dimensions
anchor = np.expand_dims(anchor, 1)
positive = np.expand_dims(positive, 1)
negative = np.expand_dims(negative, 1)
if self.to_tensor:
anchor = torch.tensor(anchor, dtype=torch.float)
positive = torch.tensor(positive, dtype=torch.float)
negative = torch.tensor(negative, dtype=torch.float)
ret = [anchor, positive, negative]
if self.add_lead_time_index:
lead_time_index = triplet[1]
if self.to_tensor:
lead_time_index = torch.tensor(lead_time_index, dtype=torch.long)
ret.append(lead_time_index)
return ret
class AnEnDatasetOneToMany(AnEnDatasetWithTimeWindow):
"""
AnEnDatasetOneToMany is inherited from AnEnDatasetWithTimeWindow. It is mostly the same as AnEnDatasetWithTimeWindow
except that AnEnDatasetOneToMany only accepts one station in the observation dataset and multiple stations in the
forecast dataset. Users need to specify which forecast station is the matching station for the observation
station. However, when creating triplets, forecasts from all stations will be compared to the forecasts
at the matching station.
"""
def __init__(self, matching_forecast_station, **kw):
"""
Initialize an AnEnDatasetOneToMany class
:param matching_forecast_station: The index of the forecast station that matches the observation station.
:param kw: Additional arguments to `AnEnDatasetWithTimeWindow`
"""
# Sanity check
err_msg = 'Invalid matching station index ({}). The total number of forecast stations is {}'.format(
matching_forecast_station, kw['forecasts'][kw['forecast_data_key']].shape[1])
assert kw['forecasts'][kw['forecast_data_key']].shape[1] > matching_forecast_station, err_msg
assert kw['sorted_members']['index'].shape[0] == 1, 'This class only supports having one observation station!'
super().__init__(**kw)
self.matching_forecast_station = matching_forecast_station
# This is where AnEnDatasetOneToMany starts to differ from the base classes.
# Triplets will be duplicated while changing the station index.
# Not only the matching station but all forecast stations
# should be considered similar to the matching station.
#
assert len(np.unique([sample[0] for sample in self.samples])) == 1, 'Fatal! There should be only 1 station!'
# Create new samples with changing the station index
print('Enumerating station indices with samples ...')
new_samples = []
num_stations = self.forecasts[self.forecast_data_key].shape[1]
for sample in self.tqdm(self.samples, disable=self.disable_pbar, leave=True):
for station_index in range(num_stations):
sample[0] = station_index
new_samples.append(sample.copy())
del self.samples
self.samples = new_samples
def __str__(self):
msg = [
super().__str__(),
'Matching forecast station index: {}'.format(self.matching_forecast_station),
'Number of forecast stations: {}'.format(self.forecasts[self.forecast_data_key].shape[1]),
]
return '\n'.join(msg)
def __getitem__(self, index):
assert isinstance(index, int), "Only support indexing using a single integer!"
# Extract the triplet sample
triplet = self.samples[index]
# Determine the start and the end indices for the lead time window
flt_left = triplet[1] - self.lead_time_radius
flt_right = triplet[1] + self.lead_time_radius + 1
# Get forecast values at a single station and from a lead time window
# Anchor is set to be the forecast at the current location
anchor = self.forecasts[self.forecast_data_key][:, self.matching_forecast_station, triplet[2], flt_left:flt_right]
# Positive is set to be the forecast at the search location
positive = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[3], flt_left:flt_right]
# Negative is set to be the forecast at the search location
negative = self.forecasts[self.forecast_data_key][:, triplet[0], triplet[4], flt_left:flt_right]
# Fix dimensions
anchor = np.expand_dims(anchor, 1)
positive = np.expand_dims(positive, 1)
negative = np.expand_dims(negative, 1)
if self.to_tensor:
anchor = torch.tensor(anchor, dtype=torch.float)
positive = torch.tensor(positive, dtype=torch.float)
negative = torch.tensor(negative, dtype=torch.float)
ret = [anchor, positive, negative]
if self.add_lead_time_index:
lead_time_index = triplet[1]
if self.to_tensor:
lead_time_index = torch.tensor(lead_time_index, dtype=torch.long)
ret.append(lead_time_index)
return ret
class AnEnDatasetSpatial(AnEnDataset):
def __init__(self, forecasts, forecast_grid_file,
sorted_members, obs_x, obs_y,
num_analogs, lead_time_radius,
metric_width, metric_height,
margin=np.nan, positive_predictand_index=None,
triplet_sample_prob=1, triplet_sample_method='fitness',
forecast_data_key='Data', to_tensor=True, disable_pbar=False, tqdm=tqdm,
fitness_num_negative=1):
# Sanity checks
assert isinstance(forecasts, AnEnDict), 'Forecasts must be an object of AnEnDict'
assert isinstance(sorted_members, dict), 'Sorted members must be a dictionary!'
expected_dict_keys = ['index', 'distance', 'anchor_times_index', 'search_times_index']
assert all([key in sorted_members.keys() for key in expected_dict_keys]), \
'{} are required in sorted members'.format(expected_dict_keys)
assert num_analogs <= sorted_members['index'].shape[3], 'Not enough search entries to select analogs from!'
if positive_predictand_index is not None:
assert 0 <= positive_predictand_index < sorted_members['aligned_obs'].shape[0]
# Decide the triplet selection method
if triplet_sample_method == 'fitness':
select_func = self._select_fitness
elif triplet_sample_method == 'sequential':
select_func = self._select_sequential
else:
raise Exception('Unknown selection method {}!'.format(triplet_sample_method))
# Initialization
self.forecasts = forecasts
self.sorted_members = sorted_members
self.num_analogs = num_analogs
self.lead_time_radius = lead_time_radius
self.margin = margin
self.positive_predictand_index = positive_predictand_index
self.triplet_sample_prob = triplet_sample_prob
self.triplet_sample_method = triplet_sample_method
self.forecast_data_key = forecast_data_key
self.to_tensor = to_tensor
self.fitness_num_negative = fitness_num_negative
self.tqdm = tqdm
self.disable_pbar = disable_pbar
# Preset
self.padding = True
self.spatial_metric_width = metric_width
self.spatial_metric_height = metric_height
# These members are not used in the current class
self.add_lead_time_index = False
self.samples = []
self.anchor_sample_times = []
self.positive_sample_times = []
self.negative_sample_times = []
# Parse the forecast grid file
AnEnGrid = AnEnDatasetSpatial.get_grid_class()
self.forecast_grid = AnEnGrid(forecast_grid_file)
# Determine the matching forecast station to each observation station
# `station_match_lookup` is dictionary with observation station index as the key
# and the matching forecast station index as the value.
#
self.station_match_lookup = self._match_stations(obs_x, obs_y)
# Determine the boundary of lead times during training to avoid stacking time series of different lengths
num_lead_times = self.forecasts[self.forecast_data_key].shape[3]
assert num_lead_times >= 2 * self.lead_time_radius + 1, "Not enough lead times with a radius of {}".format(self.lead_time_radius)
lead_time_start = self.lead_time_radius
lead_time_end = num_lead_times - self.lead_time_radius
print('Sampling from {} lead time indices [{}:{})'.format(lead_time_end-lead_time_start, lead_time_start, lead_time_end))
# Create index samples
#
# Each sample is a length-of-5 list containing the following information:
# - the station index
# - the lead time index
# - the anchor time index
# - the positive candidate time index
# - the negative candidate time index
#
print('Generating triplet samples ...')
# These variables will be used inside the for loops
num_stations = len(self.station_match_lookup)
self.num_total_entries = sorted_members['index'].shape[3]
with self.tqdm(total=num_stations * (lead_time_end - lead_time_start), disable=self.disable_pbar, leave=True) as pbar:
for obs_station_index in range(num_stations):
for lead_time_index in np.arange(lead_time_start, lead_time_end):
for anchor_index, anchor_time_index in enumerate(sorted_members['anchor_times_index']):
# If the predictand should be positive, exclude NaN and non-positive cases
if positive_predictand_index is not None:
o = sorted_members['aligned_obs'][
positive_predictand_index, obs_station_index, anchor_time_index, lead_time_index]
if np.isnan(o) or o <= 0:
continue
# Generate triplets for this [station, lead time, anchor] from all possible search entries
select_func(obs_station_index, lead_time_index, anchor_index, anchor_time_index)
# Update the progress bar
pbar.update(1)
def _check_and_add(self, *args):
# Call the base class routine
super()._check_and_add(*args)
# Add forecast stations
#
# The last item in the sample has just been added and that is the one I'm going to modify.
# In each item, the observation station index is on the first position, and I'm appending the matching
# forecast station index.
#
self.samples[-1].append(self.station_match_lookup[args[0]])
# The content of a triplet element is:
# [0]: obs station index
# [1]: lead time index
# [2]: anchor time index
# [3]: positive time index
# [4]: negative time index
# [5]: fcst station index
@staticmethod
def get_grid_class():
try:
from AnEnGrid import AnEnGrid
except:
# Guess the location
guess = glob.glob(os.path.expanduser('~/github/AnalogsEnsemble/build/CGrid/AnEnGrid*'))
if len(guess) == 1:
# File found. Include the path and try again
sys.path.append(os.path.dirname(guess[0]))
from AnEnGrid import AnEnGrid
else:
msg = '[AnEnDatasetSpatial] Cannot find module AnEnGrid. Please specify the directory to the shared ' \
'library using environment variable , e.g. on Linux, ' \
'export PYTHONPATH=/Users/wuh20/github/AnalogsEnsemble/build/CGrid'
raise ImportError(msg)
return AnEnGrid
def _match_stations(self, obs_x, obs_y):
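"""Match each observation station index to the index of the nearest forecast station,
using squared Euclidean distances computed from the x/y coordinates."""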
# Get forecast coordinates
fcst_x, fcst_y = self.forecasts['Xs'], self.forecasts['Ys']
# Initialization
station_dict = {}
for obs_i in range(len(obs_x)):
o_x, o_y = obs_x[obs_i], obs_y[obs_i]
distances = [(o_x-fcst_x[fcst_i])**2+(o_y-fcst_y[fcst_i])**2 for fcst_i in range(len(fcst_x))]
station_dict[obs_i] = np.nanargmin(distances)
return station_dict
def __str__(self):
msg = super()._get_summary()
del msg[-2:]
msg.append('Lead time radius: {}'.format(self.lead_time_radius))
msg.append('Forecast grid: {}'.format(self.forecast_grid.summary()))
if self.forecast_grid.nrows() <= NROWS and self.forecast_grid.nrows() <= NCOLS:
msg.append(self.forecast_grid.detail())
if len(self.station_match_lookup) <= NSTATIONS:
msg.append('Matching stations:')
msg.extend(['obs [{}] --> fcst [{}]'.format(k, v) for k, v in self.station_match_lookup.items()])
msg.append('********************** End of messages **********************')
return '\n'.join(msg)
def __getitem__(self, index):
"""
Returns the triplet forecasts [anchor, positive, negative].
Elements have the same dimension of [Parameters, Height, Width, Lead times].
Height is counted top down, and width is counted left to right.
"""
assert isinstance(index, int), 'Only support indexing using a single integer!'
# Extract the triplet sample
triplet = self.samples[index]
# Determine the start and the end indices for the lead time window
flt_left = triplet[1] - self.lead_time_radius
flt_right = triplet[1] + self.lead_time_radius + 1
# Get spatial mask
fcst_station_mask = self.forecast_grid.getRectangle(
triplet[5], self.spatial_metric_width, self.spatial_metric_height, self.padding)
fcst_station_mask_flat = [int(e) for sub_list in fcst_station_mask for e in sub_list]
# Get forecast values at a single station and from a lead time window
anchor = self.forecasts[self.forecast_data_key][:, fcst_station_mask_flat, triplet[2], flt_left:flt_right]
positive = self.forecasts[self.forecast_data_key][:, fcst_station_mask_flat, triplet[3], flt_left:flt_right]
negative = self.forecasts[self.forecast_data_key][:, fcst_station_mask_flat, triplet[4], flt_left:flt_right]
# Reconstruct the structure [parameters, height, width, lead times]
anchor = anchor.reshape(anchor.shape[0], self.spatial_metric_height, self.spatial_metric_width, anchor.shape[2])
positive = positive.reshape(positive.shape[0], self.spatial_metric_height, self.spatial_metric_width, positive.shape[2])
negative = negative.reshape(negative.shape[0], self.spatial_metric_height, self.spatial_metric_width, negative.shape[2])
if self.to_tensor:
anchor = torch.tensor(anchor, dtype=torch.float)
positive = torch.tensor(positive, dtype=torch.float)
negative = torch.tensor(negative, dtype=torch.float)
ret = [anchor, positive, negative]
if self.add_lead_time_index:
lead_time_index = triplet[1]
if self.to_tensor:
lead_time_index = torch.tensor(lead_time_index, dtype=torch.long)
ret.append(lead_time_index)
return ret
| []
| []
| [
"DA_MAX_STATIONS",
"DA_MAX_ROWS",
"DA_MAX_COLS"
]
| [] | ["DA_MAX_STATIONS", "DA_MAX_ROWS", "DA_MAX_COLS"] | python | 3 | 0 | |
pkg/sentry/certs/store.go | package certs
import (
"context"
"io/ioutil"
"os"
"github.com/dapr/dapr/pkg/credentials"
"github.com/dapr/dapr/pkg/sentry/config"
"github.com/dapr/dapr/pkg/sentry/kubernetes"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
defaultSecretNamespace = "default"
)
// StoreCredentials saves the trust bundle in a Kubernetes secret store or locally on disk, depending on the hosting platform
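// On Kubernetes the PEM blocks are written into the pre-existing secret named KubeScrtName
// (in the namespace returned by getNamespace); in self-hosted mode they are written to
// conf.RootCertPath, conf.IssuerCertPath and conf.IssuerKeyPath respectively.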
func StoreCredentials(conf config.SentryConfig, rootCertPem, issuerCertPem, issuerKeyPem []byte) error {
if config.IsKubernetesHosted() {
return storeKubernetes(rootCertPem, issuerCertPem, issuerKeyPem)
}
return storeSelfhosted(rootCertPem, issuerCertPem, issuerKeyPem, conf.RootCertPath, conf.IssuerCertPath, conf.IssuerKeyPath)
}
func storeKubernetes(rootCertPem, issuerCertPem, issuerCertKey []byte) error {
kubeClient, err := kubernetes.GetClient()
if err != nil {
return err
}
namespace := getNamespace()
secret := &v1.Secret{
Data: map[string][]byte{
credentials.RootCertFilename: rootCertPem,
credentials.IssuerCertFilename: issuerCertPem,
credentials.IssuerKeyFilename: issuerCertKey,
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeScrtName,
Namespace: namespace,
},
Type: v1.SecretTypeOpaque,
}
// We update and not create because sentry expects a secret to already exist
_, err = kubeClient.CoreV1().Secrets(namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
if err != nil {
return errors.Wrap(err, "failed saving secret to kubernetes")
}
return nil
}
func getNamespace() string {
namespace := os.Getenv("NAMESPACE")
if namespace == "" {
namespace = defaultSecretNamespace
}
return namespace
}
// CredentialsExist checks root and issuer credentials exist on a hosting platform
func CredentialsExist(conf config.SentryConfig) (bool, error) {
if config.IsKubernetesHosted() {
namespace := getNamespace()
kubeClient, err := kubernetes.GetClient()
if err != nil {
return false, err
}
s, err := kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), KubeScrtName, metav1.GetOptions{})
if err != nil {
return false, err
}
return len(s.Data) > 0, nil
}
return false, nil
}
/* #nosec */
func storeSelfhosted(rootCertPem, issuerCertPem, issuerKeyPem []byte, rootCertPath, issuerCertPath, issuerKeyPath string) error {
err := ioutil.WriteFile(rootCertPath, rootCertPem, 0644)
if err != nil {
return errors.Wrapf(err, "failed saving file to %s", rootCertPath)
}
err = ioutil.WriteFile(issuerCertPath, issuerCertPem, 0644)
if err != nil {
return errors.Wrapf(err, "failed saving file to %s", issuerCertPath)
}
err = ioutil.WriteFile(issuerKeyPath, issuerKeyPem, 0644)
if err != nil {
return errors.Wrapf(err, "failed saving file to %s", issuerKeyPath)
}
return nil
}
| [
"\"NAMESPACE\""
]
| []
| [
"NAMESPACE"
]
| [] | ["NAMESPACE"] | go | 1 | 0 | |
examples/pwr_run/checkpointing/timed/max_par/job2.py | """
#Trains a VGG on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.001
args_model = 'vgg16'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 5
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
def on_epoch_end(self, epoch, logs=None):
open('epoch/' + job_name + '.txt', 'a').close()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
param_dict = {}
modify = False
with open('param_lock.json', 'r') as fp:
param_dict = json.load(fp)
if job_name not in param_dict:
param_dict[job_name] = trainable_count
modify = True
elif param_dict[job_name] != trainable_count:
param_dict[job_name] = trainable_count
modify = True
if modify:
json_file = json.dumps(param_dict)
with open('param_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('param_lock.json', 'param.json')
ckpt_qual_dict = {}
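# ckpt_qual.json doubles as a simple rename-based lock: poll until the file
# exists, claim it by renaming it to ckpt_qual_lock.json, update it, then
# rename it back to release it.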
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
google/cloud/forseti/common/util/metadata_server.py | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata server utilities.
The metadata server is only accessible on GCE.
"""
import http.client
import os
import socket
from future import standard_library
from google.cloud.forseti.common.util import errors
from google.cloud.forseti.common.util import logger
standard_library.install_aliases()
# This is used to ping the metadata server, it avoids the cost of a DNS
# lookup.
_METADATA_IP = '{}'.format(
os.getenv('GCE_METADATA_IP', '169.254.169.254'))
METADATA_SERVER_HOSTNAME = 'metadata.google.internal'
METADATA_SERVER_CONN_TIMEOUT = 2
_METADATA_FLAVOR_HEADER = 'metadata-flavor'
_METADATA_FLAVOR_VALUE = 'Google'
REQUIRED_METADATA_HEADER = {_METADATA_FLAVOR_HEADER: _METADATA_FLAVOR_VALUE}
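# For reference, the equivalent manual check from inside a GCE instance is:
#   curl -H "Metadata-Flavor: Google" http://169.254.169.254/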
HTTP_SUCCESS = http.HTTPStatus.OK
HTTP_GET = 'GET'
LOGGER = logger.get_logger(__name__)
def _obtain_http_client(hostname=METADATA_SERVER_HOSTNAME):
"""Get an HTTP client to the GCP metadata server.
Args:
hostname (str): A qualified hostname.
Returns:
HttpClient: A simple HTTP client to the GCP metadata server.
"""
return http.client.HTTPConnection(hostname,
timeout=METADATA_SERVER_CONN_TIMEOUT)
def _issue_http_request(method, path, headers):
"""Perform a request on a specified httplib connection object.
Args:
method (str): The http request method.
path (str): The path on the server.
headers (dict): A key-value pairs of headers.
Returns:
httplib.HTTPResponse: The HTTP response object.
Raises:
MetadataServerHttpError: When we can't reach the requested host.
"""
http_client = _obtain_http_client()
try:
http_client.request(method, path, headers=headers)
return http_client.getresponse()
except (socket.error, http.client.HTTPException):
LOGGER.exception('Error occurred while issuing http request.')
raise errors.MetadataServerHttpError
def can_reach_metadata_server():
"""Determine if we can reach the metadata server.
Returns:
bool: True if metadata server can be reached, False otherwise.
"""
try:
http_client = _obtain_http_client(hostname=_METADATA_IP)
http_client.request('GET', '/', headers=REQUIRED_METADATA_HEADER)
response = http_client.getresponse()
metadata_flavor = response.getheader(_METADATA_FLAVOR_HEADER, '')
return (response.status == http.HTTPStatus.OK and
metadata_flavor == _METADATA_FLAVOR_VALUE)
except (socket.error, http.client.HTTPException) as e:
LOGGER.warning('Compute Engine Metadata server unreachable: %s', e)
return False
def get_value_for_attribute(attribute):
"""For a given key return the value.
Args:
attribute (str): Some metadata key.
Returns:
str: The value of the requested key, or None if the key isn't present.
"""
path = '/computeMetadata/v1/instance/attributes/%s' % attribute
try:
http_response = _issue_http_request(
HTTP_GET, path, REQUIRED_METADATA_HEADER)
return http_response.read()
except (TypeError, ValueError, errors.MetadataServerHttpError):
LOGGER.exception('Unable to read value for attribute key %s '
'from metadata server.', attribute)
return None
def get_project_id():
"""Get the project id from the metadata server.
Returns:
str: The value of the project id; on error, returns None.
"""
path = '/computeMetadata/v1/project/project-id'
try:
http_response = _issue_http_request(
HTTP_GET, path, REQUIRED_METADATA_HEADER)
return http_response.read()
except errors.MetadataServerHttpError:
LOGGER.exception('Unable to read project id from metadata server.')
return None
| []
| []
| [
"GCE_METADATA_IP"
]
| [] | ["GCE_METADATA_IP"] | python | 1 | 0 | |
cmd/kubelet/kubelet.go | /*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// The kubelet binary is responsible for maintaining a set of containers on a particular host VM.
// It syncs data from both configuration file(s) as well as from a quorum of etcd servers.
// It then queries Docker to see what is currently running. It synchronizes the configuration data,
// with the running set of containers by starting or stopping Docker containers.
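//
// Example invocation (flag values are only illustrative):
//   kubelet -etcd_servers=http://127.0.0.1:4001 -config=/etc/kubernetes/manifests -address=0.0.0.0 -port=10250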
package main
import (
"flag"
"math/rand"
"os"
"os/exec"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
)
var (
config = flag.String("config", "", "Path to the config file or directory of files")
etcdServers = flag.String("etcd_servers", "", "Url of etcd servers in the cluster")
syncFrequency = flag.Duration("sync_frequency", 10*time.Second, "Max period between synchronizing running containers and config")
fileCheckFrequency = flag.Duration("file_check_frequency", 20*time.Second, "Duration between checking config files for new data")
httpCheckFrequency = flag.Duration("http_check_frequency", 20*time.Second, "Duration between checking http for new data")
manifestURL = flag.String("manifest_url", "", "URL for accessing the container manifest")
address = flag.String("address", "127.0.0.1", "The address for the info server to serve on")
port = flag.Uint("port", 10250, "The port for the info server to serve on")
hostnameOverride = flag.String("hostname_override", "", "If non-empty, will use this string as identification instead of the actual hostname.")
dockerEndpoint = flag.String("docker_endpoint", "", "If non-empty, use this for the docker endpoint to communicate with")
)
func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())
// Set up logger for etcd client
etcd.SetLogger(util.NewLogger("etcd "))
var endpoint string
if len(*dockerEndpoint) > 0 {
endpoint = *dockerEndpoint
} else if len(os.Getenv("DOCKER_HOST")) > 0 {
endpoint = os.Getenv("DOCKER_HOST")
} else {
endpoint = "unix:///var/run/docker.sock"
}
glog.Infof("Connecting to docker on %s", endpoint)
dockerClient, err := docker.NewClient(endpoint)
if err != nil {
		glog.Fatal("Couldn't connect to docker.")
}
hostname := []byte(*hostnameOverride)
if string(hostname) == "" {
// Note: We use exec here instead of os.Hostname() because we
// want the FQDN, and this is the easiest way to get it.
hostname, err = exec.Command("hostname", "-f").Output()
if err != nil {
glog.Fatalf("Couldn't determine hostname: %v", err)
}
}
k := kubelet.Kubelet{
Hostname: string(hostname),
DockerClient: dockerClient,
FileCheckFrequency: *fileCheckFrequency,
SyncFrequency: *syncFrequency,
HTTPCheckFrequency: *httpCheckFrequency,
}
k.RunKubelet(*dockerEndpoint, *config, *manifestURL, *etcdServers, *address, *port)
}
| [
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
fetch-all-questions.go | // StackOverflow analysis using its API in Go.
//
// This program just fetches data from the StackOverflow API. The idea is that
// you run it once to fetch all the data you need, and can then analyze this
// data locally by repeatedly invoking analyze-question-sentiment with different
// parameters.
//
// To get the increased API quota, get a key from stackapps.com and run with the
// env var STACK_KEY=<key>
//
// Eli Bendersky [https://eli.thegreenplace.net]
// This code is in the public domain.
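//
// Example invocation (tag and date values are only illustrative):
//   STACK_KEY=<key> go run fetch-all-questions.go -dir=data -tags=go -fromdate=2021-01-12 -todate=2021-02-12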
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// Base query built with the explorer on
// https://api.stackexchange.com/docs/questions
//
// "https://api.stackexchange.com/2.2/questions?page=2&pagesize=100&fromdate=1610409600&todate=1613088000&order=desc&sort=activity&tagged=go&site=stackoverflow"
// Struct generated with https://mholt.github.io/json-to-go/
type Reply struct {
Items []struct {
Tags []string `json:"tags"`
Owner struct {
Reputation int `json:"reputation"`
UserID int `json:"user_id"`
UserType string `json:"user_type"`
ProfileImage string `json:"profile_image"`
DisplayName string `json:"display_name"`
Link string `json:"link"`
} `json:"owner"`
IsAnswered bool `json:"is_answered"`
ClosedDate int64 `json:"closed_date"`
ViewCount int `json:"view_count"`
AcceptedAnswerID int `json:"accepted_answer_id,omitempty"`
AnswerCount int `json:"answer_count"`
Score int `json:"score"`
LastActivityDate int `json:"last_activity_date"`
CreationDate int `json:"creation_date"`
LastEditDate int `json:"last_edit_date"`
QuestionID int `json:"question_id"`
ContentLicense string `json:"content_license"`
Link string `json:"link"`
Title string `json:"title"`
} `json:"items"`
HasMore bool `json:"has_more"`
QuotaMax int `json:"quota_max"`
QuotaRemaining int `json:"quota_remaining"`
Total int `json:"total"`
}
func makePageQuery(page int, tag string, fromDate time.Time, toDate time.Time) string {
v := url.Values{}
v.Set("page", strconv.Itoa(page))
v.Set("pagesize", strconv.Itoa(100))
v.Set("fromdate", strconv.FormatInt(fromDate.Unix(), 10))
v.Set("todate", strconv.FormatInt(toDate.Unix(), 10))
v.Set("order", "desc")
v.Set("sort", "activity")
v.Set("tagged", tag)
v.Set("site", "stackoverflow")
v.Set("key", os.Getenv("STACK_KEY"))
return v.Encode()
}
func fetchResults(baseDir string, tags []string, fromDate time.Time, toDate time.Time, erase bool) {
for _, tag := range tags {
dirName := fmt.Sprintf("%s/%s", baseDir, tag)
if erase {
// Clear out subdirectory if it already exists
fmt.Println("Erasing directory", dirName)
os.RemoveAll(dirName)
}
os.Mkdir(dirName, 0777)
if !isEmptyDir(dirName) {
log.Fatalf("Directory %s is not empty. You may clear previous data with -erase", dirName)
}
fmt.Println("")
fmt.Printf("Fetching tag '%s' to dir '%s'\n", tag, dirName)
for page := 1; ; page++ {
qs := makePageQuery(page, tag, fromDate, toDate)
url := "https://api.stackexchange.com/2.2/questions?" + qs
fmt.Println(url)
resp, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
fmt.Println("Response status:", resp.Status)
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
pageFilename := fmt.Sprintf("%s/so%03d.json", dirName, page)
err = os.WriteFile(pageFilename, body, 0644)
if err != nil {
log.Fatal(err)
}
fmt.Println("Wrote", pageFilename)
var reply Reply
if err = json.Unmarshal(body, &reply); err != nil {
log.Fatal(err)
}
if !reply.HasMore {
break
}
// Try not to get throttled...
time.Sleep(300 * time.Millisecond)
}
}
}
func isEmptyDir(dirpath string) bool {
dir, err := os.Open(dirpath)
if err != nil {
log.Fatal(err)
}
defer dir.Close()
_, err = dir.Readdirnames(1)
b := err == io.EOF
// true if couldn't find 1 entry in dir
return b
}
func mustParseTime(date string) time.Time {
if len(strings.TrimSpace(date)) == 0 {
log.Fatal("empty time string")
}
t, err := time.Parse("2006-01-02", date)
if err != nil {
log.Fatal(err)
}
return t
}
func main() {
dirFlag := flag.String("dir", "", "base directory to store results")
fromDate := flag.String("fromdate", "", "start date in 2006-01-02 format")
toDate := flag.String("todate", "", "end date in 2006-01-02 format")
tagsFlag := flag.String("tags", "", "tags separated by commas")
eraseFlag := flag.Bool("erase", false, "erase previous contents of fetched directories")
flag.Parse()
fDate := mustParseTime(*fromDate)
tDate := mustParseTime(*toDate)
tags := strings.Split(*tagsFlag, ",")
if len(*dirFlag) == 0 {
log.Fatal("-dir must be provided and cannot be empty")
}
if len(*tagsFlag) == 0 || len(tags) == 0 {
log.Fatal("provide at least one tag with -tags")
}
// Try to create the directory; ignore error (if it already exists, etc.)
_ = os.Mkdir(*dirFlag, 0777)
fetchResults(*dirFlag, tags, fDate, tDate, *eraseFlag)
}
| [
"\"STACK_KEY\""
]
| []
| [
"STACK_KEY"
]
| [] | ["STACK_KEY"] | go | 1 | 0 | |
pkg/client/clientcmd/client_config.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"fmt"
"io"
"net/url"
"os"
"github.com/imdario/mergo"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth"
)
var (
// DefaultCluster is the cluster config used when no other config is specified
// TODO: eventually apiserver should start on 443 and be secure by default
DefaultCluster = clientcmdapi.Cluster{Server: "http://localhost:8080"}
// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}
)
// ClientConfig is used to make it easy to get an api server client
type ClientConfig interface {
// RawConfig returns the merged result of all overrides
RawConfig() (clientcmdapi.Config, error)
// ClientConfig returns a complete client config
ClientConfig() (*client.Config, error)
// Namespace returns the namespace resulting from the merged result of all overrides
Namespace() (string, error)
}
// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
type DirectClientConfig struct {
config clientcmdapi.Config
contextName string
overrides *ConfigOverrides
fallbackReader io.Reader
}
// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig {
return DirectClientConfig{config, config.CurrentContext, overrides, nil}
}
// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides) ClientConfig {
return DirectClientConfig{config, contextName, overrides, nil}
}
// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
return DirectClientConfig{config, contextName, overrides, fallbackReader}
}
func (config DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
return config.config, nil
}
// ClientConfig implements ClientConfig
func (config DirectClientConfig) ClientConfig() (*client.Config, error) {
if err := config.ConfirmUsable(); err != nil {
return nil, err
}
configAuthInfo := config.getAuthInfo()
configClusterInfo := config.getCluster()
clientConfig := &client.Config{}
clientConfig.Host = configClusterInfo.Server
if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
clientConfig.Prefix = u.Path
u.Path = ""
u.RawQuery = ""
u.Fragment = ""
clientConfig.Host = u.String()
}
clientConfig.Version = configClusterInfo.APIVersion
// only try to read the auth information if we are secure
if client.IsConfigTransportTLS(*clientConfig) {
var err error
// mergo is a first write wins for map value and a last writing wins for interface values
userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader)
if err != nil {
return nil, err
}
mergo.Merge(clientConfig, userAuthPartialConfig)
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
if err != nil {
return nil, err
}
mergo.Merge(clientConfig, serverAuthPartialConfig)
}
return clientConfig, nil
}
// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately.
// We want this order of precedence for the server identification:
// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. load the ~/.kubernetes_auth file as a default
func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*client.Config, error) {
mergedConfig := &client.Config{}
// configClusterInfo holds the information identify the server provided by .kubeconfig
configClientConfig := &client.Config{}
configClientConfig.CAFile = configClusterInfo.CertificateAuthority
configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
mergo.Merge(mergedConfig, configClientConfig)
return mergedConfig, nil
}
// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately.
// We want this order of precedence for user identification:
// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader) (*client.Config, error) {
mergedConfig := &client.Config{}
// blindly overwrite existing values based on precedence
if len(configAuthInfo.Token) > 0 {
mergedConfig.BearerToken = configAuthInfo.Token
}
if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
mergedConfig.CertFile = configAuthInfo.ClientCertificate
mergedConfig.CertData = configAuthInfo.ClientCertificateData
mergedConfig.KeyFile = configAuthInfo.ClientKey
mergedConfig.KeyData = configAuthInfo.ClientKeyData
}
if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
mergedConfig.Username = configAuthInfo.Username
mergedConfig.Password = configAuthInfo.Password
}
// if there still isn't enough information to authenticate the user, try prompting
if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
prompter := NewPromptingAuthLoader(fallbackReader)
promptedAuthInfo := prompter.Prompt()
promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
previouslyMergedConfig := mergedConfig
mergedConfig = &client.Config{}
mergo.Merge(mergedConfig, promptedConfig)
mergo.Merge(mergedConfig, previouslyMergedConfig)
}
return mergedConfig, nil
}
// makeUserIdentificationConfig returns a client.Config capable of being merged using mergo for only user identification information
func makeUserIdentificationConfig(info clientauth.Info) *client.Config {
config := &client.Config{}
config.Username = info.User
config.Password = info.Password
config.CertFile = info.CertFile
config.KeyFile = info.KeyFile
config.BearerToken = info.BearerToken
return config
}
// makeServerIdentificationConfig returns a client.Config capable of being merged using mergo for only server identification information
func makeServerIdentificationConfig(info clientauth.Info) client.Config {
config := client.Config{}
config.CAFile = info.CAFile
if info.Insecure != nil {
config.Insecure = *info.Insecure
}
return config
}
func canIdentifyUser(config client.Config) bool {
return len(config.Username) > 0 ||
(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
len(config.BearerToken) > 0
}
// Namespace implements KubeConfig
func (config DirectClientConfig) Namespace() (string, error) {
if err := config.ConfirmUsable(); err != nil {
return "", err
}
configContext := config.getContext()
if len(configContext.Namespace) == 0 {
return api.NamespaceDefault, nil
}
return configContext.Namespace, nil
}
// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
func (config DirectClientConfig) ConfirmUsable() error {
validationErrors := make([]error, 0)
validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
return newErrConfigurationInvalid(validationErrors)
}
func (config DirectClientConfig) getContextName() string {
if len(config.overrides.CurrentContext) != 0 {
return config.overrides.CurrentContext
}
if len(config.contextName) != 0 {
return config.contextName
}
return config.config.CurrentContext
}
func (config DirectClientConfig) getAuthInfoName() string {
if len(config.overrides.Context.AuthInfo) != 0 {
return config.overrides.Context.AuthInfo
}
return config.getContext().AuthInfo
}
func (config DirectClientConfig) getClusterName() string {
if len(config.overrides.Context.Cluster) != 0 {
return config.overrides.Context.Cluster
}
return config.getContext().Cluster
}
func (config DirectClientConfig) getContext() clientcmdapi.Context {
contexts := config.config.Contexts
contextName := config.getContextName()
var mergedContext clientcmdapi.Context
if configContext, exists := contexts[contextName]; exists {
mergo.Merge(&mergedContext, configContext)
}
mergo.Merge(&mergedContext, config.overrides.Context)
return mergedContext
}
func (config DirectClientConfig) getAuthInfo() clientcmdapi.AuthInfo {
authInfos := config.config.AuthInfos
authInfoName := config.getAuthInfoName()
var mergedAuthInfo clientcmdapi.AuthInfo
if configAuthInfo, exists := authInfos[authInfoName]; exists {
mergo.Merge(&mergedAuthInfo, configAuthInfo)
}
mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo)
return mergedAuthInfo
}
func (config DirectClientConfig) getCluster() clientcmdapi.Cluster {
clusterInfos := config.config.Clusters
clusterInfoName := config.getClusterName()
var mergedClusterInfo clientcmdapi.Cluster
mergo.Merge(&mergedClusterInfo, DefaultCluster)
mergo.Merge(&mergedClusterInfo, EnvVarCluster)
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
mergo.Merge(&mergedClusterInfo, configClusterInfo)
}
mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo)
return mergedClusterInfo
}
// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
type inClusterClientConfig struct{}
func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
}
func (inClusterClientConfig) ClientConfig() (*client.Config, error) {
return client.InClusterConfig()
}
func (inClusterClientConfig) Namespace() (string, error) {
// TODO: generic way to figure out what namespace you are running in?
// This way assumes you've set the POD_NAMESPACE environment variable
// using the downward API.
if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
return ns, nil
}
return "default", nil
}
// Possible returns true if loading an inside-kubernetes-cluster is possible.
func (inClusterClientConfig) Possible() bool {
fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
err == nil && !fi.IsDir()
}
| [
"\"KUBERNETES_MASTER\"",
"\"POD_NAMESPACE\"",
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"POD_NAMESPACE",
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT",
"KUBERNETES_MASTER"
]
| [] | ["POD_NAMESPACE", "KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBERNETES_MASTER"] | go | 4 | 0 | |
examples/18-ship-order-completely.py | # Example: Create a shipment for an entire order using the Mollie API.
#
import os
import flask
from mollie.api.client import Client
from mollie.api.error import Error
def main():
try:
#
# Initialize the Mollie API library with your API key.
#
# See: https://www.mollie.com/dashboard/settings/profiles
#
api_key = os.environ.get("MOLLIE_API_KEY", "test_test")
mollie_client = Client()
mollie_client.set_api_key(api_key)
#
# Create a shipment for your entire first order
#
# See: https://docs.mollie.com/reference/v2/shipments-api/create-shipment
#
body = ""
order_id = flask.request.args.get("order_id")
if order_id is None:
body += "<p>No order ID specified. Attempting to retrieve the first page of "
body += "orders and grabbing the first.</p>"
order = mollie_client.orders.get(order_id) if order_id else next(mollie_client.orders.list())
shipment = order.create_shipment()
body += f"A shipment with ID {shipment.id} has been created for your order with ID {order.id}"
for line in shipment.lines:
body += f"{line.name} Status: <b>{line.status}</b>"
return body
except Error as err:
return f"API call failed: {err}"
if __name__ == "__main__":
print(main())
| []
| []
| [
"MOLLIE_API_KEY"
]
| [] | ["MOLLIE_API_KEY"] | python | 1 | 0 | |
example/azure-loganalytics/golang/azureloganalytics.go | package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
dif "github.com/turbonomic/turbo-go-sdk/pkg/dataingestionframework/data"
"gopkg.in/yaml.v2"
)
const (
metricPath = "/metrics"
port = 8081
baseLoginURL = "https://login.microsoftonline.com/"
baseQueryURL = "https://api.loganalytics.io/"
defaultResource = "https://api.loganalytics.io"
defaultRedirectURI = "http://localhost:3000/login"
defaultGrantType = "client_credentials"
defaultTargetInfoLocation = "/etc/targets"
queryComputer = `
Heartbeat | summarize arg_max(TimeGenerated, *) by Computer | project Computer, ComputerIP`
queryMemory = `
Perf
| where TimeGenerated > ago(10m)
| where ObjectName == "Memory" and
(CounterName == "Used Memory MBytes" or // the name used in Linux records
CounterName == "Committed Bytes") // the name used in Windows records
| summarize avg(CounterValue) by Computer, CounterName, bin(TimeGenerated, 10m)
| order by TimeGenerated
`
)
var (
workspaces []string
tenantID string
clientID string
clientSecret string
hostMap map[string]string
client *http.Client
)
type Column struct {
Name string
Type string
}
type Table struct {
Columns []*Column
Name string
Rows [][]interface{}
}
type LogAnalyticsQueryRequest struct {
Query string `json:"query"`
}
type LogAnalyticsQueryResults struct {
Tables []*Table
}
type ErrorDetail struct {
Code string
Message string
Resources []string
Target string
Value string
}
type ErrorInfo struct {
Details []*ErrorDetail
Code string
Message string
}
type LogAnalyticsErrorResponse struct {
Errors *ErrorInfo
}
type AccessToken struct {
Type string `json:"token_type"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
Resource string `json:"resource"`
AccessToken string `json:"access_token"`
}
type TargetInfo struct {
ClientID string `yaml:"client"`
TenantID string `yaml:"tenant"`
ClientSecret string `yaml:"key"`
}
func init() {
_ = flag.Set("alsologtostderr", "true")
_ = flag.Set("stderrthreshold", "INFO")
_ = flag.Set("v", "2")
flag.Parse()
workspaceIDs := os.Getenv("AZURE_LOG_ANALYTICS_WORKSPACES")
if workspaceIDs == "" {
glog.Fatalf("AZURE_LOG_ANALYTICS_WORKSPACES is missing.")
}
workspaces = strings.Split(workspaceIDs, ",")
targetInfoLocation := os.Getenv("TARGET_INFO_LOCATION")
if targetInfoLocation == "" {
targetInfoLocation = defaultTargetInfoLocation
}
targetID := os.Getenv("TARGET_ID")
if targetID == "" {
glog.Fatalf("TARGET_ID is missing.")
}
targetInfoFilePath := path.Join(targetInfoLocation, targetID)
targetInfoFile, err := ioutil.ReadFile(targetInfoFilePath)
if err != nil {
glog.Fatalf("Failed to read target info from file %v: %v", targetInfoFilePath, err)
}
var targetInfo TargetInfo
err = yaml.Unmarshal(targetInfoFile, &targetInfo)
if err != nil {
glog.Fatalf("Failed to unmarshal target info from file %v: %v", targetInfoFilePath, err)
}
tenantID = targetInfo.TenantID
if tenantID == "" {
glog.Fatalf("Tenant ID is missing.")
}
clientID = targetInfo.ClientID
if clientID == "" {
glog.Fatalf("Client ID is missing.")
}
clientSecret = targetInfo.ClientSecret
if clientSecret == "" {
glog.Fatalf("Client secret is missing")
}
client = &http.Client{}
hostMap = make(map[string]string)
}
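// login performs the OAuth2 client-credentials flow against Azure AD and
// returns an access token, which doQuery later sends as a Bearer token.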
func login() (string, error) {
loginURL, _ := url.Parse(baseLoginURL)
loginURL.Path = path.Join(tenantID, "oauth2", "token")
data := url.Values{}
data.Set("grant_type", defaultGrantType)
data.Set("resource", defaultResource)
data.Set("redirect_uri", defaultRedirectURI)
data.Set("client_id", clientID)
data.Set("client_secret", clientSecret)
req, err := http.NewRequest(http.MethodPost, loginURL.String(), strings.NewReader(data.Encode()))
if err != nil {
return "", fmt.Errorf(
"failed to create request POST %v: %v", loginURL, err)
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
	res, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf(
			"request POST %v failed with error %v", loginURL, err)
	}
	defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf(
"failed to read response of request POST %v: %v", loginURL, err)
}
if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("request POST %v failed with status %v", loginURL, res.StatusCode)
}
var accessToken AccessToken
if err := json.Unmarshal(body, &accessToken); err != nil {
return "", fmt.Errorf(
"failed to unmarshal json [%v]: %v", string(body), err)
}
return accessToken.AccessToken, nil
}
func createAndSendTopology(w http.ResponseWriter, r *http.Request) {
// Always obtain a new token first
token, err := login()
if err != nil {
glog.Errorf("Failed to login: %v", err)
}
glog.V(2).Infof("Token: %s", token)
topology, err := createTopology(token)
if err != nil {
glog.Errorf("Failed to create topology: %v", err)
}
sendResult(topology, w, r)
}
func createTopology(token string) (*dif.Topology, error) {
// Create topology
topology := dif.NewTopology().SetUpdateTime()
// Iterate through all workspaces
for _, workspace := range workspaces {
queryResults, err := doQuery(token, queryComputer, workspace)
if err != nil {
glog.Errorf("failed to query computer: %v", err)
continue
}
for _, table := range queryResults.Tables {
for _, row := range table.Rows {
// row[0]: Computer
// row[1]: ComputerIP
computer := row[0].(string)
computerIP := row[1].(string)
hostMap[computer] = computerIP
}
}
glog.V(2).Infof(spew.Sdump(hostMap))
queryResults, err = doQuery(token, queryMemory, workspace)
if err != nil {
glog.Errorf("failed to query memory: %v", err)
continue
}
hostSeen := make(map[string]bool)
for _, table := range queryResults.Tables {
for _, row := range table.Rows {
// row[0]: Computer
// row[1]: CounterName
// row[2]: TimeGenerated
// row[3]: CounterValue
computer := row[0].(string)
computerIP, found := hostMap[computer]
if !found {
glog.Warningf("Cannot find IP address for computer %s.", computer)
continue
}
seen, _ := hostSeen[computer]
if seen {
continue
}
hostSeen[computer] = true
counterName := row[1].(string)
var avgMemUsedKB float64
if counterName == "Used Memory MBytes" {
avgMemUsedKB = row[3].(float64) * 1024
} else if counterName == "Committed Bytes" {
avgMemUsedKB = row[3].(float64) / 1024
} else {
glog.Warningf("Unrecognized CounterName %s.", counterName)
}
// Create the VM entity
vm := dif.NewDIFEntity(computer, "virtualMachine").Matching(computerIP)
// Add the memory metrics
vm.AddMetrics("memory", []*dif.DIFMetricVal{{
Average: &avgMemUsedKB,
}})
topology.AddEntity(vm)
}
}
}
return topology, nil
}
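// doQuery POSTs a Log Analytics (Kusto) query to the given workspace and
// decodes the tabular JSON response into LogAnalyticsQueryResults.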
func doQuery(token, query, workspace string) (*LogAnalyticsQueryResults, error) {
queryURL, _ := url.Parse(baseQueryURL)
queryURL.Path = path.Join("v1", "workspaces", workspace, "query")
data, _ := json.Marshal(&LogAnalyticsQueryRequest{Query: query})
glog.V(2).Infof("Marshalled JSON %s", string(data))
req, err := http.NewRequest(http.MethodPost, queryURL.String(), bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf(
"failed to create request POST %v: %v", queryURL, err)
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
	res, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf(
			"request POST %v failed with error %v", queryURL, err)
	}
	defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf(
"failed to read response of request POST %v: %v", queryURL, err)
}
if res.StatusCode != http.StatusOK {
var errorResponse LogAnalyticsErrorResponse
_ = json.Unmarshal(body, &errorResponse)
return nil, fmt.Errorf("request POST %v failed with status %v and error %v",
queryURL, res.StatusCode, errorResponse)
}
glog.V(2).Infof("Response body: %s", string(body))
var queryResults LogAnalyticsQueryResults
if err := json.Unmarshal(body, &queryResults); err != nil {
return nil, fmt.Errorf(
"failed to unmarshal json [%v]: %v", string(body), err)
}
return &queryResults, nil
}
func sendResult(topology *dif.Topology, w http.ResponseWriter, r *http.Request) {
var status = http.StatusOK
var result []byte
var err error
defer func() {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
w.Write(result)
}()
// Marshal to json
if result, err = json.Marshal(topology); err != nil {
status = http.StatusInternalServerError
result = []byte(fmt.Sprintf("{\"status\": \"%v\"}", err))
}
glog.V(2).Infof("Sending result: %v.", string(result))
}
func main() {
http.HandleFunc(metricPath, createAndSendTopology)
err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
if err != nil {
glog.Fatalf("Failed to create server: %v.", err)
}
}
| [
"\"AZURE_LOG_ANALYTICS_WORKSPACES\"",
"\"TARGET_INFO_LOCATION\"",
"\"TARGET_ID\""
]
| []
| [
"TARGET_INFO_LOCATION",
"AZURE_LOG_ANALYTICS_WORKSPACES",
"TARGET_ID"
]
| [] | ["TARGET_INFO_LOCATION", "AZURE_LOG_ANALYTICS_WORKSPACES", "TARGET_ID"] | go | 3 | 0 | |
selfdrive/locationd/test/ubloxd_easy.py | #!/usr/bin/env python
import os
import time
from selfdrive.locationd.test import ublox
from common import realtime
from selfdrive.locationd.test.ubloxd import gen_raw, gen_solution
import zmq
import selfdrive.messaging as messaging
from selfdrive.services import service_list
from selfdrive.car.tesla.readconfig import read_config_file,CarSettings
unlogger = os.getenv("UNLOGGER") is not None # debug prints
def main(gctx=None):
poller = zmq.Poller()
if not CarSettings().get_value("useTeslaGPS"):
gpsLocationExternal = messaging.pub_sock(service_list['gpsLocationExternal'].port)
ubloxGnss = messaging.pub_sock(service_list['ubloxGnss'].port)
# ubloxRaw = messaging.sub_sock(service_list['ubloxRaw'].port, poller)
# buffer with all the messages that still need to be input into the kalman
while 1:
polld = poller.poll(timeout=1000)
for sock, mode in polld:
if mode != zmq.POLLIN:
continue
logs = messaging.drain_sock(sock)
for log in logs:
buff = log.ubloxRaw
ttime = log.logMonoTime
msg = ublox.UBloxMessage()
msg.add(buff)
if msg.valid():
if msg.name() == 'NAV_PVT':
sol = gen_solution(msg)
if unlogger:
sol.logMonoTime = ttime
else:
sol.logMonoTime = int(realtime.sec_since_boot() * 1e9)
gpsLocationExternal.send(sol.to_bytes())
elif msg.name() == 'RXM_RAW':
raw = gen_raw(msg)
if unlogger:
raw.logMonoTime = ttime
else:
raw.logMonoTime = int(realtime.sec_since_boot() * 1e9)
ubloxGnss.send(raw.to_bytes())
else:
print "INVALID MESSAGE"
else:
while True:
time.sleep(1.1)
if __name__ == "__main__":
main()
| []
| []
| [
"UNLOGGER"
]
| [] | ["UNLOGGER"] | python | 1 | 0 | |
src/internal_local/goroot/gc.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
package goroot
import (
exec "internal_local/execabs"
"os"
"path/filepath"
"strings"
"sync"
)
// IsStandardPackage reports whether path is a standard package,
// given goroot and compiler.
func IsStandardPackage(goroot, compiler, path string) bool {
switch compiler {
case "gc":
dir := filepath.Join(goroot, "src", path)
_, err := os.Stat(dir)
return err == nil
case "gccgo":
return gccgoSearch.isStandard(path)
default:
panic("unknown compiler " + compiler)
}
}
// gccgoSearch holds the gccgo search directories.
type gccgoDirs struct {
once sync.Once
dirs []string
}
// gccgoSearch is used to check whether a gccgo package exists in the
// standard library.
var gccgoSearch gccgoDirs
// init finds the gccgo search directories. If this fails it leaves dirs == nil.
func (gd *gccgoDirs) init() {
gccgo := os.Getenv("GCCGO")
if gccgo == "" {
gccgo = "gccgo"
}
bin, err := exec.LookPath(gccgo)
if err != nil {
return
}
allDirs, err := exec.Command(bin, "-print-search-dirs").Output()
if err != nil {
return
}
versionB, err := exec.Command(bin, "-dumpversion").Output()
if err != nil {
return
}
version := strings.TrimSpace(string(versionB))
machineB, err := exec.Command(bin, "-dumpmachine").Output()
if err != nil {
return
}
machine := strings.TrimSpace(string(machineB))
dirsEntries := strings.Split(string(allDirs), "\n")
const prefix = "libraries: ="
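	// A matching line of `gccgo -print-search-dirs` output looks roughly like
	//   libraries: =/usr/lib/gcc/x86_64-linux-gnu/10:...
	// (the path shown here is only illustrative).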
var dirs []string
for _, dirEntry := range dirsEntries {
if strings.HasPrefix(dirEntry, prefix) {
dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix))
break
}
}
if len(dirs) == 0 {
return
}
var lastDirs []string
for _, dir := range dirs {
goDir := filepath.Join(dir, "go", version)
if fi, err := os.Stat(goDir); err == nil && fi.IsDir() {
gd.dirs = append(gd.dirs, goDir)
goDir = filepath.Join(goDir, machine)
if fi, err = os.Stat(goDir); err == nil && fi.IsDir() {
gd.dirs = append(gd.dirs, goDir)
}
}
if fi, err := os.Stat(dir); err == nil && fi.IsDir() {
lastDirs = append(lastDirs, dir)
}
}
gd.dirs = append(gd.dirs, lastDirs...)
}
// isStandard reports whether path is a standard library for gccgo.
func (gd *gccgoDirs) isStandard(path string) bool {
// Quick check: if the first path component has a '.', it's not
// in the standard library. This skips most GOPATH directories.
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
if strings.Contains(path[:i], ".") {
return false
}
if path == "unsafe" {
// Special case.
return true
}
gd.once.Do(gd.init)
if gd.dirs == nil {
// We couldn't find the gccgo search directories.
// Best guess, since the first component did not contain
// '.', is that this is a standard library package.
return true
}
for _, dir := range gd.dirs {
full := filepath.Join(dir, path) + ".gox"
if fi, err := os.Stat(full); err == nil && !fi.IsDir() {
return true
}
}
return false
}
| [
"\"GCCGO\""
]
| []
| [
"GCCGO"
]
| [] | ["GCCGO"] | go | 1 | 0 | |
app.py | # ----------------------------------------------------------------------------#
# Imports
# ----------------------------------------------------------------------------#
from flask import Flask, flash, request, redirect, render_template, Response
from werkzeug.utils import secure_filename # from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
import os
import secrets
import stat
from tika import parser
import sys
# ----------------------------------------------------------------------------#
# App Config.
# ----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
app.secret_key = secrets.token_urlsafe(16)
UPLOAD_FOLDER = ''
for i in sys.path:
if 'Pycharm' in i:
UPLOAD_FOLDER += '/home/daniel/PycharmProjects/pdfscholar/files'
break
else:
UPLOAD_FOLDER += '/home/DanielHHowell/pdfscholar/files'
break
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 1024
app.jinja_env.lstrip_blocks = False
app.jinja_env.trim_blocks = False
app.jinja_env.keep_trailing_newline = False
basedir = os.path.abspath(os.path.dirname(__file__))
ALLOWED_EXTENSIONS = set(['TXT', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
# ----------------------------------------------------------------------------#
# Controllers.
# ----------------------------------------------------------------------------#
@app.route('/')
def home():
itemList = os.listdir(UPLOAD_FOLDER)
return render_template('pages/home.html', itemList=itemList)
@app.route('/', methods=['POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
pathloc = os.path.join(app.config['UPLOAD_FOLDER'], 'PDF', file.filename)
file.save(pathloc)
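        # Apache Tika extracts the text content from the saved PDF; the text is
        # then written to a .txt file of the same name under the TXT folder.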
processed = parser.from_file(pathloc)['content']
txtname = file.filename.split('.')[0] + '.txt'
with open(os.path.join(app.config['UPLOAD_FOLDER'], 'TXT', txtname), 'w+') as fw:
fw.write(processed)
return redirect('/')
# if file.filename == '':
# flash('No file selected for uploading')
# return redirect(request.url)
# if file and allowed_file(file.filename):
# #filename = secure_filename(file.filename)
# file = textract.process(file.read())
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'TXT', file.filename))
# return redirect('/')
# else:
# flash('Allowed file types are TXT, pdf, png, jpg, jpeg, gif')
# return redirect(request.url)
@app.route('/browser/<path:urlFilePath>/file')
def browser(urlFilePath):
nestedFilePath = os.path.join(UPLOAD_FOLDER, urlFilePath)
if os.path.isdir(nestedFilePath):
itemList = os.listdir(nestedFilePath)
fileProperties = {"filepath": nestedFilePath}
if not urlFilePath.startswith("/"):
urlFilePath = "/" + urlFilePath
return render_template('pages/home.html', urlFilePath=urlFilePath, itemList=itemList)
if os.path.isfile(nestedFilePath):
if not urlFilePath.startswith("/"):
urlFilePath = "/" + urlFilePath
print(urlFilePath)
with open(nestedFilePath, 'r') as f:
text = f.read()
groups = text.split("\n")
paragraphs = [i.replace("\n", "") for i in groups if (i != "")]
for i in range(5):
for i, j in enumerate(paragraphs):
if (j[-1] == ' ') or (j[-1] == '-'):
paragraphs[i:i + 2] = [''.join(paragraphs[i:i + 2])]
return render_template('pages/file.html', text=paragraphs)
# return Response(text, mimetype='text/plain')
return 'something bad happened'
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
# db_session.rollback()
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
kite-go/navigation/git/git_test.go | package git
import (
"os"
"path/filepath"
"testing"
"github.com/kiteco/kiteco/kite-go/navigation/localpath"
"github.com/kiteco/kiteco/kite-golib/kitectx"
"github.com/stretchr/testify/require"
)
func TestRepo(t *testing.T) {
kiteco := localpath.Absolute(filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "kiteco", "kiteco"))
s, err := NewStorage(StorageOptions{})
require.NoError(t, err)
var noCache []Commit
for i := 0; i < 3; i++ {
repo, err := Open(kiteco, DefaultComputedCommitsLimit, s)
require.NoError(t, err)
var batch []Commit
for j := 0; j < 10; j++ {
commit, err := repo.Next(kitectx.Background())
require.NoError(t, err)
batch = append(batch, commit)
if i == 0 {
noCache = append(noCache, commit)
continue
}
require.Equal(t, noCache[j], commit)
}
err = repo.Save(s)
require.NoError(t, err)
}
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
experiment/autobumper/bumper/bumper.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bumper
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/sirupsen/logrus"
imagebumper "k8s.io/test-infra/experiment/image-bumper/bumper"
"k8s.io/test-infra/prow/config/secret"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/robots/pr-creator/updater"
)
const (
prowPrefix = "gcr.io/k8s-prow/"
testImagePrefix = "gcr.io/k8s-testimages/"
prowRepo = "https://github.com/kubernetes/test-infra"
testImageRepo = prowRepo
forkRemoteName = "bumper-fork-remote"
latestVersion = "latest"
upstreamVersion = "upstream"
upstreamStagingVersion = "upstream-staging"
tagVersion = "vYYYYMMDD-deadbeef"
upstreamURLBase = "https://raw.githubusercontent.com/kubernetes/test-infra/master"
prowRefConfigFile = "config/prow/cluster/deck_deployment.yaml"
prowStagingRefConfigFile = "config/prow-staging/cluster/deck_deployment.yaml"
errOncallMsgTempl = "An error occurred while finding an assignee: `%s`.\nFalling back to Blunderbuss."
noOncallMsg = "Nobody is currently oncall, so falling back to Blunderbuss."
)
var (
tagRegexp = regexp.MustCompile("v[0-9]{8}-[a-f0-9]{6,9}")
imageMatcher = regexp.MustCompile(`(?s)^.+image:.+:(v[a-zA-Z0-9_.-]+)`)
)
type fileArrayFlag []string
func (af *fileArrayFlag) String() string {
return fmt.Sprint(*af)
}
func (af *fileArrayFlag) Set(value string) error {
for _, e := range strings.Split(value, ",") {
fn := strings.TrimSpace(e)
info, err := os.Stat(fn)
if err != nil {
return fmt.Errorf("error getting file info for %q", fn)
}
if info.IsDir() && !strings.HasSuffix(fn, string(os.PathSeparator)) {
fn = fn + string(os.PathSeparator)
}
*af = append(*af, fn)
}
return nil
}
// Options is the options for autobumper operations.
type Options struct {
GitHubOrg string
GitHubRepo string
GitHubLogin string
GitHubToken string
GitName string
GitEmail string
RemoteBranch string
OncallAddress string
BumpProwImages bool
BumpTestImages bool
TargetVersion string
IncludedConfigPaths fileArrayFlag
ExcludedConfigPaths fileArrayFlag
ExtraFiles fileArrayFlag
SkipPullRequest bool
}
func validateOptions(o *Options) error {
if !o.SkipPullRequest && o.GitHubToken == "" {
return fmt.Errorf("--github-token is mandatory when --skip-pull-request is false")
}
if !o.SkipPullRequest && (o.GitHubOrg == "" || o.GitHubRepo == "") {
return fmt.Errorf("--github-org and --github-repo are mandatory when --skip-pull-request is false")
}
if !o.SkipPullRequest && o.RemoteBranch == "" {
return fmt.Errorf("--remote-branch cannot be empty when --skip-pull-request is false")
}
if (o.GitEmail == "") != (o.GitName == "") {
return fmt.Errorf("--git-name and --git-email must be specified together")
}
if o.TargetVersion != latestVersion && o.TargetVersion != upstreamVersion &&
o.TargetVersion != upstreamStagingVersion && !tagRegexp.MatchString(o.TargetVersion) {
logrus.Warnf("Warning: --target-version is not one of %v so it might not work properly.",
[]string{latestVersion, upstreamVersion, upstreamStagingVersion, tagVersion})
}
if !o.BumpProwImages && !o.BumpTestImages {
return fmt.Errorf("at least one of --bump-prow-images and --bump-test-images must be specified")
}
if o.BumpProwImages && o.BumpTestImages && o.TargetVersion != latestVersion {
return fmt.Errorf("--target-version must be latest if you want to bump both prow and test images")
}
if o.BumpTestImages && (o.TargetVersion == upstreamVersion || o.TargetVersion == upstreamStagingVersion) {
return fmt.Errorf("%q and %q versions can only be specified to bump prow images", upstreamVersion, upstreamStagingVersion)
}
if len(o.IncludedConfigPaths) == 0 {
return fmt.Errorf("--include-config-paths is mandatory")
}
return nil
}
// Run is the entrypoint which will update Prow config files based on the provided options.
func Run(o *Options) error {
if err := validateOptions(o); err != nil {
return fmt.Errorf("error validating options: %w", err)
}
if err := cdToRootDir(); err != nil {
return fmt.Errorf("failed to change to root dir: %w", err)
}
images, err := UpdateReferences(
o.BumpProwImages, o.BumpTestImages, o.TargetVersion,
o.IncludedConfigPaths, o.ExcludedConfigPaths, o.ExtraFiles)
if err != nil {
return fmt.Errorf("failed to update image references: %w", err)
}
changed, err := HasChanges()
if err != nil {
return fmt.Errorf("error occurred when checking changes: %w", err)
}
if !changed {
logrus.Info("no images updated, exiting ...")
return nil
}
if o.SkipPullRequest {
logrus.Debugf("--skip-pull-request is set to true, won't create a pull request.")
} else {
var sa secret.Agent
if err := sa.Start([]string{o.GitHubToken}); err != nil {
return fmt.Errorf("failed to start secrets agent: %w", err)
}
gc := github.NewClient(sa.GetTokenGenerator(o.GitHubToken), sa.Censor, github.DefaultGraphQLEndpoint, github.DefaultAPIEndpoint)
if o.GitHubLogin == "" || o.GitName == "" || o.GitEmail == "" {
user, err := gc.BotUser()
if err != nil {
return fmt.Errorf("failed to get the user data for the provided GH token: %w", err)
}
if o.GitHubLogin == "" {
o.GitHubLogin = user.Login
}
if o.GitName == "" {
o.GitName = user.Name
}
if o.GitEmail == "" {
o.GitEmail = user.Email
}
}
remoteBranch := "autobump"
stdout := HideSecretsWriter{Delegate: os.Stdout, Censor: &sa}
stderr := HideSecretsWriter{Delegate: os.Stderr, Censor: &sa}
if err := MakeGitCommit(fmt.Sprintf("[email protected]:%s/test-infra.git", o.GitHubLogin), remoteBranch, o.GitName, o.GitEmail, images, stdout, stderr); err != nil {
return fmt.Errorf("failed to push changes to the remote branch: %w", err)
}
if err := UpdatePR(gc, o.GitHubOrg, o.GitHubRepo, images, getAssignment(o.OncallAddress), "Update prow to", o.GitHubLogin+":"+remoteBranch, "master", updater.PreventMods); err != nil {
return fmt.Errorf("failed to create the PR: %w", err)
}
}
return nil
}
func cdToRootDir() error {
if bazelWorkspace := os.Getenv("BUILD_WORKSPACE_DIRECTORY"); bazelWorkspace != "" {
if err := os.Chdir(bazelWorkspace); err != nil {
return fmt.Errorf("failed to chdir to bazel workspace (%s): %w", bazelWorkspace, err)
}
return nil
}
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
output, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to get the repo's root directory: %w", err)
}
d := strings.TrimSpace(string(output))
logrus.Infof("Changing working directory to %s...", d)
return os.Chdir(d)
}
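// Call runs the given command with args, streaming its stdout and stderr to the provided writers.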
func Call(stdout, stderr io.Writer, cmd string, args ...string) error {
c := exec.Command(cmd, args...)
c.Stdout = stdout
c.Stderr = stderr
return c.Run()
}
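// Censor masks sensitive substrings (such as the GitHub token) in the given content.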
type Censor interface {
Censor(content []byte) []byte
}
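// HideSecretsWriter censors content before forwarding it to the delegate writer.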
type HideSecretsWriter struct {
Delegate io.Writer
Censor Censor
}
func (w HideSecretsWriter) Write(content []byte) (int, error) {
_, err := w.Delegate.Write(w.Censor.Censor(content))
if err != nil {
return 0, err
}
return len(content), nil
}
// UpdatePR uses the github client "gc" to update the PR in repo org/repo that matches
// "matchTitle", merging "source" into "branch".
// "images" contains the tag replacements returned from UpdateReferences; together with
// "extraLineInPRBody" it is used to generate the commit summary and the body of the PR.
func UpdatePR(gc github.Client, org, repo string, images map[string]string, extraLineInPRBody string, matchTitle, source, branch string, allowMods bool) error {
summary, err := makeCommitSummary(images)
if err != nil {
return err
}
return UpdatePullRequest(gc, org, repo, summary, generatePRBody(images, extraLineInPRBody), matchTitle, source, branch, allowMods)
}
// UpdatePullRequest uses the github client "gc" to update the PR in repo org/repo that matches
// "matchTitle", setting its "title" and "body" and merging "source" into "branch".
func UpdatePullRequest(gc github.Client, org, repo, title, body, matchTitle, source, branch string, allowMods bool) error {
return UpdatePullRequestWithLabels(gc, org, repo, title, body, matchTitle, source, branch, allowMods, nil)
}
func UpdatePullRequestWithLabels(gc github.Client, org, repo, title, body, matchTitle, source, branch string, allowMods bool, labels []string) error {
logrus.Info("Creating or updating PR...")
n, err := updater.EnsurePRWithLabels(org, repo, title, body, source, branch, matchTitle, allowMods, gc, labels)
if err != nil {
return fmt.Errorf("failed to ensure PR exists: %w", err)
}
logrus.Infof("PR %s/%s#%d will merge %s into %s: %s", org, repo, *n, source, branch, title)
return nil
}
// UpdateReferences updates the references to prow images and/or test images
// in every YAML file (*.yaml) under includeConfigPaths that is not under excludeConfigPaths,
// as well as in every file listed in extraFiles.
func UpdateReferences(bumpProwImages, bumpTestImages bool, targetVersion string,
includeConfigPaths []string, excludeConfigPaths []string, extraFiles []string) (map[string]string, error) {
logrus.Info("Bumping image references...")
filters := make([]string, 0)
if bumpProwImages {
filters = append(filters, prowPrefix)
}
if bumpTestImages {
filters = append(filters, testImagePrefix)
}
filterRegexp := regexp.MustCompile(strings.Join(filters, "|"))
var tagPicker func(string, string, string) (string, error)
var err error
switch targetVersion {
case latestVersion:
tagPicker = imagebumper.FindLatestTag
case upstreamVersion:
tagPicker, err = upstreamImageVersionResolver(upstreamURLBase + "/" + prowRefConfigFile)
if err != nil {
return nil, fmt.Errorf("failed to resolve the upstream Prow image version: %w", err)
}
case upstreamStagingVersion:
tagPicker, err = upstreamImageVersionResolver(upstreamURLBase + "/" + prowStagingRefConfigFile)
if err != nil {
return nil, fmt.Errorf("failed to resolve the upstream staging Prow image version: %w", err)
}
default:
tagPicker = func(imageHost, imageName, currentTag string) (string, error) { return targetVersion, nil }
}
updateFile := func(name string) error {
logrus.Infof("Updating file %s", name)
if err := imagebumper.UpdateFile(tagPicker, name, filterRegexp); err != nil {
return fmt.Errorf("failed to update the file: %w", err)
}
return nil
}
updateYAMLFile := func(name string) error {
if strings.HasSuffix(name, ".yaml") && !isUnderPath(name, excludeConfigPaths) {
return updateFile(name)
}
return nil
}
	// Update all .yaml files under the included config paths but not under the excluded config paths.
for _, path := range includeConfigPaths {
info, err := os.Stat(path)
if err != nil {
return nil, fmt.Errorf("failed to get the file info for %q", path)
}
if info.IsDir() {
err := filepath.Walk(path, func(subpath string, info os.FileInfo, err error) error {
return updateYAMLFile(subpath)
})
if err != nil {
return nil, fmt.Errorf("failed to update yaml files under %q: %w", path, err)
}
} else {
if err := updateYAMLFile(path); err != nil {
return nil, fmt.Errorf("failed to update the yaml file %q: %w", path, err)
}
}
}
// Update the extra files in any case.
for _, file := range extraFiles {
if err := updateFile(file); err != nil {
return nil, fmt.Errorf("failed to update the extra file %q: %w", file, err)
}
}
return imagebumper.GetReplacements(), nil
}
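// upstreamImageVersionResolver returns a tag picker that pins prow images to the version
// currently referenced by the config file at upstreamAddress.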
func upstreamImageVersionResolver(upstreamAddress string) (func(imageHost, imageName, currentTag string) (string, error), error) {
version, err := parseUpstreamImageVersion(upstreamAddress)
if err != nil {
return nil, fmt.Errorf("error resolving the upstream Prow version from %q: %w", upstreamAddress, err)
}
return func(imageHost, imageName, currentTag string) (string, error) {
// Skip boskos images as they do not have the same image tag as other Prow components.
// TODO(chizhg): remove this check after all Prow instances are using boskos images not in gcr.io/k8s-prow/boskos
if strings.Contains(imageName, "boskos/") {
return currentTag, nil
}
return version, nil
}, nil
}
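// parseUpstreamImageVersion fetches the file at upstreamAddress and extracts the image tag it references.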
func parseUpstreamImageVersion(upstreamAddress string) (string, error) {
resp, err := http.Get(upstreamAddress)
if err != nil {
return "", fmt.Errorf("error sending GET request to %q: %w", upstreamAddress, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("HTTP error %d (%q) fetching upstream config file", resp.StatusCode, resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("error reading the response body: %w", err)
}
res := imageMatcher.FindStringSubmatch(string(body))
if len(res) < 2 {
return "", fmt.Errorf("the image tag is malformatted: %v", res)
}
return res[1], nil
}
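// isUnderPath reports whether name is located under any of the given path prefixes.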
func isUnderPath(name string, paths []string) bool {
for _, p := range paths {
if p != "" && strings.HasPrefix(name, p) {
return true
}
}
return false
}
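// getNewProwVersion returns the single version all prow images were bumped to,
// or an error if the bumped images disagree on the version.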
func getNewProwVersion(images map[string]string) (string, error) {
found := map[string]bool{}
for k, v := range images {
if strings.HasPrefix(k, prowPrefix) {
found[v] = true
}
}
switch len(found) {
case 0:
return "", nil
case 1:
for version := range found {
return version, nil
}
}
return "", fmt.Errorf(
"Expected a consistent version for all %q images, but found multiple: %v",
prowPrefix,
found)
}
// HasChanges checks if the current git repo contains any changes
func HasChanges() (bool, error) {
cmd := "git"
args := []string{"status", "--porcelain"}
logrus.WithField("cmd", cmd).WithField("args", args).Info("running command ...")
combinedOutput, err := exec.Command(cmd, args...).CombinedOutput()
if err != nil {
logrus.WithField("cmd", cmd).Debugf("output is '%s'", string(combinedOutput))
return false, fmt.Errorf("error running command %s %s: %w", cmd, args, err)
}
return len(strings.TrimSuffix(string(combinedOutput), "\n")) > 0, nil
}
func makeCommitSummary(images map[string]string) (string, error) {
version, err := getNewProwVersion(images)
if err != nil {
return "", err
}
return fmt.Sprintf("Update prow to %s, and other images as necessary.", version), nil
}
// MakeGitCommit runs a sequence of git commands to commit and push the changes
// to "remote" on "remoteBranch".
// "name" and "email" are used for the git-commit command, and "images" (the tag
// replacements returned from UpdateReferences) is used to generate the commit message.
func MakeGitCommit(remote, remoteBranch, name, email string, images map[string]string, stdout, stderr io.Writer) error {
summary, err := makeCommitSummary(images)
if err != nil {
return err
}
return GitCommitAndPush(remote, remoteBranch, name, email, summary, stdout, stderr)
}
// GitCommitAndPush runs a sequence of git commands to commit.
// The "name", "email", and "message" are used for git-commit command
func GitCommitAndPush(remote, remoteBranch, name, email, message string, stdout, stderr io.Writer) error {
logrus.Info("Making git commit...")
if err := Call(stdout, stderr, "git", "add", "-A"); err != nil {
return fmt.Errorf("failed to git add: %w", err)
}
commitArgs := []string{"commit", "-m", message}
if name != "" && email != "" {
commitArgs = append(commitArgs, "--author", fmt.Sprintf("%s <%s>", name, email))
}
if err := Call(stdout, stderr, "git", commitArgs...); err != nil {
return fmt.Errorf("failed to git commit: %w", err)
}
if err := Call(stdout, stderr, "git", "remote", "add", forkRemoteName, remote); err != nil {
return fmt.Errorf("failed to add remote: %w", err)
}
fetchStderr := &bytes.Buffer{}
var remoteTreeRef string
if err := Call(stdout, fetchStderr, "git", "fetch", forkRemoteName, remoteBranch); err != nil && !strings.Contains(fetchStderr.String(), fmt.Sprintf("couldn't find remote ref %s", remoteBranch)) {
return fmt.Errorf("failed to fetch from fork: %w", err)
} else {
var err error
remoteTreeRef, err = getTreeRef(stderr, fmt.Sprintf("refs/remotes/%s/%s", forkRemoteName, remoteBranch))
if err != nil {
return fmt.Errorf("failed to get remote tree ref: %w", err)
}
}
localTreeRef, err := getTreeRef(stderr, "HEAD")
if err != nil {
return fmt.Errorf("failed to get local tree ref: %w", err)
}
// Avoid doing metadata-only pushes that re-trigger tests and remove lgtm
if localTreeRef != remoteTreeRef {
if err := GitPush(forkRemoteName, remoteBranch, stdout, stderr); err != nil {
return err
}
} else {
logrus.Info("Not pushing as up-to-date remote branch already exists")
}
return nil
}
// GitPush push the changes to the given remote and branch.
func GitPush(remote, remoteBranch string, stdout, stderr io.Writer) error {
logrus.Info("Pushing to remote...")
if err := Call(stdout, stderr, "git", "push", "-f", remote, fmt.Sprintf("HEAD:%s", remoteBranch)); err != nil {
return fmt.Errorf("failed to git push: %w", err)
}
return nil
}
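// tagFromName extracts the tag from an image reference of the form "host/name:tag".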
func tagFromName(name string) string {
parts := strings.Split(name, ":")
if len(parts) < 2 {
return ""
}
return parts[1]
}
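// componentFromName returns the final name component of an image reference, e.g. "repo" from "host/project/repo:tag".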
func componentFromName(name string) string {
s := strings.SplitN(strings.Split(name, ":")[0], "/", 3)
return s[len(s)-1]
}
func formatTagDate(d string) string {
if len(d) != 8 {
return d
}
// ‑ = U+2011 NON-BREAKING HYPHEN, to prevent line wraps.
return fmt.Sprintf("%s‑%s‑%s", d[0:4], d[4:6], d[6:8])
}
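// generateSummary renders a markdown summary of the image bumps whose names match prefix,
// either as a single line (summarise) or as a table of commit ranges, dates and images.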
func generateSummary(name, repo, prefix string, summarise bool, images map[string]string) string {
type delta struct {
oldCommit string
newCommit string
oldDate string
newDate string
variant string
component string
}
versions := map[string][]delta{}
for image, newTag := range images {
if !strings.HasPrefix(image, prefix) {
continue
}
if strings.HasSuffix(image, ":"+newTag) {
continue
}
oldDate, oldCommit, oldVariant := imagebumper.DeconstructTag(tagFromName(image))
newDate, newCommit, _ := imagebumper.DeconstructTag(newTag)
k := oldCommit + ":" + newCommit
d := delta{
oldCommit: oldCommit,
newCommit: newCommit,
oldDate: oldDate,
newDate: newDate,
variant: oldVariant,
component: componentFromName(image),
}
versions[k] = append(versions[k], d)
}
switch {
case len(versions) == 0:
return fmt.Sprintf("No %s changes.", name)
case len(versions) == 1 && summarise:
for k, v := range versions {
s := strings.Split(k, ":")
return fmt.Sprintf("%s changes: %s/compare/%s...%s (%s → %s)", name, repo, s[0], s[1], formatTagDate(v[0].oldDate), formatTagDate(v[0].newDate))
}
default:
changes := make([]string, 0, len(versions))
for k, v := range versions {
s := strings.Split(k, ":")
names := make([]string, 0, len(v))
for _, d := range v {
names = append(names, d.component+d.variant)
}
sort.Strings(names)
changes = append(changes, fmt.Sprintf("%s/compare/%s...%s | %s → %s | %s",
repo, s[0], s[1], formatTagDate(v[0].oldDate), formatTagDate(v[0].newDate), strings.Join(names, ", ")))
}
sort.Slice(changes, func(i, j int) bool { return strings.Split(changes[i], "|")[1] < strings.Split(changes[j], "|")[1] })
return fmt.Sprintf("Multiple distinct %s changes:\n\nCommits | Dates | Images\n--- | --- | ---\n%s\n", name, strings.Join(changes, "\n"))
}
panic("unreachable!")
}
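// generatePRBody assembles the PR description from the prow and test-image summaries plus the assignment line.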
func generatePRBody(images map[string]string, assignment string) string {
prowSummary := generateSummary("Prow", prowRepo, prowPrefix, true, images)
testImagesSummary := generateSummary("test-image", testImageRepo, testImagePrefix, false, images)
return prowSummary + "\n\n" + testImagesSummary + "\n\n" + assignment + "\n"
}
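// getAssignment queries the oncall endpoint and returns a "/cc @user" line,
// or a fallback message when the oncall cannot be determined.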
func getAssignment(oncallAddress string) string {
if oncallAddress == "" {
return ""
}
req, err := http.Get(oncallAddress)
if err != nil {
return fmt.Sprintf(errOncallMsgTempl, err)
}
defer req.Body.Close()
if req.StatusCode != http.StatusOK {
return fmt.Sprintf(errOncallMsgTempl,
fmt.Sprintf("Error requesting oncall address: HTTP error %d: %q", req.StatusCode, req.Status))
}
oncall := struct {
Oncall struct {
TestInfra string `json:"testinfra"`
} `json:"Oncall"`
}{}
if err := json.NewDecoder(req.Body).Decode(&oncall); err != nil {
return fmt.Sprintf(errOncallMsgTempl, err)
}
curtOncall := oncall.Oncall.TestInfra
if curtOncall != "" {
return "/cc @" + curtOncall
}
return noOncallMsg
}
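// getTreeRef returns the tree object hash for the given ref, used to detect metadata-only changes.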
func getTreeRef(stderr io.Writer, refname string) (string, error) {
revParseStdout := &bytes.Buffer{}
if err := Call(revParseStdout, stderr, "git", "rev-parse", refname+":"); err != nil {
return "", fmt.Errorf("failed to parse ref: %w", err)
}
fields := strings.Fields(revParseStdout.String())
if n := len(fields); n < 1 {
return "", errors.New("got no otput when trying to rev-parse")
}
return fields[0], nil
}
| [
"\"BUILD_WORKSPACE_DIRECTORY\""
]
| []
| [
"BUILD_WORKSPACE_DIRECTORY"
]
| [] | ["BUILD_WORKSPACE_DIRECTORY"] | go | 1 | 0 | |
kfai_sql_chemistry/test/test_create_db_connection.py | import os
import unittest
from typing import Dict
from kfai_env import Environment
from kfai_sql_chemistry.db.database_config import DatabaseConfig
from kfai_sql_chemistry.db.engines import SQLEngineFactory
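# setUpModule/tearDownModule toggle the ENV variable so the kfai Environment loads the TEST settings.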
def setUpModule():
os.environ['ENV'] = 'TEST'
def tearDownModule():
os.environ['ENV'] = ''
class CreateDbConnectionTest(unittest.TestCase):
def setUp(self):
e = Environment('./kfai_sql_chemistry/test/env')
e.register_environment("TEST")
e.load_env()
@unittest.skip("Skip until we run databases in github actions")
def test_registration_and_access(self):
database_map: Dict[str, DatabaseConfig] = {
"main": DatabaseConfig.from_local_env("main")
}
factory = SQLEngineFactory()
factory.create_all_engines(database_map)
engine = factory.get_engine("main")
with engine.connect() as conn:
print(conn.execute("SELECT 1").fetchall())
| []
| []
| [
"ENV"
]
| [] | ["ENV"] | python | 1 | 0 | |
src/apache.go | package main
import (
"errors"
"fmt"
"net/url"
"os"
"time"
"github.com/newrelic/infra-integrations-sdk/data/metric"
"github.com/newrelic/infra-integrations-sdk/persist"
sdkArgs "github.com/newrelic/infra-integrations-sdk/args"
"github.com/newrelic/infra-integrations-sdk/integration"
"github.com/newrelic/infra-integrations-sdk/log"
)
type argumentList struct {
sdkArgs.DefaultArgumentList
StatusURL string `default:"http://127.0.0.1/server-status?auto" help:"Apache status-server URL."`
CABundleFile string `default:"" help:"Alternative Certificate Authority bundle file"`
CABundleDir string `default:"" help:"Alternative Certificate Authority bundle directory"`
RemoteMonitoring bool `default:"false" help:"Identifies the monitored entity as 'remote'. In doubt: set to true."`
ValidateCerts bool `default:"true" help:"If the status URL is HTTPS with a self-signed certificate, set this to false if you want to avoid certificate validation"`
}
const (
integrationName = "com.newrelic.apache"
integrationVersion = "1.5.0"
defaultHTTPTimeout = time.Second * 1
entityRemoteType = "server"
httpProtocol = `http`
httpsProtocol = `https`
httpDefaultPort = `80`
httpsDefaultPort = `443`
)
var args argumentList
func main() {
log.Debug("Starting Apache integration")
defer log.Debug("Apache integration exited")
i, err := createIntegration()
fatalIfErr(err)
log.SetupLogging(args.Verbose)
e, err := entity(i, args.StatusURL, args.RemoteMonitoring)
fatalIfErr(err)
if args.HasInventory() {
log.Debug("Fetching data for '%s' integration", integrationName+"-inventory")
fatalIfErr(setInventory(e.Inventory))
}
if args.HasMetrics() {
log.Debug("Fetching data for '%s' integration", integrationName+"-metrics")
hostname, port, err := parseStatusURL(args.StatusURL)
fatalIfErr(err)
ms := metricSet(e, "ApacheSample", hostname, port, args.RemoteMonitoring)
provider := &Status{
CABundleDir: args.CABundleDir,
CABundleFile: args.CABundleFile,
HTTPTimeout: defaultHTTPTimeout,
ValidateCerts: args.ValidateCerts,
}
fatalIfErr(getMetricsData(provider, ms))
}
fatalIfErr(i.Publish())
}
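// entity returns a remote entity named "hostname:port" when remote monitoring is enabled,
// otherwise the local entity.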
func entity(i *integration.Integration, statusURL string, remote bool) (*integration.Entity, error) {
if remote {
hostname, port, err := parseStatusURL(statusURL)
if err != nil {
return nil, err
}
n := fmt.Sprintf("%s:%s", hostname, port)
return i.Entity(n, entityRemoteType)
}
return i.LocalEntity(), nil
}
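// metricSet creates the sample metric set, adding the hostname attribute only for remote entities.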
func metricSet(e *integration.Entity, eventType, hostname, port string, remote bool) *metric.Set {
if remote {
return e.NewMetricSet(
eventType,
metric.Attr("hostname", hostname),
metric.Attr("port", port),
)
}
return e.NewMetricSet(
eventType,
metric.Attr("port", port),
)
}
// createIntegration builds the integration, using a file-backed store when NRIA_CACHE_PATH is set.
func createIntegration() (*integration.Integration, error) {
cachePath := os.Getenv("NRIA_CACHE_PATH")
if cachePath == "" {
return integration.New(integrationName, integrationVersion, integration.Args(&args))
}
l := log.NewStdErr(args.Verbose)
s, err := persist.NewFileStore(cachePath, l, persist.DefaultTTL)
if err != nil {
return nil, err
}
return integration.New(integrationName, integrationVersion, integration.Args(&args), integration.Storer(s), integration.Logger(l))
}
// parseStatusURL will extract the hostname and the port from the apache status URL.
func parseStatusURL(statusURL string) (hostname, port string, err error) {
u, err := url.Parse(statusURL)
if err != nil {
return
}
if !isHTTP(u) {
err = errors.New("unsupported protocol scheme")
return
}
hostname = u.Hostname()
if hostname == "" {
err = errors.New("http: no Host in request URL")
return
}
if u.Port() != "" {
port = u.Port()
} else if u.Scheme == httpsProtocol {
port = httpsDefaultPort
} else {
port = httpDefaultPort
}
return
}
// isHTTP is checking if the URL is http/s protocol.
func isHTTP(u *url.URL) bool {
return u.Scheme == httpProtocol || u.Scheme == httpsProtocol
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}
| [
"\"NRIA_CACHE_PATH\""
]
| []
| [
"NRIA_CACHE_PATH"
]
| [] | ["NRIA_CACHE_PATH"] | go | 1 | 0 | |
src/main/java/org/codinjutsu/tools/jenkins/ConfigFile.java | package org.codinjutsu.tools.jenkins;
import java.io.*;
import java.nio.charset.StandardCharsets;
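/**
 * Minimal INI-style configuration helper that reads and writes key/value settings
 * stored under the user's home directory in .jenkinsIntegrations/jenkinsIntegrations.cfg.
 */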
public class ConfigFile {
private static final String JENKINSS_INTEGRATION_CFG = "jenkinsIntegrations.cfg";
private static String resourcesLocation = null;
public static String getResourcesLocation() {
if (isWindows()) {
File windowsHome = new File(System.getenv("USERPROFILE"));
File resourcesFolder = new File(windowsHome, ".jenkinsIntegrations");
if (!resourcesFolder.exists()) {
if (!resourcesFolder.mkdirs()) {
System.out.println("not create " + resourcesFolder.getName());
}
}
resourcesLocation = resourcesFolder.getAbsolutePath();
return resourcesLocation;
}
File userHomeDir = new File(getHome());
File resourcesFolder = new File(userHomeDir, ".jenkinsIntegrations");
resourcesLocation = resourcesFolder.getAbsolutePath();
return resourcesLocation;
}
private static String getConfigFilePath() {
File file = new File(getResourcesLocation(), ConfigFile.JENKINSS_INTEGRATION_CFG);
if (!file.exists()) {
try {
if (!file.createNewFile()) {
System.out.println("Not create " + file.getName());
}
} catch (IOException e) {
//
}
}
return file.getAbsolutePath();
}
private static String getHome() {
return System.getProperty("user.home");
}
public static String getString(String key) {
return get("Configs", key, ConfigFile.getConfigFilePath());
}
public static Integer getInteger(String key) {
String configs = get("Configs", key, ConfigFile.getConfigFilePath());
int i = 0;
try {
i = Integer.parseInt(configs);
} catch (NumberFormatException e) {
// return null;
}
return i;
}
    public static boolean getBoolean(String key) {
        String configs = get("Configs", key, ConfigFile.getConfigFilePath());
        // Boolean.getBoolean() would look up a system property, so parse the stored
        // string instead; parseBoolean(null) safely evaluates to false.
        return Boolean.parseBoolean(configs);
    }
public static String get(String key) {
return get("Configs", key, ConfigFile.getConfigFilePath());
}
public static String get(String section, String key) {
return get(section, key, ConfigFile.getConfigFilePath());
}
public static String get(String section, String key, String file) {
String val = null;
try {
BufferedReader br = new BufferedReader(new FileReader(file));
String currentSection = "";
try {
String line = br.readLine();
while (line != null) {
if (line.trim().startsWith("[") && line.trim().endsWith("]")) {
currentSection = line.trim().substring(1, line.trim().length() - 1).toLowerCase();
} else {
if (section.toLowerCase().equals(currentSection)) {
String[] parts = line.split("=");
if (parts.length == 2 && parts[0].trim().equals(key)) {
val = parts[1].trim();
br.close();
return val;
}
}
}
line = br.readLine();
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
} catch (FileNotFoundException e1) { /* ignored */ }
return val;
}
public static void set(String key, Object value) {
set("Configs", key, value != null ? value.toString() : null);
}
public static void set(String section, String key, String val) {
String file = ConfigFile.getConfigFilePath();
StringBuilder contents = new StringBuilder();
try {
BufferedReader br = new BufferedReader(new FileReader(file));
try {
String currentSection = "";
String line = br.readLine();
boolean found = false;
while (line != null) {
if (line.trim().startsWith("[") && line.trim().endsWith("]")) {
if (section.toLowerCase().equals(currentSection) && !found) {
contents.append(key).append(" = ").append(val).append("\n");
found = true;
}
currentSection = line.trim().substring(1, line.trim().length() - 1).toLowerCase();
contents.append(line).append("\n");
} else {
if (section.toLowerCase().equals(currentSection)) {
String[] parts = line.split("=");
String currentKey = parts[0].trim();
if (currentKey.equals(key)) {
if (!found) {
contents.append(key).append(" = ").append(val).append("\n");
found = true;
}
} else {
contents.append(line).append("\n");
}
} else {
contents.append(line).append("\n");
}
}
line = br.readLine();
}
if (!found) {
if (!section.toLowerCase().equals(currentSection)) {
contents.append("[").append(section.toLowerCase()).append("]\n");
}
contents.append(key).append(" = ").append(val).append("\n");
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
} catch (FileNotFoundException e1) {
// cannot read config file, so create it
contents = new StringBuilder();
contents.append("[").append(section.toLowerCase()).append("]\n");
contents.append(key).append(" = ").append(val).append("\n");
}
PrintWriter writer = null;
try {
writer = new PrintWriter(file, StandardCharsets.UTF_8);
} catch (Exception e) {
e.printStackTrace();
}
if (writer != null) {
writer.print(contents);
writer.close();
}
}
public static boolean isWindows() {
return System.getProperty("os.name").contains("Windows");
}
}
| [
"\"USERPROFILE\""
]
| []
| [
"USERPROFILE"
]
| [] | ["USERPROFILE"] | java | 1 | 0 | |
cmd/controller/run.go | // Copyright 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package controller
import (
"fmt"
"net/http" // Pprof related
_ "net/http/pprof" // Pprof related
"os"
"strconv"
"time"
"github.com/go-logr/logr"
"github.com/vmware-tanzu/carvel-kapp-controller/cmd/controller/handlers"
"github.com/vmware-tanzu/carvel-kapp-controller/pkg/apiserver"
"github.com/vmware-tanzu/carvel-kapp-controller/pkg/reftracker"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Initialize gcp client auth plugin
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"sigs.k8s.io/controller-runtime/pkg/source"
kcv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"
pkgingv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1"
kcclient "github.com/vmware-tanzu/carvel-kapp-controller/pkg/client/clientset/versioned"
kcconfig "github.com/vmware-tanzu/carvel-kapp-controller/pkg/config"
datapkgingv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apiserver/apis/datapackaging/v1alpha1"
pkgclient "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apiserver/client/clientset/versioned"
)
const (
PprofListenAddr = "0.0.0.0:6060"
kappctrlAPIPORTEnvKey = "KAPPCTRL_API_PORT"
)
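// Options holds the flag-driven configuration for the controller process.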
type Options struct {
Concurrency int
Namespace string
EnablePprof bool
APIRequestTimeout time.Duration
PackagingGloablNS string
}
// Based on https://github.com/kubernetes-sigs/controller-runtime/blob/8f633b179e1c704a6e40440b528252f147a3362a/examples/builtins/main.go
func Run(opts Options, runLog logr.Logger) {
runLog.Info("start controller")
runLog.Info("setting up manager")
restConfig := config.GetConfigOrDie()
if opts.APIRequestTimeout != 0 {
restConfig.Timeout = opts.APIRequestTimeout
}
mgr, err := manager.New(restConfig, manager.Options{Namespace: opts.Namespace, Scheme: kcconfig.Scheme})
if err != nil {
runLog.Error(err, "unable to set up overall controller manager")
os.Exit(1)
}
logProxies(runLog)
runLog.Info("setting up controller")
coreClient, err := kubernetes.NewForConfig(restConfig)
if err != nil {
runLog.Error(err, "building core client")
os.Exit(1)
}
kcClient, err := kcclient.NewForConfig(restConfig)
if err != nil {
runLog.Error(err, "building app client")
os.Exit(1)
}
kcConfig, err := kcconfig.GetConfig(coreClient)
if err != nil {
runLog.Error(err, "getting kapp-controller config")
os.Exit(1)
}
pkgClient, err := pkgclient.NewForConfig(restConfig)
if err != nil {
runLog.Error(err, "building app client")
os.Exit(1)
}
	// read the API server bind port from the KAPPCTRL_API_PORT environment variable
var bindPort int
if apiPort, ok := os.LookupEnv(kappctrlAPIPORTEnvKey); ok {
var err error
if bindPort, err = strconv.Atoi(apiPort); err != nil {
runLog.Error(fmt.Errorf("%s environment variable must be an integer", kappctrlAPIPORTEnvKey), "reading server port")
os.Exit(1)
}
} else {
runLog.Error(fmt.Errorf("os call failed to read env var %s", kappctrlAPIPORTEnvKey), "reading server port")
os.Exit(1)
}
server, err := apiserver.NewAPIServer(restConfig, coreClient, kcClient, opts.PackagingGloablNS, bindPort)
if err != nil {
runLog.Error(err, "creating server")
os.Exit(1)
}
err = server.Run()
if err != nil {
runLog.Error(err, "starting server")
os.Exit(1)
}
refTracker := reftracker.NewAppRefTracker()
updateStatusTracker := reftracker.NewAppUpdateStatus()
appFactory := AppFactory{
coreClient: coreClient,
kcConfig: kcConfig,
appClient: kcClient,
}
{ // add controller for apps
schApp := handlers.NewSecretHandler(runLog, refTracker, updateStatusTracker)
cfgmhApp := handlers.NewConfigMapHandler(runLog, refTracker, updateStatusTracker)
ctrlAppOpts := controller.Options{
Reconciler: NewUniqueReconciler(&ErrReconciler{
delegate: NewAppsReconciler(kcClient, runLog.WithName("ar"), appFactory, refTracker, updateStatusTracker),
log: runLog.WithName("pr"),
}),
MaxConcurrentReconciles: opts.Concurrency,
}
ctrlApp, err := controller.New("kapp-controller-app", mgr, ctrlAppOpts)
if err != nil {
runLog.Error(err, "unable to set up kapp-controller-app")
os.Exit(1)
}
err = ctrlApp.Watch(&source.Kind{Type: &kcv1alpha1.App{}}, &handler.EnqueueRequestForObject{})
if err != nil {
runLog.Error(err, "unable to watch Apps")
os.Exit(1)
}
err = ctrlApp.Watch(&source.Kind{Type: &v1.Secret{}}, schApp)
if err != nil {
runLog.Error(err, "unable to watch Secrets")
os.Exit(1)
}
err = ctrlApp.Watch(&source.Kind{Type: &v1.ConfigMap{}}, cfgmhApp)
if err != nil {
runLog.Error(err, "unable to watch ConfigMaps")
os.Exit(1)
}
}
{ // add controller for PackageInstall
pkgInstallCtrlOpts := controller.Options{
Reconciler: &PackageInstallReconciler{
kcClient: kcClient,
pkgClient: pkgClient,
log: runLog.WithName("ipr"),
},
MaxConcurrentReconciles: opts.Concurrency,
}
pkgInstallCtrl, err := controller.New("kapp-controller-packageinstall", mgr, pkgInstallCtrlOpts)
if err != nil {
runLog.Error(err, "unable to set up kapp-controller-packageinstall")
os.Exit(1)
}
err = pkgInstallCtrl.Watch(&source.Kind{Type: &pkgingv1alpha1.PackageInstall{}}, &handler.EnqueueRequestForObject{})
if err != nil {
runLog.Error(err, "unable to watch *pkgingv1alpha1.PackageInstall")
os.Exit(1)
}
err = pkgInstallCtrl.Watch(&source.Kind{Type: &datapkgingv1alpha1.Package{}}, handlers.NewPackageInstallVersionHandler(kcClient, opts.PackagingGloablNS, runLog.WithName("handler")))
if err != nil {
runLog.Error(err, "unable to watch *datapkgingv1alpha1.Package for PackageInstall")
os.Exit(1)
}
err = pkgInstallCtrl.Watch(&source.Kind{Type: &kcv1alpha1.App{}}, &handler.EnqueueRequestForOwner{
OwnerType: &pkgingv1alpha1.PackageInstall{},
IsController: true,
})
if err != nil {
runLog.Error(err, "unable to watch *kcv1alpha1.App for PackageInstall")
os.Exit(1)
}
}
{ // add controller for pkgrepositories
schRepo := handlers.NewSecretHandler(runLog, refTracker, updateStatusTracker)
pkgRepositoriesCtrlOpts := controller.Options{
Reconciler: NewPkgRepositoryReconciler(kcClient, runLog.WithName("prr"), appFactory, refTracker, updateStatusTracker),
// TODO: Consider making this configurable for multiple PackageRepo reconciles
MaxConcurrentReconciles: 1,
}
pkgRepositoryCtrl, err := controller.New("kapp-controller-package-repository", mgr, pkgRepositoriesCtrlOpts)
if err != nil {
runLog.Error(err, "unable to set up kapp-controller-package-repository")
os.Exit(1)
}
err = pkgRepositoryCtrl.Watch(&source.Kind{Type: &pkgingv1alpha1.PackageRepository{}}, &handler.EnqueueRequestForObject{})
if err != nil {
runLog.Error(err, "unable to watch *pkgingv1alpha1.PackageRepository")
os.Exit(1)
}
err = pkgRepositoryCtrl.Watch(&source.Kind{Type: &v1.Secret{}}, schRepo)
if err != nil {
runLog.Error(err, "unable to watch Secrets")
os.Exit(1)
}
}
runLog.Info("starting manager")
if opts.EnablePprof {
runLog.Info("DANGEROUS in production setting -- pprof running", "listen-addr", PprofListenAddr)
go func() {
runLog.Error(http.ListenAndServe(PprofListenAddr, nil), "serving pprof")
}()
}
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
runLog.Error(err, "unable to run manager")
os.Exit(1)
}
runLog.Info("Exiting")
server.Stop()
os.Exit(0)
}
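// logProxies logs any proxy-related environment variables so their effect is visible in the controller logs.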
func logProxies(runLog logr.Logger) {
if proxyVal := os.Getenv("http_proxy"); proxyVal != "" {
runLog.Info(fmt.Sprintf("Using http proxy '%s'", proxyVal))
}
if proxyVal := os.Getenv("https_proxy"); proxyVal != "" {
runLog.Info(fmt.Sprintf("Using https proxy '%s'", proxyVal))
}
if noProxyVal := os.Getenv("no_proxy"); noProxyVal != "" {
runLog.Info(fmt.Sprintf("No proxy set for: %s", noProxyVal))
}
}
| [
"\"http_proxy\"",
"\"https_proxy\"",
"\"no_proxy\""
]
| []
| [
"http_proxy",
"no_proxy",
"https_proxy"
]
| [] | ["http_proxy", "no_proxy", "https_proxy"] | go | 3 | 0 | |
pkg/config/config.go | /*
Package config implements a simple library for config.
*/
package config
import (
	"io/ioutil"
	"os"
	"strings"
	"github.com/pkg/errors"
	"github.com/spf13/viper"
	// blank import enables viper's remote configuration providers (etcd), required by remote()
	_ "github.com/spf13/viper/remote"
)
//Config interface
type Config interface {
GetCacheConfig() *CacheConfig
GetDBConfig() *DatabaseConfig
}
//HostConfig host
type HostConfig struct {
Address string
Port string
}
//CacheConfig cache
type CacheConfig struct {
Dialect string
Host string
Port string
URL string
}
//DatabaseConfig db
type DatabaseConfig struct {
Dialect string
DBName string
UserName string
Password string
URL string
}
type config struct {
docker string
Host HostConfig
Cache CacheConfig
Database DatabaseConfig
}
//var configChange = make(chan int, 1)
//NewConfig instance
func NewConfig() (*config, error) {
var err error
var config = new(config)
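	// CONTACTENV selects the configuration source: "dev" reads individual environment
	// variables and a password file, "local" (the default) reads local.config from disk,
	// and "remote" reads the configuration from etcd.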
env := os.Getenv("CONTACTENV")
if env == "dev" {
config.Database.UserName = os.Getenv("MONGOUSERNAME")
buf, err := ioutil.ReadFile(os.Getenv("MONGOPWD"))
if err != nil {
panic(errors.New("read the env var fail"))
}
config.Database.Password = strings.TrimSpace(string(buf))
config.Database.URL = os.Getenv("MONGOURL")
config.Cache.URL = os.Getenv("REDISURL")
} else if env == "local" || env == "" {
fileName := "local.config"
config, err = local(fileName)
} else if env == "remote" {
fileName := "remote.config"
config, err = remote(fileName)
} else {
panic(errors.New("env var is invalid"))
}
//WatchConfig(configChange)
return config, err
}
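// local loads <fileName>.json from the CONFIGPATH directory (default "configs") using viper.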
func local(fileName string) (*config, error) {
path := os.Getenv("CONFIGPATH")
if path == "" {
path = "configs"
}
v := viper.New()
config := new(config)
v.SetConfigType("json")
v.SetConfigName(fileName)
v.AddConfigPath(path)
err := v.ReadInConfig()
if err != nil {
panic(err)
}
err = v.Unmarshal(config)
if err != nil {
panic(err)
}
return config, err
}
func remote(fileName string) (*config, error) {
	path := os.Getenv("CONFIGPATH")
	if path == "" {
		path = "configs"
	}
	v := viper.New()
	v.SetConfigType("json")
	// allocate the target struct before unmarshalling; viper needs a non-nil pointer
	conf := new(config)
	err := v.AddRemoteProvider("etcd", "http://127.0.0.1:4001", path+fileName+".json")
	if err != nil {
		panic(err)
	}
	err = v.ReadRemoteConfig()
	if err != nil {
		panic(err)
	}
	err = v.Unmarshal(conf)
	if err != nil {
		panic(err)
	}
	return conf, err
}
func (c *config) GetCacheConfig() *CacheConfig {
return &c.Cache
}
func (c *config) GetDBConfig() *DatabaseConfig {
return &c.Database
}
/*func WatchConfig(change chan int) {
viper.WatchConfig()
viper.OnConfigChange(func(e fsnotify.Event) {
logrus.Infof("config changed: %s", e.Name)
if err := viper.ReadInConfig(); err != nil {
logrus.Warnf("read config fail after changing config")
return
}
change <- 1
})
}*/
| [
"\"CONTACTENV\"",
"\"MONGOUSERNAME\"",
"\"MONGOPWD\"",
"\"MONGOURL\"",
"\"REDISURL\"",
"\"CONFIGPATH\"",
"\"CONFIGPATH\""
]
| []
| [
"CONTACTENV",
"MONGOURL",
"REDISURL",
"MONGOUSERNAME",
"CONFIGPATH",
"MONGOPWD"
]
| [] | ["CONTACTENV", "MONGOURL", "REDISURL", "MONGOUSERNAME", "CONFIGPATH", "MONGOPWD"] | go | 6 | 0 | |
cmd/abapEnvironmentCreateSystem_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapEnvironmentCreateSystemOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfService string `json:"cfService,omitempty"`
CfServicePlan string `json:"cfServicePlan,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
ServiceManifest string `json:"serviceManifest,omitempty"`
AbapSystemAdminEmail string `json:"abapSystemAdminEmail,omitempty"`
AbapSystemDescription string `json:"abapSystemDescription,omitempty"`
AbapSystemIsDevelopmentAllowed bool `json:"abapSystemIsDevelopmentAllowed,omitempty"`
AbapSystemID string `json:"abapSystemID,omitempty"`
AbapSystemSizeOfPersistence int `json:"abapSystemSizeOfPersistence,omitempty"`
AbapSystemSizeOfRuntime int `json:"abapSystemSizeOfRuntime,omitempty"`
AddonDescriptorFileName string `json:"addonDescriptorFileName,omitempty"`
IncludeAddon bool `json:"includeAddon,omitempty"`
}
// AbapEnvironmentCreateSystemCommand Creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system)
func AbapEnvironmentCreateSystemCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentCreateSystem"
metadata := abapEnvironmentCreateSystemMetadata()
var stepConfig abapEnvironmentCreateSystemOptions
var startTime time.Time
var createAbapEnvironmentCreateSystemCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system)",
Long: `creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system)`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
abapEnvironmentCreateSystem(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentCreateSystemFlags(createAbapEnvironmentCreateSystemCmd, &stepConfig)
return createAbapEnvironmentCreateSystemCmd
}
func addAbapEnvironmentCreateSystemFlags(cmd *cobra.Command, stepConfig *abapEnvironmentCreateSystemOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", `https://api.cf.eu10.hana.ondemand.com`, "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for Cloud Foundry User")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry org")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry Space")
cmd.Flags().StringVar(&stepConfig.CfService, "cfService", os.Getenv("PIPER_cfService"), "Parameter for Cloud Foundry Service to be used for creating Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServicePlan, "cfServicePlan", os.Getenv("PIPER_cfServicePlan"), "Parameter for Cloud Foundry Service Plan to be used when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Parameter for naming the Service Instance when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.ServiceManifest, "serviceManifest", os.Getenv("PIPER_serviceManifest"), "Path to Cloud Foundry Service Manifest in YAML format for multiple service creations that are being passed to a Create-Service-Push Cloud Foundry cli plugin")
cmd.Flags().StringVar(&stepConfig.AbapSystemAdminEmail, "abapSystemAdminEmail", os.Getenv("PIPER_abapSystemAdminEmail"), "Admin E-Mail address for the initial administrator of the system")
cmd.Flags().StringVar(&stepConfig.AbapSystemDescription, "abapSystemDescription", `Test system created by an automated pipeline`, "Description for the ABAP Environment system")
cmd.Flags().BoolVar(&stepConfig.AbapSystemIsDevelopmentAllowed, "abapSystemIsDevelopmentAllowed", true, "This parameter determines, if development is allowed on the system")
cmd.Flags().StringVar(&stepConfig.AbapSystemID, "abapSystemID", `H02`, "The three character name of the system - maps to 'sapSystemName'")
cmd.Flags().IntVar(&stepConfig.AbapSystemSizeOfPersistence, "abapSystemSizeOfPersistence", 0, "The size of the persistence")
cmd.Flags().IntVar(&stepConfig.AbapSystemSizeOfRuntime, "abapSystemSizeOfRuntime", 0, "The size of the runtime")
cmd.Flags().StringVar(&stepConfig.AddonDescriptorFileName, "addonDescriptorFileName", os.Getenv("PIPER_addonDescriptorFileName"), "The file name of the addonDescriptor")
cmd.Flags().BoolVar(&stepConfig.IncludeAddon, "includeAddon", false, "Must be set to true to install the addon provided via 'addonDescriptorFileName'")
cmd.MarkFlagRequired("cfApiEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("cfOrg")
cmd.MarkFlagRequired("cfSpace")
}
// retrieve step metadata
func abapEnvironmentCreateSystemMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentCreateSystem",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "username",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/cloudfoundry-$(cfOrg)-$(cfSpace)", "$(vaultBasePath)/$(vaultPipelineName)/cloudfoundry-$(cfOrg)-$(cfSpace)", "$(vaultBasePath)/GROUP-SECRETS/cloudfoundry-$(cfOrg)-$(cfSpace)"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "password",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/cloudfoundry-$(cfOrg)-$(cfSpace)", "$(vaultBasePath)/$(vaultPipelineName)/cloudfoundry-$(cfOrg)-$(cfSpace)", "$(vaultBasePath)/GROUP-SECRETS/cloudfoundry-$(cfOrg)-$(cfSpace)"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
},
{
Name: "cfService",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/service"}},
},
{
Name: "cfServicePlan",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/servicePlan"}},
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
},
{
Name: "serviceManifest",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceManifest"}, {Name: "cfServiceManifest"}},
},
{
Name: "abapSystemAdminEmail",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemDescription",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemIsDevelopmentAllowed",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemID",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemSizeOfPersistence",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemSizeOfRuntime",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "addonDescriptorFileName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "includeAddon",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfService\"",
"\"PIPER_cfServicePlan\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_serviceManifest\"",
"\"PIPER_abapSystemAdminEmail\"",
"\"PIPER_addonDescriptorFileName\""
]
| []
| [
"PIPER_cfSpace",
"PIPER_cfServicePlan",
"PIPER_password",
"PIPER_cfService",
"PIPER_username",
"PIPER_addonDescriptorFileName",
"PIPER_cfServiceInstance",
"PIPER_cfOrg",
"PIPER_abapSystemAdminEmail",
"PIPER_serviceManifest"
]
| [] | ["PIPER_cfSpace", "PIPER_cfServicePlan", "PIPER_password", "PIPER_cfService", "PIPER_username", "PIPER_addonDescriptorFileName", "PIPER_cfServiceInstance", "PIPER_cfOrg", "PIPER_abapSystemAdminEmail", "PIPER_serviceManifest"] | go | 10 | 0 | |
example.py | # MIT License
#
# Copyright (c) 2020 Genesis Cloud Ltd. <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Oz Tiram <[email protected]>
"""
An example script to show how to start a Genesis Cloud GPU instance
with custom user data to install the NVIDIA GPU driver.
Grab your API key from the UI and save it in a safe place.
on the shell before running this script
$ export GENESISCLOUD_API_KEY=secretkey
"""
import os
import textwrap
import time
import subprocess as sp
from genesiscloud.client import Client, INSTANCE_TYPES
def simple_startup_script():
"""see the documentation of cloud init"""
return textwrap.dedent("""
#cloud-config
hostname: mytestubuntu
runcmd:
- [ "apt", "install", "-y", "vim" ]
""")
def get_startup_script():
return """#!/bin/bash
set -eux
IS_INSTALLED=false
NVIDIA_SHORT_VERSION=430
manual_fetch_install() {
__nvidia_full_version="430_430.50-0ubuntu2"
for i in $(seq 1 5)
do
echo "Connecting to http://archive.ubuntu.com site for $i time"
if curl -s --head --request GET http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-"${NVIDIA_SHORT_VERSION}" | grep "HTTP/1.1" > /dev/null ;
then
echo "Connected to http://archive.ubuntu.com. Start downloading and installing the NVIDIA driver..."
__tempdir="$(mktemp -d)"
apt-get install -y --no-install-recommends "linux-headers-$(uname -r)" dkms
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-dkms-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-dkms-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-utils-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/libnvidia-compute-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-utils-${__nvidia_full_version}_amd64.deb "${__tempdir}"/libnvidia-compute-${__nvidia_full_version}_amd64.deb
IS_INSTALLED=true
rm -r "${__tempdir}"
break
fi
sleep 2
done
}
apt_fetch_install() {
add-apt-repository -s -u -y restricted
# Ubuntu has only a single version in the repository marked as "latest" of
# this series.
for _ in $(seq 1 5)
do
if apt-get install -y --no-install-recommends nvidia-utils-${NVIDIA_SHORT_VERSION} libnvidia-compute-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-common-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-source-${NVIDIA_SHORT_VERSION} \
nvidia-dkms-${NVIDIA_SHORT_VERSION} \
"linux-headers-$(uname -r)" dkms; then
IS_INSTALLED=true
break
fi
sleep 2
done
}
main() {
apt-get update
if grep xenial /etc/os-release; then
manual_fetch_install
else
apt_fetch_install
fi
# remove the module if it is inserted, blacklist it
rmmod nouveau || echo "nouveau kernel module not loaded ..."
echo "blacklist nouveau" > /etc/modprobe.d/nouveau.conf
# log insertion of the nvidia module
# this should always succeed on customer instances
if modprobe -vi nvidia; then
nvidia-smi
modinfo nvidia
gpu_found=true
else
gpu_found=false
fi
if [ "${IS_INSTALLED}" = true ]; then
echo "NVIDIA driver has been successfully installed."
else
echo "NVIDIA driver has NOT been installed."
fi
if [ "${gpu_found}" ]; then
echo "NVIDIA GPU device is found and ready"
else
echo "WARNING: NVIDIA GPU device is not found or is failed"
fi
}
main
"""
def create_instance():
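    """Create a demo GPU instance and wait until it is active and reachable over SSH."""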
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
# before we continue to create objects, we check that we can communicate
# with the API, if the connect method does not succeed it will throw an
# error and the script will terminate
if client.connect():
pass
# To create an instance you will need an SSH public key.
# Upload it via the Web UI, you can now find it with.
# replace this to match your key
SSHKEYNAME = 'YourKeyName'
# genesiscloud.client.Resource.find methods returns generators - that is,
# they are lazy per-default.
sshkey_gen = client.SSHKeys.find({"name": SSHKEYNAME})
sshkey = list(sshkey_gen)[0]
# You need to tell the client which OS should be used for your instance
# One can use a snapshot or a base-os to create a new instance
ubuntu_18 = [image for image in client.Images.find({"name": 'Ubuntu 18.04'})][0]
# choose the most simple instance type
# to see the instance properties, use
# list(INSTANCE_TYPES.items())[0]
#
# ('vcpu-4_memory-12g_disk-80g_nvidia1080ti-1',
# {'vCPUs': 4, 'RAM': 12, 'Disk': 80, 'GPU': 1})
    instance_type = list(INSTANCE_TYPES.keys())[0]
    # To create an instance use Instances.create
    # You must pass an SSH key to SSH into the machine. Currently, only one
    # SSH key is supported. If you need more, use the command
    # `ssh-import-id-gh oz123`
    # which can fetch public keys from github.com/oz123.keys
# *Obviously* __replace__ my user name with YOURS or anyone you TRUST.
# You should put this in the user_data script. You can add this in the
# text block that the function `get_startup_script` returns.
# NOTE:
# you can also create an instance with SSH password enabled, but you should
# prefer SSH key authentication. If you choose to use password, you should
# not pass ssh_keys
my_instance = client.Instances.create(
name="demo",
hostname="demo",
ssh_keys=[sshkey.id], # comment this to enable password
image=ubuntu_18.id,
        type=instance_type,
metadata={"startup_script":
simple_startup_script()},
#password="yourSekretPassword#12!"
)
# my_instance is a dictionary containing information about the instance
# that was just created.
print(my_instance)
while my_instance['status'] != 'active':
time.sleep(1)
my_instance = client.Instances.get(my_instance.id)
print(f"{my_instance['status']}\r", end="")
print("")
# yay! the instance is active
# let's ssh to the public IP of the instance
public_ip = my_instance.public_ip
print(f"The ssh address of the Instance is: {public_ip}")
# wait for ssh to become available, this returns exit code other
# than 0 as long the ssh connection isn't available
while sp.run(
("ssh -l ubuntu -o StrictHostKeyChecking=accept-new "
"-o ConnectTimeout=50 "
f"{public_ip} hostname"), shell=True).returncode:
time.sleep(1)
print("Congratulations! You genesiscloud instance has been created!")
print("You can ssh to it with:")
print(f"ssh -l ubuntu {public_ip}")
print("Some interesting commands to try at first:")
print("cloud-init stats # if this is still running, NVIDIA driver is still"
" installing")
print("use the following to see cloud-init output in real time:")
print("sudo tail -f /var/log/cloud-init-output.log")
return my_instance
def destroy(instance_id):
    # finally destroy this instance, when you no longer need it
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
client.Instances.delete(id=instance_id)
if __name__ == "__main__":
instance = create_instance()
instance_id = instance['id']
# destroy(instance_id)
| []
| []
| [
"GENESISCLOUD_API_KEY"
]
| [] | ["GENESISCLOUD_API_KEY"] | python | 1 | 0 | |
test/config.py | import os
import unittest
from unittest.mock import patch
from configparser import ConfigParser
import uuid
from kb_faprotax.kb_faprotaxServer import MethodContext
from installed_clients.authclient import KBaseAuth as _KBaseAuth
from installed_clients.WorkspaceClient import Workspace
from kb_faprotax.kb_faprotaxImpl import kb_faprotax
######################################
DO_PATCH = False
if DO_PATCH:
patch_ = patch
patch_dict_ = patch.dict
else:
patch_ = lambda *args, **kwargs: lambda f: f
patch_dict_ = lambda *args, **kwargs: lambda f: f
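# Usage sketch (illustrative; the patch target and mock below are hypothetical):
# a test decorated with
#
#   @patch_('kb_faprotax.util.SomeClient', new=lambda *a, **kw: mock_client)
#   def test_run(self): ...
#
# uses the mock when DO_PATCH is True; when DO_PATCH is False the decorator is a
# no-op. Note that `new=...` is used so the no-op version does not change the
# test method's signature.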
######################################
####################################################################################################
####################################################################################################
class BaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = os.environ.get('KB_AUTH_TOKEN', None)
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_faprotax'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'kb_faprotax',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = Workspace(cls.wsURL)
cls.wsName = 'kb_faprotax_' + str(uuid.uuid4())
cls.wsId = cls.wsClient.create_workspace({'workspace': cls.wsName})[0]
cls.ws = {
'workspace_id': cls.wsId,
'workspace_name': cls.wsName,
}
cls.serviceImpl = kb_faprotax(cls.cfg)
cls.shared_folder = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
@classmethod
def tearDownClass(cls):
print('BaseTest tearDownClass')
#
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace deleted')
def shortDescription(self):
return None
| []
| []
| [
"SDK_CALLBACK_URL",
"KB_AUTH_TOKEN",
"KB_DEPLOYMENT_CONFIG"
]
| [] | ["SDK_CALLBACK_URL", "KB_AUTH_TOKEN", "KB_DEPLOYMENT_CONFIG"] | python | 3 | 0 | |
soracom/generated/cmd/event_handlers_unignore.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// EventHandlersUnignoreCmdHandlerId holds value of 'handler_id' option
var EventHandlersUnignoreCmdHandlerId string
// EventHandlersUnignoreCmdImsi holds value of 'imsi' option
var EventHandlersUnignoreCmdImsi string
func init() {
EventHandlersUnignoreCmd.Flags().StringVar(&EventHandlersUnignoreCmdHandlerId, "handler-id", "", TRAPI("handler_id"))
EventHandlersUnignoreCmd.Flags().StringVar(&EventHandlersUnignoreCmdImsi, "imsi", "", TRAPI("imsi"))
EventHandlersCmd.AddCommand(EventHandlersUnignoreCmd)
}
// EventHandlersUnignoreCmd defines 'unignore' subcommand
var EventHandlersUnignoreCmd = &cobra.Command{
Use: "unignore",
Short: TRAPI("/event_handlers/{handler_id}/subscribers/{imsi}/ignore:delete:summary"),
Long: TRAPI(`/event_handlers/{handler_id}/subscribers/{imsi}/ignore:delete:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectEventHandlersUnignoreCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
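// Illustrative invocation (the subcommand path is assumed from the parent
// EventHandlersCmd; flag values are placeholders):
//
//	soracom event-handlers unignore --handler-id <handler-id> --imsi <imsi>
//
// Setting the environment variable SORACOM_VERBOSE to any non-empty value
// enables verbose API client output, as handled in RunE above.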
func collectEventHandlersUnignoreCmdParams(ac *apiClient) (*apiParams, error) {
if EventHandlersUnignoreCmdHandlerId == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "handler-id")
}
if EventHandlersUnignoreCmdImsi == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "imsi")
}
return &apiParams{
method: "DELETE",
path: buildPathForEventHandlersUnignoreCmd("/event_handlers/{handler_id}/subscribers/{imsi}/ignore"),
query: buildQueryForEventHandlersUnignoreCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForEventHandlersUnignoreCmd(path string) string {
escapedHandlerId := url.PathEscape(EventHandlersUnignoreCmdHandlerId)
path = strReplace(path, "{"+"handler_id"+"}", escapedHandlerId, -1)
escapedImsi := url.PathEscape(EventHandlersUnignoreCmdImsi)
path = strReplace(path, "{"+"imsi"+"}", escapedImsi, -1)
return path
}
func buildQueryForEventHandlersUnignoreCmd() url.Values {
result := url.Values{}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
src/app.py | import os
import re
import random
from chalice import Chalice, Cron
from chalicelib.gitlab import GitlabClient
from chalicelib.slack import SlackClient
AUTHOS_LIST_IDS = os.environ.get('AUTHOS_LIST_IDS', '')
AUTHOS_LIST_IDS = re.findall(r'\d+', AUTHOS_LIST_IDS)
GITLAB_REPO_LIST_IDS = os.environ.get('GITLAB_REPO_LIST_IDS', '')
GITLAB_REPO_LIST_IDS = re.findall(r'\d+', GITLAB_REPO_LIST_IDS)
class Notification(object):
AUTHORS = AUTHOS_LIST_IDS
def send_to_slack(self):
data = self._format_data()
SlackClient().message(data)
def _message(self):
MESSAGES = [
"Fala meus consagrados, temos vários MR pendentes vamos ajudar!",
"E aí meus cumpadis, vamos olhar um MR hoje?",
"Estamos comemorando bar-mitza desses MR, vamos ajudar?",
"Jovens e jovas, seguem os MR em aberto de hoje",
"O povo bunito! Vamos ver um MR pra liberar os amiguinhos!",
"A cada 15 minutos, um MR é esquecido no Brasil! Vamos contribuir!",
"Faça como eu, revise os MR dos seus amigos!",
"Olha o ronaldinho!",
"Olhas os MRs aí gente!",
]
return random.choice(MESSAGES)
def _format_data(self):
data = {
"text": self._message(),
"username": "Fabiao",
"mrkdwn": True,
"attachments": []
}
for project_id in GITLAB_REPO_LIST_IDS:
for item in GitlabClient().project_merge_requests(project_id=project_id, scope='all', state='opened'):
# text = item.get('description')[:40]
if item.get('title').startswith('WIP'):
continue
data['attachments'].append({
"title": '{} (#{})'.format(item.get('title'), item.get('id')),
"text": "<https://gitlab.com/{0}|{0}>".format(
item.get('references').get('full').split('!')[0],
),
"title_link": item.get('web_url'),
"footer": 'criado por {}'.format(item.get('author').get('name')),
"footer_icon": item.get('author').get('avatar_url'),
})
return data
app = Chalice(app_name='connect')
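# The Cron expression below follows the order (minutes, hours, day-of-month,
# month, day-of-week, year): it fires at minute 0 of hours 12, 17 and 20 UTC,
# Monday through Friday.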
@app.schedule(Cron(0, '12,17,20', '?', '*', 'MON-FRI', '*'))
def send_to_slack(event):
Notification().send_to_slack()
| []
| []
| [
"AUTHOS_LIST_IDS",
"GITLAB_REPO_LIST_IDS"
]
| [] | ["AUTHOS_LIST_IDS", "GITLAB_REPO_LIST_IDS"] | python | 2 | 0 | |
AgentServer/settings.py | """
Django settings for AgentServer project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import sys
from configparser import ConfigParser
from urllib.parse import urljoin
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
config = ConfigParser()
config.read(os.path.join(BASE_DIR, 'conf/config.ini'))
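# Illustrative layout of conf/config.ini (section and key names are taken from
# the config.get() calls below; all values are placeholders):
#
#   [mysql]
#   host = 127.0.0.1
#   port = 3306
#   name = dongtai
#   user = root
#   password = <password>
#
#   [engine]
#   url = http://127.0.0.1:8067
#
#   [aliyun_oss]
#   access_key = <access key>
#   access_key_secret = <access key secret>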
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u2^jmdc^l#=uz&r765fb4nyo)k*)0%tk3%yp*xf#i8b%(+-&vj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("debug", 'false') == 'true'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'dongtai',
'apiserver',
'drf_spectacular'
]
#
SPECTACULAR_SETTINGS = {
'TITLE': 'Your Project API',
'DESCRIPTION': 'Your project description',
'VERSION': '1.0.0',
# OTHER SETTINGS
}
REST_FRAMEWORK = {
'PAGE_SIZE': 20,
'DEFAULT_PAGINATION_CLASS': ['django.core.paginator'],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
],
# swagger setting
'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AgentServer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AgentServer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
if len(sys.argv) > 1 and sys.argv[1] == 'test':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {'charset': 'utf8mb4'},
'USER': config.get("mysql", 'user'),
'NAME': config.get("mysql", 'name'),
'PASSWORD': config.get("mysql", 'password'),
'HOST': config.get("mysql", 'host'),
'PORT': config.get("mysql", 'port'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'dongtai.User'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Paths to the public and private keys required for RSA encryption/decryption
PRIVATE_KEY = os.path.join(BASE_DIR, 'config', 'rsa_keys/private_key.pem')
PUBLIC_KEY = os.path.join(BASE_DIR, 'config', 'rsa_keys/public_key.pem')
ENGINE_URL = config.get("engine", "url")
HEALTH_ENGINE_URL = urljoin(ENGINE_URL, "/api/engine/health")
BASE_ENGINE_URL = config.get("engine", "url") + '/api/engine/run?method_pool_id={id}'
REPLAY_ENGINE_URL = config.get("engine", "url") + '/api/engine/run?method_pool_id={id}&model=replay'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '{levelname} {asctime} [{module}.{funcName}:{lineno}] {message}',
'style': '{',
},
'simple': {
'format': '{levelname} {message}',
'style': '{',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'dongtai.openapi': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/openapi.log'),
'backupCount': 5,
'maxBytes': 1024 * 1024 * 10,
'formatter': 'verbose',
'encoding':'utf8',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
},
'dongtai.openapi': {
'handlers': ['console', 'dongtai.openapi'],
'propagate': True,
'level': 'INFO',
},
'dongtai-core': {
'handlers': ['console', 'dongtai.openapi'],
'propagate': True,
'level': 'INFO',
},
}
}
# Aliyun (Alibaba Cloud) OSS access credentials
ACCESS_KEY = config.get('aliyun_oss', 'access_key')
ACCESS_KEY_SECRET = config.get('aliyun_oss', 'access_key_secret')
BUCKET_URL = 'https://oss-cn-beijing.aliyuncs.com'
BUCKET_NAME = 'dongtai'
BUCKET_NAME_BASE_URL = 'agent/' if os.getenv('active.profile',
None) != 'TEST' else 'agent_test/'
# CONST
PENDING = 1
VERIFYING = 2
CONFIRMED = 3
IGNORE = 4
SOLVED = 5
if os.getenv('active.profile', None) == 'TEST' or os.getenv('PYTHONAGENT', None) == 'TRUE':
MIDDLEWARE.append('dongtai_agent_python.middlewares.django_middleware.FireMiddleware')
| []
| []
| [
"active.profile",
"debug",
"PYTHONAGENT"
]
| [] | ["active.profile", "debug", "PYTHONAGENT"] | python | 3 | 0 | |
concordion/src/test/java/nl/knaw/huc/textrepo/Config.java | package nl.knaw.huc.textrepo;
import java.util.List;
import static com.google.common.collect.Lists.newArrayList;
import static java.lang.String.format;
import static java.lang.System.getenv;
import static org.apache.commons.lang3.StringUtils.isBlank;
public class Config {
public static final String HTTP_ES_HOST = "http://" + requireNonBlank("ES_HOST");
public static final String HTTP_APP_HOST = "http://" + requireNonBlank("APP_HOST");
public static final String HTTP_APP_HOST_ADMIN = "http://" + requireNonBlank("APP_HOST_ADMIN");
public static final String POSTGRES_PASSWORD = requireNonBlank("POSTGRES_PASSWORD");
public static final String POSTGRES_DB = requireNonBlank("POSTGRES_DB");
public static final String POSTGRES_USER = requireNonBlank("POSTGRES_USER");
public static final String POSTGRES_HOST = requireNonBlank("POSTGRES_HOST");
public static final String FULL_TEXT_INDEX = requireNonBlank("FULL_TEXT_INDEX");
public static final String CUSTOM_INDEX = requireNonBlank("CUSTOM_INDEX");
public static final String AUTOCOMPLETE_INDEX = requireNonBlank("AUTOCOMPLETE_INDEX");
public static final String FILE_INDEX = requireNonBlank("FILE_INDEX");
public static final String HOST = HTTP_APP_HOST;
public static final String TYPES_URL = HOST + "/rest/types";
public static final List<String> INDICES = newArrayList(
FULL_TEXT_INDEX,
CUSTOM_INDEX,
AUTOCOMPLETE_INDEX,
FILE_INDEX
);
public static final String TEXT_TYPE = "text";
public static final String TEXT_MIMETYPE = "text/plain";
public static final String FOO_TYPE = "foo";
public static final String FOO_MIMETYPE = "foo/bar";
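// Illustrative only: these values are expected to be provided by the test
// environment (for example exported by docker-compose or the shell), e.g.:
//   ES_HOST=localhost:9200
//   APP_HOST=localhost:8080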
private static String requireNonBlank(String field) {
final var value = getenv(field);
if (isBlank(value)) {
throw new RuntimeException(format("Env var [%s] is not set", field));
}
return value;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
clients/java/zts/core/src/main/java/com/yahoo/athenz/zts/ZTSClient.java | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zts;
import java.io.Closeable;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.CertificateParsingException;
import java.security.cert.X509Certificate;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLSession;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import com.fasterxml.jackson.databind.DeserializationFeature;
import org.bouncycastle.asn1.DERIA5String;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.operator.OperatorCreationException;
import org.glassfish.jersey.apache.connector.ApacheConnectorProvider;
import org.glassfish.jersey.client.ClientConfig;
import org.glassfish.jersey.client.ClientProperties;
import org.glassfish.jersey.jackson.internal.jackson.jaxrs.json.JacksonJaxbJsonProvider;
import org.glassfish.jersey.jackson.internal.jackson.jaxrs.json.JacksonJsonProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.Credentials;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.yahoo.athenz.auth.Principal;
import com.yahoo.athenz.auth.PrivateKeyStore;
import com.yahoo.athenz.auth.ServiceIdentityProvider;
import com.yahoo.athenz.auth.impl.RoleAuthority;
import com.yahoo.athenz.auth.util.Crypto;
import com.yahoo.athenz.auth.util.CryptoException;
import com.yahoo.athenz.common.config.AthenzConfig;
import com.yahoo.athenz.common.utils.SSLUtils;
import com.yahoo.athenz.common.utils.SSLUtils.ClientSSLContextBuilder;
import com.yahoo.rdl.JSON;
public class ZTSClient implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(ZTSClient.class);
private String ztsUrl = null;
private String proxyUrl = null;
private String domain = null;
private String service = null;
private SSLContext sslContext = null;
ZTSRDLGeneratedClient ztsClient = null;
ServiceIdentityProvider siaProvider = null;
Principal principal = null;
// configurable fields
//
static private boolean cacheDisabled = false;
static private int tokenMinExpiryTime = 900;
static private long prefetchInterval = 60; // seconds
static private boolean prefetchAutoEnable = true;
static private String x509CsrDn = null;
static private String x509CsrDomain = null;
static private int reqReadTimeout = 30000;
static private int reqConnectTimeout = 30000;
static private String x509CertDNSName = null;
static private String confZtsUrl = null;
private boolean enablePrefetch = true;
private boolean ztsClientOverride = false;
@SuppressWarnings("unused")
static private boolean initialized = initConfigValues();
// system properties
public static final String ZTS_CLIENT_PROP_ATHENZ_CONF = "athenz.athenz_conf";
public static final String ZTS_CLIENT_PROP_TOKEN_MIN_EXPIRY_TIME = "athenz.zts.client.token_min_expiry_time";
public static final String ZTS_CLIENT_PROP_READ_TIMEOUT = "athenz.zts.client.read_timeout";
public static final String ZTS_CLIENT_PROP_CONNECT_TIMEOUT = "athenz.zts.client.connect_timeout";
public static final String ZTS_CLIENT_PROP_PREFETCH_SLEEP_INTERVAL = "athenz.zts.client.prefetch_sleep_interval";
public static final String ZTS_CLIENT_PROP_PREFETCH_AUTO_ENABLE = "athenz.zts.client.prefetch_auto_enable";
public static final String ZTS_CLIENT_PROP_X509CERT_DNS_NAME = "athenz.zts.client.x509cert_dns_name";
public static final String ZTS_CLIENT_PROP_X509CSR_DN = "athenz.zts.client.x509csr_dn";
public static final String ZTS_CLIENT_PROP_X509CSR_DOMAIN = "athenz.zts.client.x509csr_domain";
public static final String ZTS_CLIENT_PROP_DISABLE_CACHE = "athenz.zts.client.disable_cache";
public static final String ZTS_CLIENT_PROP_CERT_ALIAS = "athenz.zts.client.cert_alias";
public static final String ZTS_CLIENT_PROP_KEYSTORE_PATH = "athenz.zts.client.keystore_path";
public static final String ZTS_CLIENT_PROP_KEYSTORE_TYPE = "athenz.zts.client.keystore_type";
public static final String ZTS_CLIENT_PROP_KEYSTORE_PASSWORD = "athenz.zts.client.keystore_password";
public static final String ZTS_CLIENT_PROP_KEYSTORE_PWD_APP_NAME = "athenz.zts.client.keystore_pwd_app_name";
public static final String ZTS_CLIENT_PROP_KEY_MANAGER_PASSWORD = "athenz.zts.client.keymanager_password";
public static final String ZTS_CLIENT_PROP_KEY_MANAGER_PWD_APP_NAME = "athenz.zts.client.keymanager_pwd_app_name";
public static final String ZTS_CLIENT_PROP_TRUSTSTORE_PATH = "athenz.zts.client.truststore_path";
public static final String ZTS_CLIENT_PROP_TRUSTSTORE_TYPE = "athenz.zts.client.truststore_type";
public static final String ZTS_CLIENT_PROP_TRUSTSTORE_PASSWORD = "athenz.zts.client.truststore_password";
public static final String ZTS_CLIENT_PROP_TRUSTSTORE_PWD_APP_NAME = "athenz.zts.client.truststore_pwd_app_name";
public static final String ZTS_CLIENT_PROP_PRIVATE_KEY_STORE_FACTORY_CLASS = "athenz.zts.client.private_keystore_factory_class";
public static final String ZTS_CLIENT_PROP_CLIENT_PROTOCOL = "athenz.zts.client.client_ssl_protocol";
public static final String ZTS_CLIENT_PKEY_STORE_FACTORY_CLASS = "com.yahoo.athenz.auth.impl.FilePrivateKeyStoreFactory";
public static final String ZTS_CLIENT_DEFAULT_CLIENT_SSL_PROTOCOL = "TLSv1.2";
public static final String ROLE_TOKEN_HEADER = System.getProperty(RoleAuthority.ATHENZ_PROP_ROLE_HEADER,
RoleAuthority.HTTP_HEADER);
final static ConcurrentHashMap<String, RoleToken> ROLE_TOKEN_CACHE = new ConcurrentHashMap<>();
final static ConcurrentHashMap<String, AccessTokenResponseCacheEntry> ACCESS_TOKEN_CACHE = new ConcurrentHashMap<>();
final static ConcurrentHashMap<String, AWSTemporaryCredentials> AWS_CREDS_CACHE = new ConcurrentHashMap<>();
private static final long FETCH_EPSILON = 60; // if cache expires in the next minute, fetch it.
private static final Queue<PrefetchRoleTokenScheduledItem> PREFETCH_SCHEDULED_ITEMS = new ConcurrentLinkedQueue<>();
private static Timer FETCH_TIMER;
private static final Object TIMER_LOCK = new Object();
static AtomicLong FETCHER_LAST_RUN_AT = new AtomicLong(-1);
// allows outside implementations to get role tokens for special environments - ex. hadoop
private static ServiceLoader<ZTSClientService> ztsTokenProviders;
private static AtomicReference<Set<String>> svcLoaderCacheKeys;
private static PrivateKeyStore PRIVATE_KEY_STORE = loadServicePrivateKey();
static boolean initConfigValues() {
// load our service providers tokens
loadSvcProviderTokens();
// set the token min expiry time
setTokenMinExpiryTime(Integer.parseInt(System.getProperty(ZTS_CLIENT_PROP_TOKEN_MIN_EXPIRY_TIME, "900")));
// set the prefetch interval
setPrefetchInterval(Integer.parseInt(System.getProperty(ZTS_CLIENT_PROP_PREFETCH_SLEEP_INTERVAL, "60")));
// set the prefetch support
setPrefetchAutoEnable(Boolean.parseBoolean(System.getProperty(ZTS_CLIENT_PROP_PREFETCH_AUTO_ENABLE, "true")));
// disable the cache if configured
setCacheDisable(Boolean.parseBoolean(System.getProperty(ZTS_CLIENT_PROP_DISABLE_CACHE, "false")));
// set x509 csr details
setX509CsrDetails(System.getProperty(ZTS_CLIENT_PROP_X509CSR_DN),
System.getProperty(ZTS_CLIENT_PROP_X509CSR_DOMAIN));
// set connection timeouts
setConnectionTimeouts(Integer.parseInt(System.getProperty(ZTS_CLIENT_PROP_CONNECT_TIMEOUT, "30000")),
Integer.parseInt(System.getProperty(ZTS_CLIENT_PROP_READ_TIMEOUT, "30000")));
// set our server certificate dns name
setX509CertDnsName(System.getProperty(ZTS_CLIENT_PROP_X509CERT_DNS_NAME));
// finally retrieve our configuration ZTS url from our config file
lookupZTSUrl();
return true;
}
/**
* Set the X509 Cert DNS Name in case ZTS Server is running with
* a certificate not matching its hostname
* @param dnsName name of the ZTS Servers X.509 Cert dns value
*/
public static void setX509CertDnsName(final String dnsName) {
x509CertDNSName = dnsName;
}
/**
* Set request connection and read timeout
* @param connectTimeout timeout for initial connection in milliseconds
* @param readTimeout timeout for read response in milliseconds
*/
public static void setConnectionTimeouts(int connectTimeout, int readTimeout) {
reqConnectTimeout = connectTimeout;
reqReadTimeout = readTimeout;
}
/**
* Set X509 CSR Details - DN and domain name. These values can be specified
* in the generate csr function as well in which case these will be ignored.
* @param csrDn string identifying the dn for the csr without the cn component
* @param csrDomain string identifying the dns domain for generating SAN fields
*/
public static void setX509CsrDetails(final String csrDn, final String csrDomain) {
x509CsrDn = csrDn;
x509CsrDomain = csrDomain;
}
/**
* Disable the cache of role tokens if configured.
* @param cacheState false to disable the cache
*/
public static void setCacheDisable(boolean cacheState) {
cacheDisabled = cacheState;
}
/**
* Enable prefetch of role tokens
* @param fetchState state of prefetch
*/
public static void setPrefetchAutoEnable(boolean fetchState) {
prefetchAutoEnable = fetchState;
}
/**
* Set the prefetch interval. if the prefetch interval is longer than
* our token min expiry time, then we'll default back to 60 seconds
* @param interval time in seconds
*/
public static void setPrefetchInterval(int interval) {
prefetchInterval = interval;
if (prefetchInterval >= tokenMinExpiryTime) {
prefetchInterval = 60;
}
}
/**
* Set the minimum token expiry time. The server will not give out tokens
* less than configured expiry time
* @param minExpiryTime expiry time in seconds
*/
public static void setTokenMinExpiryTime(int minExpiryTime) {
// The minimum token expiry time by default is 15 minutes (900). By default the
// server gives out role tokens for 2 hours and with this setting we'll be able
// to cache tokens for 1hr45mins before requesting a new one from ZTS
tokenMinExpiryTime = minExpiryTime;
if (tokenMinExpiryTime < 0) {
tokenMinExpiryTime = 900;
}
}
public static void lookupZTSUrl() {
String rootDir = System.getenv("ROOT");
if (rootDir == null) {
rootDir = "/home/athenz";
}
String confFileName = System.getProperty(ZTS_CLIENT_PROP_ATHENZ_CONF,
rootDir + "/conf/athenz/athenz.conf");
try {
Path path = Paths.get(confFileName);
AthenzConfig conf = JSON.fromBytes(Files.readAllBytes(path), AthenzConfig.class);
confZtsUrl = conf.getZtsUrl();
} catch (Exception ex) {
// if we have a zts client service specified and we have keys
// in our service loader cache then we're running within
// some managed framework (e.g. hadoop) so we're going to
// report this exception as a warning rather than an error
// and default to localhost as the url to avoid further
// warnings from our generated client
LOG.warn("Unable to extract ZTS Url from conf file {}, exc: {}",
confFileName, ex.getMessage());
if (!svcLoaderCacheKeys.get().isEmpty()) {
confZtsUrl = "https://localhost:4443/";
}
}
}
/**
* Constructs a new ZTSClient object with default settings.
* The url for ZTS Server is automatically retrieved from the athenz
* configuration file (ztsUrl field). The client can only be used
* to retrieve objects from ZTS that do not require any authentication;
* otherwise the addCredentials method must be used to set the principal identity.
* Default read and connect timeout values are 30000ms (30sec).
* The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in
* milliseconds.
*/
public ZTSClient() {
initClient(null, null, null, null, null);
enablePrefetch = false; // can't use this domain and service for prefetch
}
/**
* Constructs a new ZTSClient object with the given ZTS Server Url.
* If the specified zts url is null, then it is automatically
* retrieved from athenz.conf configuration file (ztsUrl field).
* Default read and connect timeout values are 30000ms (30sec).
* The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in
* milliseconds. This client object can only be used for API calls
* that require no authentication or setting the principal using
* addCredentials method before calling any other authentication
* protected API.
* @param ztsUrl ZTS Server's URL (optional)
*/
public ZTSClient(String ztsUrl) {
initClient(ztsUrl, null, null, null, null);
enablePrefetch = false; // can't use this domain and service for prefetch
}
/**
* Constructs a new ZTSClient object with the given principal identity.
* The url for ZTS Server is automatically retrieved from the athenz
* configuration file (ztsUrl field). Default read and connect timeout values
* are 30000ms (30sec). The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in milliseconds.
* @param identity Principal identity for authenticating requests
*/
public ZTSClient(Principal identity) {
this(null, identity);
}
/**
* Constructs a new ZTSClient object with the given principal identity
* and ZTS Server Url. Default read and connect timeout values are
* 30000ms (30sec). The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in milliseconds.
* @param ztsUrl ZTS Server's URL (optional)
* @param identity Principal identity for authenticating requests
*/
public ZTSClient(String ztsUrl, Principal identity) {
// verify we have a valid principal and authority
if (identity == null) {
throw new IllegalArgumentException("Principal object must be specified");
}
if (identity.getAuthority() == null) {
throw new IllegalArgumentException("Principal Authority cannot be null");
}
initClient(ztsUrl, identity, null, null, null);
enablePrefetch = false; // can't use this domain and service for prefetch
}
/**
* Constructs a new ZTSClient object with the given SSLContext object
* and ZTS Server Url. Default read and connect timeout values are
* 30000ms (30sec). The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in milliseconds.
* @param ztsUrl ZTS Server's URL (optional)
* @param sslContext SSLContext that includes service's private key and x.509 certificate
* for authenticating requests
*/
public ZTSClient(String ztsUrl, SSLContext sslContext) {
this(ztsUrl, null, sslContext);
}
/**
* Constructs a new ZTSClient object with the given SSLContext object
* and ZTS Server Url through the specified Proxy URL. Default read
* and connect timeout values are 30000ms (30sec). The application can
* change these values by using the athenz.zts.client.read_timeout and
* athenz.zts.client.connect_timeout system properties. The values
* specified for timeouts must be in milliseconds.
* @param ztsUrl ZTS Server's URL
* @param proxyUrl Proxy Server's URL
* @param sslContext SSLContext that includes service's private key and x.509 certificate
* for authenticating requests
*/
public ZTSClient(String ztsUrl, String proxyUrl, SSLContext sslContext) {
// verify we have a valid ssl context specified
if (sslContext == null) {
throw new IllegalArgumentException("SSLContext object must be specified");
}
this.sslContext = sslContext;
this.proxyUrl = proxyUrl;
initClient(ztsUrl, null, null, null, null);
}
/**
* Constructs a new ZTSClient object with the given service details
* identity provider (which will provide the ntoken for the service)
* The ZTS Server url is automatically retrieved from athenz.conf configuration
* file (ztsUrl field). Default read and connect timeout values are
* 30000ms (30sec). The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in milliseconds.
* @param domainName name of the domain
* @param serviceName name of the service
* @param siaProvider service identity provider for the client to request principals
*/
public ZTSClient(String domainName, String serviceName, ServiceIdentityProvider siaProvider) {
this(null, domainName, serviceName, siaProvider);
}
/**
* Constructs a new ZTSClient object with the given service details
* identity provider (which will provide the ntoken for the service)
* and ZTS Server Url. If the specified zts url is null, then it is
* automatically retrieved from athenz.conf configuration file
* (ztsUrl field). Default read and connect timeout values are
* 30000ms (30sec). The application can change these values by using the
* athenz.zts.client.read_timeout and athenz.zts.client.connect_timeout
* system properties. The values specified for timeouts must be in milliseconds.
* @param ztsUrl ZTS Server's URL (optional)
* @param domainName name of the domain
* @param serviceName name of the service
* @param siaProvider service identity provider for the client to request principals
*/
public ZTSClient(String ztsUrl, String domainName, String serviceName,
ServiceIdentityProvider siaProvider) {
if (domainName == null || domainName.isEmpty()) {
throw new IllegalArgumentException("Domain name must be specified");
}
if (serviceName == null || serviceName.isEmpty()) {
throw new IllegalArgumentException("Service name must be specified");
}
if (siaProvider == null) {
throw new IllegalArgumentException("Service Identity Provider must be specified");
}
initClient(ztsUrl, null, domainName, serviceName, siaProvider);
}
/**
* Close the ZTSClient object and release any allocated resources.
*/
@Override
public void close() {
ztsClient.close();
}
/**
* Call to enable/disable prefetch for the current ZTSClient.
* @param state whether prefetch is enabled or not
*/
public void setEnablePrefetch(boolean state) {
enablePrefetch = state;
}
/**
* Set new ZTS Client configuration property. This method calls
* internal javax.ws.rs.client.Client client's property method.
* If already set, the existing value of the property will be updated.
* Setting a null value into a property effectively removes the property
* from the property bag.
* @param name property name.
* @param value property value. null value removes the property with the given name.
*/
public void setProperty(String name, Object value) {
if (ztsClient != null) {
ztsClient.setProperty(name, value);
}
}
/**
* Cancel the Prefetch Timer. This removes all the prefetch
* items from the list, purges and cancels the fetch timer.
* This should be called before application shutdown.
*/
public static void cancelPrefetch() {
PREFETCH_SCHEDULED_ITEMS.clear();
if (FETCH_TIMER != null) {
FETCH_TIMER.purge();
FETCH_TIMER.cancel();
FETCH_TIMER = null;
}
}
/**
* Returns the locally configured ZTS Server's URL value
* @return ZTS Server URL
*/
public String getZTSUrl() {
return ztsUrl;
}
public void setZTSRDLGeneratedClient(ZTSRDLGeneratedClient client) {
this.ztsClient = client;
ztsClientOverride = true;
}
private SSLContext createSSLContext() {
// to create the SSL context we must have the keystore path
// specified. If it's not specified, then we are not going
// to create our ssl context
String keyStorePath = System.getProperty(ZTS_CLIENT_PROP_KEYSTORE_PATH);
if (keyStorePath == null || keyStorePath.isEmpty()) {
return null;
}
String keyStoreType = System.getProperty(ZTS_CLIENT_PROP_KEYSTORE_TYPE);
String keyStorePwd = System.getProperty(ZTS_CLIENT_PROP_KEYSTORE_PASSWORD);
char[] keyStorePassword = null;
if (null != keyStorePwd && !keyStorePwd.isEmpty()) {
keyStorePassword = keyStorePwd.toCharArray();
}
String keyStorePasswordAppName = System.getProperty(ZTS_CLIENT_PROP_KEYSTORE_PWD_APP_NAME);
char[] keyManagerPassword = null;
String keyManagerPwd = System.getProperty(ZTS_CLIENT_PROP_KEY_MANAGER_PASSWORD);
if (null != keyManagerPwd && !keyManagerPwd.isEmpty()) {
keyManagerPassword = keyManagerPwd.toCharArray();
}
String keyManagerPasswordAppName = System.getProperty(ZTS_CLIENT_PROP_KEY_MANAGER_PWD_APP_NAME);
// truststore
String trustStorePath = System.getProperty(ZTS_CLIENT_PROP_TRUSTSTORE_PATH);
String trustStoreType = System.getProperty(ZTS_CLIENT_PROP_TRUSTSTORE_TYPE);
String trustStorePwd = System.getProperty(ZTS_CLIENT_PROP_TRUSTSTORE_PASSWORD);
char[] trustStorePassword = null;
if (null != trustStorePwd && !trustStorePwd.isEmpty()) {
trustStorePassword = trustStorePwd.toCharArray();
}
String trustStorePasswordAppName = System.getProperty(ZTS_CLIENT_PROP_TRUSTSTORE_PWD_APP_NAME);
// alias and protocol details
String certAlias = System.getProperty(ZTS_CLIENT_PROP_CERT_ALIAS);
String clientProtocol = System.getProperty(ZTS_CLIENT_PROP_CLIENT_PROTOCOL,
ZTS_CLIENT_DEFAULT_CLIENT_SSL_PROTOCOL);
ClientSSLContextBuilder builder = new SSLUtils.ClientSSLContextBuilder(clientProtocol)
.privateKeyStore(PRIVATE_KEY_STORE).keyStorePath(keyStorePath);
if (null != certAlias && !certAlias.isEmpty()) {
builder.certAlias(certAlias);
}
if (null != keyStoreType && !keyStoreType.isEmpty()) {
builder.keyStoreType(keyStoreType);
}
if (null != keyStorePassword) {
builder.keyStorePassword(keyStorePassword);
}
if (null != keyStorePasswordAppName) {
builder.keyStorePasswordAppName(keyStorePasswordAppName);
}
if (null != keyManagerPassword) {
builder.keyManagerPassword(keyManagerPassword);
}
if (null != keyManagerPasswordAppName) {
builder.keyManagerPasswordAppName(keyManagerPasswordAppName);
}
if (null != trustStorePath && !trustStorePath.isEmpty()) {
builder.trustStorePath(trustStorePath);
}
if (null != trustStoreType && !trustStoreType.isEmpty()) {
builder.trustStoreType(trustStoreType);
}
if (null != trustStorePassword) {
builder.trustStorePassword(trustStorePassword);
}
if (null != trustStorePasswordAppName) {
builder.trustStorePasswordAppName(trustStorePasswordAppName);
}
return builder.build();
}
static PrivateKeyStore loadServicePrivateKey() {
String pkeyFactoryClass = System.getProperty(ZTS_CLIENT_PROP_PRIVATE_KEY_STORE_FACTORY_CLASS,
ZTS_CLIENT_PKEY_STORE_FACTORY_CLASS);
return SSLUtils.loadServicePrivateKey(pkeyFactoryClass);
}
private void initClient(final String serverUrl, Principal identity,
final String domainName, final String serviceName,
final ServiceIdentityProvider siaProvider) {
ztsUrl = (serverUrl == null) ? confZtsUrl : serverUrl;
// verify if the url is ending with /zts/v1 and if it's
// not we'll automatically append it
if (ztsUrl != null && !ztsUrl.isEmpty()) {
if (!ztsUrl.endsWith("/zts/v1")) {
if (ztsUrl.charAt(ztsUrl.length() - 1) != '/') {
ztsUrl += '/';
}
ztsUrl += "zts/v1";
}
}
// determine to see if we need a host verifier for our ssl connections
HostnameVerifier hostnameVerifier = null;
if (x509CertDNSName != null && !x509CertDNSName.isEmpty()) {
hostnameVerifier = new AWSHostNameVerifier(x509CertDNSName);
}
// if we don't have a ssl context specified, check the system
// properties to see if we need to create one
if (sslContext == null) {
sslContext = createSSLContext();
}
// setup our client config object with timeouts
final JacksonJsonProvider jacksonJsonProvider = new JacksonJaxbJsonProvider().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
final ClientConfig config = new ClientConfig(jacksonJsonProvider);
config.property(ClientProperties.CONNECT_TIMEOUT, reqConnectTimeout);
config.property(ClientProperties.READ_TIMEOUT, reqReadTimeout);
config.connectorProvider(new ApacheConnectorProvider());
// if we're asked to use a proxy for our request
// we're going to set the property that is supported
// by the apache connector and use that
if (proxyUrl != null) {
config.property(ClientProperties.PROXY_URI, proxyUrl);
}
ClientBuilder builder = ClientBuilder.newBuilder();
if (sslContext != null) {
builder = builder.sslContext(sslContext);
enablePrefetch = true;
}
Client rsClient = builder.hostnameVerifier(hostnameVerifier)
.withConfig(config)
.build();
ztsClient = new ZTSRDLGeneratedClient(ztsUrl, rsClient);
principal = identity;
domain = domainName;
service = serviceName;
this.siaProvider = siaProvider;
// if we are given a principal object then we need
// to update the domain/service settings
if (principal != null) {
domain = principal.getDomain();
service = principal.getName();
ztsClient.addCredentials(identity.getAuthority().getHeader(), identity.getCredentials());
}
}
void setPrefetchInterval(long interval) {
prefetchInterval = interval;
}
long getPrefetchInterval() {
return prefetchInterval;
}
/**
* Returns the header name that the client needs to use to pass
* the received RoleToken to the Athenz protected service.
* @return HTTP header name
*/
public static String getHeader() {
return ROLE_TOKEN_HEADER;
}
/**
* Set client credentials based on the given principal.
* @param identity Principal identity for authenticating requests
* @return self ZTSClient object
*/
public ZTSClient addCredentials(Principal identity) {
return addPrincipalCredentials(identity, true);
}
/**
* Set the client credentials using the specified header and token.
* @param credHeader authentication header name
* @param credToken authentication credentials
*/
public void addCredentials(String credHeader, String credToken) {
ztsClient.addCredentials(credHeader, credToken);
}
/**
* Clear the principal identity set for the client. Unless a new principal is set
* using the addCredentials method, the client can only be used to requests data
* from the ZTS Server that doesn't require any authentication.
* @return self ZTSClient object
*/
public ZTSClient clearCredentials() {
if (principal != null) {
ztsClient.addCredentials(principal.getAuthority().getHeader(), null);
principal = null;
}
return this;
}
ZTSClient addPrincipalCredentials(Principal identity, boolean resetServiceDetails) {
if (identity != null && identity.getAuthority() != null) {
ztsClient.addCredentials(identity.getAuthority().getHeader(), identity.getCredentials());
}
// if the client is adding new principal identity then we have to
// clear out the sia provider object reference so that we don't try
// to get a service token since we already have one given to us
if (resetServiceDetails) {
siaProvider = null;
}
principal = identity;
return this;
}
boolean sameCredentialsAsBefore(Principal svcPrincipal) {
// if we don't have a principal or no credentials
// then the principal has changed
if (principal == null) {
return false;
}
String creds = principal.getCredentials();
if (creds == null) {
return false;
}
return creds.equals(svcPrincipal.getCredentials());
}
boolean updateServicePrincipal() {
// if we have a service principal then we need to keep updating
// our PrincipalToken otherwise it might expire.
if (siaProvider == null) {
return false;
}
Principal svcPrincipal = siaProvider.getIdentity(domain, service);
// if we get no principal from our sia provider, then we
// should log and throw an IllegalArgumentException otherwise the
// client doesn't know that something bad has happened - in this
// case illegal domain/service was passed to the constructor
// and the ZTS Server just rejects the request with 401
if (svcPrincipal == null) {
final String msg = "UpdateServicePrincipal: Unable to get PrincipalToken "
+ "from SIA Provider for " + domain + "." + service;
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
// if the principal has the same credentials as before
// then we don't need to update anything
if (sameCredentialsAsBefore(svcPrincipal)) {
return false;
}
addPrincipalCredentials(svcPrincipal, false);
return true;
}
/**
* Retrieve list of services that have been configured to run on the specified host
* @param host name of the host
* @return list of service names on success. ZTSClientException will be thrown in case of failure
*/
public HostServices getHostServices(String host) {
updateServicePrincipal();
try {
return ztsClient.getHostServices(host);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Retrieve list of ZTS Server public keys in Json WEB Key (JWK) format
* @return list of public keys (JWKs) on success. ZTSClientException will be thrown in case of failure
*/
public JWKList getJWKList() {
updateServicePrincipal();
try {
return ztsClient.getJWKList();
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* For the specified requester(user/service) return the corresponding Role Token that
* includes the list of roles that the principal has access to in the specified domain.
* The client will automatically fulfill the request from the cache, if possible.
* The default minimum expiry time is 900 secs (15 mins).
* @param domainName name of the domain
* @return ZTS generated Role Token. ZTSClientException will be thrown in case of failure
*/
public RoleToken getRoleToken(String domainName) {
return getRoleToken(domainName, null, null, null, false, null);
}
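/*
 * Illustrative usage (not part of the original source; the URL is a placeholder,
 * sslContext is the caller's SSLContext and request stands for whatever HTTP
 * client request object is being prepared):
 *
 *   try (ZTSClient zts = new ZTSClient("https://zts.example.com:4443/zts/v1", sslContext)) {
 *       RoleToken token = zts.getRoleToken("sports");
 *       request.addHeader(ZTSClient.getHeader(), token.getToken());
 *   }
 *
 * Subsequent calls for the same domain are typically served from the role token
 * cache until the cached token is within tokenMinExpiryTime seconds of expiring.
 */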
/**
* For the specified requester(user/service) return the corresponding Role Token that
* includes the list of roles that the principal has access to in the specified domain
* and filtered to include only those that end with the specified suffix.
* The client will automatically fulfill the request from the cache, if possible.
* The default minimum expiry time is 900 secs (15 mins).
* @param domainName name of the domain
* @param roleNames only interested in roles with these names, comma separated list of roles
* @return ZTS generated Role Token. ZTSClientException will be thrown in case of failure
*/
public RoleToken getRoleToken(String domainName, String roleNames) {
if (roleNames == null || roleNames.isEmpty()) {
throw new IllegalArgumentException("RoleNames cannot be null or empty");
}
return getRoleToken(domainName, roleNames, null, null, false, null);
}
/**
* For the specified requester(user/service) return the corresponding Role Token that
* includes the list of roles that the principal has access to in the specified domain
* @param domainName name of the domain
* @param roleNames only interested in roles with these names, comma separated list of roles
* @param minExpiryTime (optional) specifies that the returned RoleToken must be
* at least valid (min/lower bound) for specified number of seconds,
* @param maxExpiryTime (optional) specifies that the returned RoleToken must be
* at most valid (max/upper bound) for specified number of seconds.
* @param ignoreCache ignore the cache and retrieve the token from ZTS Server
* @return ZTS generated Role Token. ZTSClientException will be thrown in case of failure
*/
public RoleToken getRoleToken(String domainName, String roleNames, Integer minExpiryTime,
Integer maxExpiryTime, boolean ignoreCache) {
return getRoleToken(domainName, roleNames, minExpiryTime, maxExpiryTime,
ignoreCache, null);
}
/**
* For the specified requester(user/service) return the corresponding Role Token that
* includes the list of roles that the principal has access to in the specified domain
* @param domainName name of the domain
* @param roleNames only interested in roles with these names, comma separated list of roles
* @param minExpiryTime (optional) specifies that the returned RoleToken must be
* at least valid (min/lower bound) for specified number of seconds,
* @param maxExpiryTime (optional) specifies that the returned RoleToken must be
* at most valid (max/upper bound) for specified number of seconds.
* @param ignoreCache ignore the cache and retrieve the token from ZTS Server
* @param proxyForPrincipal (optional) this request is proxy for this principal
* @return ZTS generated Role Token. ZTSClientException will be thrown in case of failure
*/
public RoleToken getRoleToken(String domainName, String roleNames, Integer minExpiryTime,
Integer maxExpiryTime, boolean ignoreCache, String proxyForPrincipal) {
RoleToken roleToken;
// first lookup in our cache to see if it can be satisfied
// only if we're not asked to ignore the cache
String cacheKey = null;
if (!cacheDisabled) {
cacheKey = getRoleTokenCacheKey(domainName, roleNames, proxyForPrincipal);
if (cacheKey != null && !ignoreCache) {
roleToken = lookupRoleTokenInCache(cacheKey, minExpiryTime, maxExpiryTime, tokenMinExpiryTime);
if (roleToken != null) {
return roleToken;
}
// start prefetch for this token if prefetch is enabled
if (enablePrefetch && prefetchAutoEnable) {
if (prefetchRoleToken(domainName, roleNames, minExpiryTime, maxExpiryTime,
proxyForPrincipal)) {
roleToken = lookupRoleTokenInCache(cacheKey, minExpiryTime, maxExpiryTime, tokenMinExpiryTime);
}
if (roleToken != null) {
return roleToken;
}
LOG.error("GetRoleToken: cache prefetch and lookup error");
}
}
}
// 2nd look in service providers
//
for (ZTSClientService provider: ztsTokenProviders) {
if (LOG.isDebugEnabled()) {
LOG.debug("getRoleToken: found service provider={}", provider);
}
// provider needs to know who the client is so we'll be passing
// the client's domain and service names as the first two fields
roleToken = provider.fetchToken(domain, service, domainName, roleNames,
minExpiryTime, maxExpiryTime, proxyForPrincipal);
if (roleToken != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("getRoleToken: service provider={} returns token", provider);
}
return roleToken;
}
}
// if no hit then we need to request a new token from ZTS
updateServicePrincipal();
try {
roleToken = ztsClient.getRoleToken(domainName, roleNames,
minExpiryTime, maxExpiryTime, proxyForPrincipal);
} catch (ResourceException ex) {
// if we have an entry in our cache then we'll return that
// instead of returning failure
if (cacheKey != null && !ignoreCache) {
roleToken = lookupRoleTokenInCache(cacheKey, null, null, 1);
if (roleToken != null) {
return roleToken;
}
}
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
// if we have an entry in our cache then we'll return that
// instead of returning failure
if (cacheKey != null && !ignoreCache) {
roleToken = lookupRoleTokenInCache(cacheKey, null, null, 1);
if (roleToken != null) {
return roleToken;
}
}
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
// need to add the token to our cache. If our principal was
// updated then we need to retrieve a new cache key
if (!cacheDisabled) {
if (cacheKey == null) {
cacheKey = getRoleTokenCacheKey(domainName, roleNames, proxyForPrincipal);
}
if (cacheKey != null) {
ROLE_TOKEN_CACHE.put(cacheKey, roleToken);
}
}
return roleToken;
}
/**
* For the specified requester(user/service) return the corresponding Access Token that
* includes the list of roles that the principal has access to in the specified domain
* @param domainName name of the domain
* @param roleNames (optional) only interested in roles with these names, given as a list of role names
* @param expiryTime (optional) specifies that the returned Access Token must be
* at least valid for the specified number of seconds. Pass 0 to use
* the server default timeout.
* @return ZTS generated Access Token Response object. ZTSClientException will be thrown in case of failure
*/
public AccessTokenResponse getAccessToken(String domainName, List<String> roleNames, long expiryTime) {
return getAccessToken(domainName, roleNames, null, expiryTime, false);
}
/**
* For the specified requester(user/service) return the corresponding Access Token that
* includes the list of roles that the principal has access to in the specified domain
* @param domainName name of the domain
* @param roleNames (optional) only interested in roles with these names, given as a list of role names
* @param idTokenServiceName (optional) as part of the response return an id token whose audience
* is the specified service (only service name e.g. api) in the
* domainName domain.
* @param expiryTime (optional) specifies that the returned Access Token must be
* at least valid for the specified number of seconds. Pass 0 to use
* the server default timeout.
* @param ignoreCache ignore the cache and retrieve the token from ZTS Server
* @return ZTS generated Access Token Response object. ZTSClientException will be thrown in case of failure
*/
public AccessTokenResponse getAccessToken(String domainName, List<String> roleNames, String idTokenServiceName,
long expiryTime, boolean ignoreCache) {
AccessTokenResponse accessTokenResponse;
// first lookup in our cache to see if it can be satisfied
// only if we're not asked to ignore the cache
String cacheKey = null;
if (!cacheDisabled) {
cacheKey = getAccessTokenCacheKey(domainName, roleNames, idTokenServiceName);
if (cacheKey != null && !ignoreCache) {
accessTokenResponse = lookupAccessTokenResponseInCache(cacheKey, expiryTime);
if (accessTokenResponse != null) {
return accessTokenResponse;
}
}
}
// if no hit then we need to request a new token from ZTS
updateServicePrincipal();
try {
final String requestBody = generateAccessTokenRequestBody(domainName, roleNames,
idTokenServiceName, expiryTime);
accessTokenResponse = ztsClient.postAccessTokenRequest(requestBody);
} catch (ResourceException ex) {
if (cacheKey != null && !ignoreCache) {
accessTokenResponse = lookupAccessTokenResponseInCache(cacheKey, -1);
if (accessTokenResponse != null) {
return accessTokenResponse;
}
}
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
if (cacheKey != null && !ignoreCache) {
accessTokenResponse = lookupAccessTokenResponseInCache(cacheKey, -1);
if (accessTokenResponse != null) {
return accessTokenResponse;
}
}
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
// need to add the token to our cache. If our principal was
// updated then we need to retrieve a new cache key
if (!cacheDisabled) {
if (cacheKey == null) {
cacheKey = getAccessTokenCacheKey(domainName, roleNames, idTokenServiceName);
}
if (cacheKey != null) {
ACCESS_TOKEN_CACHE.put(cacheKey, new AccessTokenResponseCacheEntry(accessTokenResponse));
}
}
return accessTokenResponse;
}
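// Example output (illustrative): for domainName "sports", roleNames ["readers"],
// idTokenServiceName "api" and expiryTime 3600, the generated body is
//   grant_type=client_credentials&expires_in=3600&scope=sports%3Arole.readers+openid+sports%3Aservice.api
// i.e. the scope "sports:role.readers openid sports:service.api" URL-encoded.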
String generateAccessTokenRequestBody(String domainName, List<String> roleNames,
String idTokenServiceName, long expiryTime) throws UnsupportedEncodingException {
StringBuilder body = new StringBuilder(256);
body.append("grant_type=client_credentials");
if (expiryTime > 0) {
body.append("&expires_in=").append(expiryTime);
}
StringBuilder scope = new StringBuilder(256);
if (roleNames == null || roleNames.isEmpty()) {
scope.append(domainName).append(":domain");
} else {
for (String role : roleNames) {
if (scope.length() != 0) {
scope.append(' ');
}
scope.append(domainName).append(":role.").append(role);
}
}
if (idTokenServiceName != null && !idTokenServiceName.isEmpty()) {
scope.append(" openid ").append(domainName).append(":service.").append(idTokenServiceName);
}
final String scopeStr = scope.toString();
body.append("&scope=").append(URLEncoder.encode(scopeStr, "UTF-8"));
return body.toString();
}
/**
* For the specified requester(user/service) return the corresponding Role Certificate
* @param domainName name of the domain
* @param roleName name of the role
* @param req Role Certificate Request (csr)
* @return RoleToken that includes client x509 role certificate
*/
public RoleToken postRoleCertificateRequest(String domainName, String roleName,
RoleCertificateRequest req) {
updateServicePrincipal();
try {
return ztsClient.postRoleCertificateRequest(domainName, roleName, req);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getMessage());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Generate a Role Certificate request that could be sent to ZTS
* to obtain a X509 Certificate for the requested role.
* @param principalDomain name of the principal's domain
* @param principalService name of the principal's service
* @param roleDomainName name of the domain where role is defined
* @param roleName name of the role to get a certificate request for
* @param privateKey private key for the service identity for the caller
* @param csrDn string identifying the dn for the csr without the cn component
* @param csrDomain string identifying the dns domain for generating SAN fields
* @param expiryTime number of seconds to request certificate to be valid for
* @return RoleCertificateRequest object
*/
static public RoleCertificateRequest generateRoleCertificateRequest(final String principalDomain,
final String principalService, final String roleDomainName, final String roleName,
PrivateKey privateKey, final String csrDn, final String csrDomain, int expiryTime) {
if (principalDomain == null || principalService == null) {
throw new IllegalArgumentException("Principal's Domain and Service must be specified");
}
if (roleDomainName == null || roleName == null) {
throw new IllegalArgumentException("Role DomainName and Name must be specified");
}
if (csrDomain == null) {
throw new IllegalArgumentException("X509 CSR Domain must be specified");
}
// Athenz uses lower case for all elements, so let's
// generate our dn which will be our role resource value
final String domain = principalDomain.toLowerCase();
final String service = principalService.toLowerCase();
String dn = "cn=" + roleDomainName.toLowerCase() + ":role." + roleName.toLowerCase();
if (csrDn != null) {
dn = dn.concat(",").concat(csrDn);
}
        // now let's generate our dnsName and email fields which will be based on
        // our principal's details
final String hostName = service + '.' + domain.replace('.', '-') + '.' + csrDomain;
final String email = domain + "." + service + "@" + csrDomain;
GeneralName[] sanArray = new GeneralName[2];
sanArray[0] = new GeneralName(GeneralName.dNSName, new DERIA5String(hostName));
sanArray[1] = new GeneralName(GeneralName.rfc822Name, new DERIA5String(email));
String csr;
try {
csr = Crypto.generateX509CSR(privateKey, dn, sanArray);
} catch (OperatorCreationException | IOException ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
return new RoleCertificateRequest().setCsr(csr).setExpiryTime((long) expiryTime);
}
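    // Illustrative example (added for clarity, not part of the original source): calling
    // generateRoleCertificateRequest("sports", "api", "weather", "updater", key, null,
    // "athenz.cloud", 3600) would build a CSR with:
    //   dn:      cn=weather:role.updater
    //   dnsName: api.sports.athenz.cloud
    //   email:   sports.api@athenz.cloud
    // ("sports", "api", "weather", "updater" and "athenz.cloud" are hypothetical values)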
/**
* Generate a Role Certificate request that could be sent to ZTS
     * to obtain an X509 Certificate for the requested role.
* @param principalDomain name of the principal's domain
* @param principalService name of the principal's service
* @param roleDomainName name of the domain where role is defined
* @param roleName name of the role to get a certificate request for
* @param privateKey private key for the service identity for the caller
* @param cloud string identifying the environment, e.g. aws
* @param expiryTime number of seconds to request certificate to be valid for
* @return RoleCertificateRequest object
*/
static public RoleCertificateRequest generateRoleCertificateRequest(final String principalDomain,
final String principalService, final String roleDomainName, final String roleName,
final PrivateKey privateKey, final String cloud, int expiryTime) {
if (cloud == null) {
throw new IllegalArgumentException("Cloud Environment must be specified");
}
String csrDomain;
if (x509CsrDomain != null) {
csrDomain = cloud + "." + x509CsrDomain;
} else {
csrDomain = cloud;
}
return generateRoleCertificateRequest(principalDomain, principalService,
roleDomainName, roleName, privateKey, x509CsrDn, csrDomain,
expiryTime);
}
/**
     * Generate an Instance Refresh request that could be sent to ZTS to
* request a TLS certificate for a service.
* @param principalDomain name of the principal's domain
* @param principalService name of the principal's service
* @param privateKey private key for the service identity for the caller
* @param csrDn string identifying the dn for the csr without the cn component
* @param csrDomain string identifying the dns domain for generating SAN fields
* @param expiryTime number of seconds to request certificate to be valid for
* @return InstanceRefreshRequest object
*/
static public InstanceRefreshRequest generateInstanceRefreshRequest(final String principalDomain,
final String principalService, PrivateKey privateKey, final String csrDn,
final String csrDomain, int expiryTime) {
if (principalDomain == null || principalService == null) {
throw new IllegalArgumentException("Principal's Domain and Service must be specified");
}
if (csrDomain == null) {
throw new IllegalArgumentException("X509 CSR Domain must be specified");
}
// Athenz uses lower case for all elements, so let's
// generate our dn which will be based on our service name
final String domain = principalDomain.toLowerCase();
final String service = principalService.toLowerCase();
final String cn = domain + "." + service;
String dn = "cn=" + cn;
if (csrDn != null) {
dn = dn.concat(",").concat(csrDn);
}
        // now let's generate our dnsName field based on our principal's details
final String hostName = service + '.' + domain.replace('.', '-') + '.' + csrDomain;
GeneralName[] sanArray = new GeneralName[1];
sanArray[0] = new GeneralName(GeneralName.dNSName, new DERIA5String(hostName));
String csr;
try {
csr = Crypto.generateX509CSR(privateKey, dn, sanArray);
} catch (OperatorCreationException | IOException ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
return new InstanceRefreshRequest().setCsr(csr).setExpiryTime(expiryTime);
}
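    // Illustrative example (added for clarity, not part of the original source): calling
    // generateInstanceRefreshRequest("sports", "api", key, null, "athenz.cloud", 3600)
    // would build a CSR with cn=sports.api and a single dnsName SAN of
    // api.sports.athenz.cloud ("sports", "api" and "athenz.cloud" are hypothetical values)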
/**
     * Generate an Instance Refresh request that could be sent to ZTS to
* request a TLS certificate for a service.
* @param principalDomain name of the principal's domain
* @param principalService name of the principal's service
* @param privateKey private key for the service identity for the caller
* @param cloud string identifying the environment, e.g. aws
* @param expiryTime number of seconds to request certificate to be valid for
* @return InstanceRefreshRequest object
*/
static public InstanceRefreshRequest generateInstanceRefreshRequest(String principalDomain,
String principalService, PrivateKey privateKey, String cloud, int expiryTime) {
if (cloud == null) {
throw new IllegalArgumentException("Cloud Environment must be specified");
}
String csrDomain;
if (x509CsrDomain != null) {
csrDomain = cloud + "." + x509CsrDomain;
} else {
csrDomain = cloud;
}
return generateInstanceRefreshRequest(principalDomain, principalService, privateKey,
x509CsrDn, csrDomain, expiryTime);
}
private static class RolePrefetchTask extends TimerTask {
ZTSClient getZTSClient(PrefetchRoleTokenScheduledItem item) {
ZTSClient client;
if (item.sslContext != null) {
client = new ZTSClient(item.providedZTSUrl, item.proxyUrl, item.sslContext);
} else {
client = new ZTSClient(item.providedZTSUrl, item.identityDomain,
item.identityName, item.siaProvider);
}
return client;
}
@Override
public void run() {
long currentTime = System.currentTimeMillis() / 1000;
FETCHER_LAST_RUN_AT.set(currentTime);
if (LOG.isDebugEnabled()) {
LOG.debug("RolePrefetchTask: Fetching role token from the scheduled queue. Size={}",
PREFETCH_SCHEDULED_ITEMS.size());
}
if (PREFETCH_SCHEDULED_ITEMS.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("RolePrefetchTask: No items to fetch. Queue is empty");
}
return;
}
List<PrefetchRoleTokenScheduledItem> toFetch = new ArrayList<>(PREFETCH_SCHEDULED_ITEMS.size());
synchronized (PREFETCH_SCHEDULED_ITEMS) {
// if this item is to be fetched now, add it to collection
for (PrefetchRoleTokenScheduledItem item : PREFETCH_SCHEDULED_ITEMS) {
                    // see if the item expires within the next prefetch interval (plus epsilon)
long expiryTime = item.expiresAtUTC - (currentTime + FETCH_EPSILON + prefetchInterval);
if (LOG.isDebugEnabled()) {
final String itemName = item.sslContext == null ?
item.identityDomain + "." + item.identityName : item.sslContext.toString();
LOG.debug("RolePrefetchTask: item={} domain={} roleName={} to be expired at {}",
itemName, item.domainName, item.roleName, expiryTime);
}
if (isExpiredToken(expiryTime, item.minDuration, item.maxDuration, item.tokenMinExpiryTime)) {
if (LOG.isDebugEnabled()) {
final String itemName = item.sslContext == null ?
item.identityDomain + "." + item.identityName : item.sslContext.toString();
LOG.debug("RolePrefetchTask: item={} domain={} roleName={} expired {}. Fetch this item.",
itemName, item.domainName, item.roleName, expiryTime);
}
toFetch.add(item);
}
}
}
// if toFetch is not empty, fetch those tokens, and add refreshed scheduled items back to the queue
if (!toFetch.isEmpty()) {
Set<String> oldSvcLoaderCache = svcLoaderCacheKeys.get();
Set<String> newSvcLoaderCache = null;
// fetch items
for (PrefetchRoleTokenScheduledItem item : toFetch) {
// create ZTS Client for this particular item
try (ZTSClient itemZtsClient = getZTSClient(item)) {
                        // use the zts client if one was given; however, we need to
                        // reset back to the original client afterwards so we don't close
                        // the client we were given
ZTSRDLGeneratedClient savedZtsClient = itemZtsClient.ztsClient;
if (item.ztsClient != null) {
itemZtsClient.ztsClient = item.ztsClient;
}
if (item.isRoleToken()) {
// check if this came from service provider
String key = itemZtsClient.getRoleTokenCacheKey(item.domainName, item.roleName,
item.proxyForPrincipal);
if (oldSvcLoaderCache.contains(key)) {
                                // if we haven't gotten the new list of service
                                // loader tokens then get it now
if (newSvcLoaderCache == null) {
newSvcLoaderCache = loadSvcProviderTokens();
}
// check if the key is in the new key set
// - if not, mark the item as invalid
if (!newSvcLoaderCache.contains(key)) {
item.isInvalid(true);
}
} else {
RoleToken token = itemZtsClient.getRoleToken(item.domainName, item.roleName,
item.minDuration, item.maxDuration, true, item.proxyForPrincipal);
// update the expire time
item.expiresAtUTC(token.getExpiryTime());
}
} else {
AWSTemporaryCredentials awsCred = itemZtsClient.getAWSTemporaryCredentials(item.domainName,
item.roleName, item.externalId, item.minDuration, item.maxDuration, true);
item.expiresAtUTC(awsCred.getExpiration().millis() / 1000);
}
                        // don't forget to restore the original client in case
                        // we had overridden it with the caller specified client
itemZtsClient.ztsClient = savedZtsClient;
} catch (Exception ex) {
// any exception should remove this item from fetch queue
item.isInvalid(true);
PREFETCH_SCHEDULED_ITEMS.remove(item);
LOG.error("RolePrefetchTask: Error while trying to prefetch token", ex);
}
}
// remove all invalid items.
toFetch.removeIf(p -> p.isInvalid);
// now, add items back.
if (!toFetch.isEmpty()) {
synchronized (PREFETCH_SCHEDULED_ITEMS) {
                        // remove any items the queue already has in common with toFetch to avoid duplicates
PREFETCH_SCHEDULED_ITEMS.removeAll(toFetch);
// add them back
PREFETCH_SCHEDULED_ITEMS.addAll(toFetch);
}
}
}
}
}
// method useful for test purposes only
int getScheduledItemsSize() {
synchronized (PREFETCH_SCHEDULED_ITEMS) {
// ConcurrentLinkedQueue.size() method is typically not very useful in concurrent applications
return PREFETCH_SCHEDULED_ITEMS.size();
}
}
/**
* Pre-fetches role tokens so that the client does not take the hit of
* contacting ZTS Server for its first request (avg ~75ms). The client
* library will automatically try to keep the cache up to date such
* that the tokens are never expired and regular getRoleToken requests
* are fulfilled from the cache instead of contacting ZTS Server.
* @param domainName name of the domain
* @param roleName (optional) only interested in roles with this name
* @param minExpiryTime (optional) specifies that the returned RoleToken must be
* at least valid (min/lower bound) for specified number of seconds,
* @param maxExpiryTime (optional) specifies that the returned RoleToken must be
* at most valid (max/upper bound) for specified number of seconds.
* @return true if all is well, else false
*/
boolean prefetchRoleToken(String domainName, String roleName,
Integer minExpiryTime, Integer maxExpiryTime) {
return prefetchRoleToken(domainName, roleName, minExpiryTime, maxExpiryTime, null);
}
/**
* Pre-fetches role tokens so that the client does not take the hit of
* contacting ZTS Server for its first request (avg ~75ms). The client
* library will automatically try to keep the cache up to date such
* that the tokens are never expired and regular getRoleToken requests
* are fulfilled from the cache instead of contacting ZTS Server.
* @param domainName name of the domain
* @param roleName (optional) only interested in roles with this name
* @param minExpiryTime (optional) specifies that the returned RoleToken must be
* at least valid (min/lower bound) for specified number of seconds,
* @param maxExpiryTime (optional) specifies that the returned RoleToken must be
* at most valid (max/upper bound) for specified number of seconds.
* @param proxyForPrincipal (optional) request is proxy for this principal
* @return true if all is well, else false
*/
boolean prefetchRoleToken(String domainName, String roleName,
Integer minExpiryTime, Integer maxExpiryTime, String proxyForPrincipal) {
if (domainName == null || domainName.trim().isEmpty()) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, "Domain Name cannot be empty");
}
RoleToken token = getRoleToken(domainName, roleName, minExpiryTime, maxExpiryTime,
true, proxyForPrincipal);
if (token == null) {
LOG.error("PrefetchToken: No token fetchable using domain={}, roleSuffix={}",
domainName, roleName);
return false;
}
long expiryTimeUTC = token.getExpiryTime();
return prefetchToken(domainName, roleName, minExpiryTime, maxExpiryTime,
proxyForPrincipal, null, expiryTimeUTC, true);
}
boolean prefetchAwsCreds(String domainName, String roleName, String externalId,
Integer minExpiryTime, Integer maxExpiryTime) {
if (domainName == null || domainName.trim().isEmpty()) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, "Domain Name cannot be empty");
}
AWSTemporaryCredentials awsCred = getAWSTemporaryCredentials(domainName, roleName,
externalId, minExpiryTime, maxExpiryTime, true);
if (awsCred == null) {
LOG.error("PrefetchToken: No aws credential fetchable using domain={}, roleName={}",
domainName, roleName);
return false;
}
long expiryTimeUTC = awsCred.getExpiration().millis() / 1000;
return prefetchToken(domainName, roleName, minExpiryTime, maxExpiryTime, null,
externalId, expiryTimeUTC, false);
}
boolean prefetchToken(String domainName, String roleName, Integer minExpiryTime,
Integer maxExpiryTime, String proxyForPrincipal, String externalId,
long expiryTimeUTC, boolean isRoleToken) {
        // if we're given an ssl context then we don't have domain/service
// settings configured otherwise those are required
if (sslContext == null) {
if (domain == null || domain.isEmpty() || service == null || service.isEmpty()) {
if (LOG.isWarnEnabled()) {
LOG.warn("PrefetchToken: setup failure. Both domain({}) and service({}) are required",
domain, service);
}
return false;
}
}
PrefetchRoleTokenScheduledItem item = new PrefetchRoleTokenScheduledItem()
.isRoleToken(isRoleToken)
.domainName(domainName)
.roleName(roleName)
.proxyForPrincipal(proxyForPrincipal)
.externalId(externalId)
.minDuration(minExpiryTime)
.maxDuration(maxExpiryTime)
.expiresAtUTC(expiryTimeUTC)
.identityDomain(domain)
.identityName(service)
.tokenMinExpiryTime(ZTSClient.tokenMinExpiryTime)
.providedZTSUrl(this.ztsUrl)
.siaIdentityProvider(siaProvider)
.sslContext(sslContext)
.proxyUrl(proxyUrl);
        // include our zts client only if it was overridden by
        // the caller (most likely for unit test mock)
if (ztsClientOverride) {
item.ztsClient(this.ztsClient);
}
if (!PREFETCH_SCHEDULED_ITEMS.contains(item)) {
PREFETCH_SCHEDULED_ITEMS.add(item);
} else {
            // the queue already contains an equal item - equality is based on fields such as
            // domainName, identityDomain, identityName, roleName, proxyForPrincipal and isRoleToken
//
// So need to remove and append since the new token expiry has changed
// .expiresAtUTC(token.getExpiryTime())
//
PREFETCH_SCHEDULED_ITEMS.remove(item);
PREFETCH_SCHEDULED_ITEMS.add(item);
}
startPrefetch();
return true;
}
String getAccessTokenCacheKey(String domainName, List<String> roleNames, String idTokenServiceName) {
        // if we don't have a tenant domain specified but we have an ssl context
// then we're going to use the hash code for our sslcontext as the
// value for our tenant
String tenantDomain = domain;
if (domain == null && sslContext != null) {
tenantDomain = sslContext.toString();
}
return getAccessTokenCacheKey(tenantDomain, service, domainName, roleNames, idTokenServiceName);
}
String getAccessTokenCacheKey(String tenantDomain, String tenantService, String domainName,
List<String> roleNames, String idTokenServiceName) {
// before we generate a cache key we need to have a valid domain
if (tenantDomain == null) {
return null;
}
StringBuilder cacheKey = new StringBuilder(256);
cacheKey.append("p=");
cacheKey.append(tenantDomain);
if (tenantService != null) {
cacheKey.append(".").append(tenantService);
}
cacheKey.append(";d=");
cacheKey.append(domainName);
if (roleNames != null && !roleNames.isEmpty()) {
cacheKey.append(";r=");
cacheKey.append(ZTSClient.multipleRoleKey(roleNames));
}
if (idTokenServiceName != null && !idTokenServiceName.isEmpty()) {
cacheKey.append(";o=");
cacheKey.append(idTokenServiceName);
}
return cacheKey.toString();
}
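    // Illustrative example (added for clarity): for tenant domain "sports", tenant service
    // "api", target domain "weather", roles ["readers", "writers"] and id token service
    // "backend", the generated key would be:
    //   p=sports.api;d=weather;r=readers,writers;o=backend
    // (all names here are hypothetical)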
String getRoleTokenCacheKey(String domainName, String roleName, String proxyForPrincipal) {
        // if we don't have a tenant domain specified but we have an ssl context
// then we're going to use the hash code for our sslcontext as the
// value for our tenant
String tenantDomain = domain;
if (domain == null && sslContext != null) {
tenantDomain = sslContext.toString();
}
return getRoleTokenCacheKey(tenantDomain, service, domainName, roleName, proxyForPrincipal);
}
static String getRoleTokenCacheKey(String tenantDomain, String tenantService, String domainName,
String roleName, String proxyForPrincipal) {
// before we generate a cache key we need to have a valid domain
if (tenantDomain == null) {
return null;
}
StringBuilder cacheKey = new StringBuilder(256);
cacheKey.append("p=");
cacheKey.append(tenantDomain);
if (tenantService != null) {
cacheKey.append(".").append(tenantService);
}
cacheKey.append(";d=");
cacheKey.append(domainName);
if (roleName != null && !roleName.isEmpty()) {
cacheKey.append(";r=");
// check to see if we have multiple roles in the values
// in which case we need to sort the values
if (roleName.indexOf(',') == -1) {
cacheKey.append(roleName);
} else {
List<String> roles = Arrays.asList(roleName.split(","));
cacheKey.append(ZTSClient.multipleRoleKey(roles));
}
}
if (proxyForPrincipal != null && !proxyForPrincipal.isEmpty()) {
cacheKey.append(";u=");
cacheKey.append(proxyForPrincipal);
}
return cacheKey.toString();
}
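    // Illustrative example (added for clarity): getRoleTokenCacheKey("sports", "api",
    // "weather", "writers,readers", "jane") returns
    //   p=sports.api;d=weather;r=readers,writers;u=jane
    // (multiple roles are sorted before being added to the key; all names are hypothetical)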
static boolean isExpiredToken(long expiryTime, Integer minExpiryTime, Integer maxExpiryTime,
int tokenMinExpiryTime) {
        // we'll first make sure that, if we're given min and/or max expiry
        // times, the corresponding conditions are satisfied
if (minExpiryTime != null && expiryTime < minExpiryTime) {
return true;
}
if (maxExpiryTime != null && expiryTime > maxExpiryTime) {
return true;
}
        // if both limits were null then we need to make sure
        // that our token is still valid for at least our configured minimum value
return minExpiryTime == null && maxExpiryTime == null && expiryTime < tokenMinExpiryTime;
}
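    // Illustrative example (added for clarity): with no explicit min/max bounds and a
    // configured tokenMinExpiryTime of 900, a token with only 600 seconds left is treated
    // as expired, while one with 1200 seconds left is still considered valid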
RoleToken lookupRoleTokenInCache(String cacheKey, Integer minExpiryTime, Integer maxExpiryTime, int serverMinExpiryTime) {
RoleToken roleToken = ROLE_TOKEN_CACHE.get(cacheKey);
if (roleToken == null) {
if (LOG.isInfoEnabled()) {
LOG.info("LookupRoleTokenInCache: cache-lookup key: {} result: not found", cacheKey);
}
return null;
}
// before returning our cache hit we need to make sure it
// satisfies the time requirements as specified by the client
long expiryTime = roleToken.getExpiryTime() - (System.currentTimeMillis() / 1000);
if (isExpiredToken(expiryTime, minExpiryTime, maxExpiryTime, serverMinExpiryTime)) {
if (LOG.isInfoEnabled()) {
LOG.info("LookupRoleTokenInCache: role-cache-lookup key: {} token-expiry: {}"
+ " req-min-expiry: {} req-max-expiry: {} client-min-expiry: {} result: expired",
cacheKey, expiryTime, minExpiryTime, maxExpiryTime, serverMinExpiryTime);
}
// if the token is completely expired then we'll remove it from the cache
if (expiryTime < 1) {
ROLE_TOKEN_CACHE.remove(cacheKey);
}
return null;
}
return roleToken;
}
AccessTokenResponse lookupAccessTokenResponseInCache(String cacheKey, long expiryTime) {
AccessTokenResponseCacheEntry accessTokenResponseCacheEntry = ACCESS_TOKEN_CACHE.get(cacheKey);
if (accessTokenResponseCacheEntry == null) {
if (LOG.isInfoEnabled()) {
LOG.info("LookupAccessTokenResponseInCache: cache-lookup key: {} result: not found", cacheKey);
}
return null;
}
        // before returning our cache hit we need to make sure
        // at least 1/4th of the token's lifetime is left before it expires.
        // if the expiryTime is -1 then we return the token as
        // long as it's not expired
if (accessTokenResponseCacheEntry.isExpired(expiryTime)) {
if (accessTokenResponseCacheEntry.isExpired(-1)) {
ACCESS_TOKEN_CACHE.remove(cacheKey);
}
return null;
}
return accessTokenResponseCacheEntry.accessTokenResponse();
}
AWSTemporaryCredentials lookupAwsCredInCache(String cacheKey, Integer minExpiryTime,
Integer maxExpiryTime) {
AWSTemporaryCredentials awsCred = AWS_CREDS_CACHE.get(cacheKey);
if (awsCred == null) {
if (LOG.isInfoEnabled()) {
LOG.info("LookupAwsCredInCache: aws-cache-lookup key: {} result: not found", cacheKey);
}
return null;
}
// before returning our cache hit we need to make sure it
// satisfies the time requirements as specified by the client
long expiryTime = awsCred.getExpiration().millis() - System.currentTimeMillis();
expiryTime /= 1000; // expiry time is in seconds
if (isExpiredToken(expiryTime, minExpiryTime, maxExpiryTime, tokenMinExpiryTime)) {
if (LOG.isInfoEnabled()) {
LOG.info("LookupAwsCredInCache: aws-cache-lookup key: {} token-expiry: {}"
+ " req-min-expiry: {} req-max-expiry: {} client-min-expiry: {} result: expired",
cacheKey, expiryTime, minExpiryTime, maxExpiryTime, tokenMinExpiryTime);
}
AWS_CREDS_CACHE.remove(cacheKey);
return null;
}
return awsCred;
}
/**
* Retrieve the list of roles that the given principal has access to in the domain
* @param domainName name of the domain
* @param principal name of the principal
* @return RoleAccess object on success. ZTSClientException will be thrown in case of failure
*/
public RoleAccess getRoleAccess(String domainName, String principal) {
updateServicePrincipal();
try {
return ztsClient.getRoleAccess(domainName, principal);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getMessage());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Retrieve the specified service object from a domain
* @param domainName name of the domain
* @param serviceName name of the service to be retrieved
* @return ServiceIdentity object on success. ZTSClientException will be thrown in case of failure
*/
public ServiceIdentity getServiceIdentity(String domainName, String serviceName) {
updateServicePrincipal();
try {
return ztsClient.getServiceIdentity(domainName, serviceName);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Retrieve the specified public key from the given service object
* @param domainName name of the domain
* @param serviceName name of the service
* @param keyId the identifier of the public key to be retrieved
* @return PublicKeyEntry object or ZTSClientException will be thrown in case of failure
*/
public PublicKeyEntry getPublicKeyEntry(String domainName, String serviceName, String keyId) {
try {
return ztsClient.getPublicKeyEntry(domainName, serviceName, keyId);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Retrieve the full list of services defined in a domain
* @param domainName name of the domain
* @return list of all service names on success. ZTSClientException will be thrown in case of failure
*/
public ServiceIdentityList getServiceIdentityList(String domainName) {
updateServicePrincipal();
try {
return ztsClient.getServiceIdentityList(domainName);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* For a given provider domain get a list of tenant domain names that the user is a member of
* @param providerDomainName name of the provider domain
* @param userName is the name of the user to search for in the tenant domains of the provider
* @param roleName is the name of the role to filter on when searching through the list of tenants with
* the specified role name.
* @param serviceName is the name of the service to filter on that the tenant has on-boarded to
* @return TenantDomains object which contains a list of tenant domain names for a given provider
* domain, that the user is a member of
*/
public TenantDomains getTenantDomains(String providerDomainName, String userName,
String roleName, String serviceName) {
updateServicePrincipal();
try {
return ztsClient.getTenantDomains(providerDomainName, userName, roleName, serviceName);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Request by a service to refresh its NToken. The original NToken must have been
* obtained by an authorized service by calling the postInstanceTenantRequest
* method.
* @param domain Name of the domain
* @param service Name of the service
     * @param req InstanceRefreshRequest object for the request
* @return Identity object that includes a refreshed NToken for the service
*/
public Identity postInstanceRefreshRequest(String domain, String service, InstanceRefreshRequest req) {
updateServicePrincipal();
try {
return ztsClient.postInstanceRefreshRequest(domain, service, req);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
     * For AWS Lambda functions generate a new private key, request an
* x.509 certificate based on the requested CSR and return both to
* the client in order to establish tls connections with other
* Athenz enabled services.
* @param domainName name of the domain
* @param serviceName name of the service
* @param account AWS account name that the function runs in
* @param provider name of the provider service for AWS Lambda
* @return AWSLambdaIdentity with private key and certificate
*/
public AWSLambdaIdentity getAWSLambdaServiceCertificate(String domainName,
String serviceName, String account, String provider) {
if (domainName == null || serviceName == null) {
throw new IllegalArgumentException("Domain and Service must be specified");
}
if (account == null || provider == null) {
throw new IllegalArgumentException("AWS Account and Provider must be specified");
}
if (x509CsrDomain == null) {
throw new IllegalArgumentException("X509 CSR Domain must be specified");
}
// first we're going to generate a private key for the request
AWSLambdaIdentity lambdaIdentity = new AWSLambdaIdentity();
try {
lambdaIdentity.setPrivateKey(Crypto.generateRSAPrivateKey(2048));
} catch (CryptoException ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
        // we need to generate a csr with an instance register object
InstanceRegisterInformation info = new InstanceRegisterInformation();
info.setDomain(domainName.toLowerCase());
info.setService(serviceName.toLowerCase());
info.setProvider(provider.toLowerCase());
final String athenzService = info.getDomain() + "." + info.getService();
// generate our dn which will be based on our service name
StringBuilder dnBuilder = new StringBuilder(128);
dnBuilder.append("cn=");
dnBuilder.append(athenzService);
if (x509CsrDn != null) {
dnBuilder.append(',');
dnBuilder.append(x509CsrDn);
}
        // now let's generate our dnsName field based on our principal's details
GeneralName[] sanArray = new GeneralName[2];
final String hostBuilder = info.getService() + '.' + info.getDomain().replace('.', '-') +
'.' + x509CsrDomain;
sanArray[0] = new GeneralName(GeneralName.dNSName, new DERIA5String(hostBuilder));
final String instanceHostBuilder = "lambda-" + account + '-' + info.getService() +
".instanceid.athenz." + x509CsrDomain;
sanArray[1] = new GeneralName(GeneralName.dNSName, new DERIA5String(instanceHostBuilder));
// next generate the csr based on our private key and data
try {
info.setCsr(Crypto.generateX509CSR(lambdaIdentity.getPrivateKey(),
dnBuilder.toString(), sanArray));
} catch (OperatorCreationException | IOException ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
// finally obtain attestation data for lambda
info.setAttestationData(getAWSLambdaAttestationData(athenzService, account));
// request the x.509 certificate from zts server
Map<String, List<String>> responseHeaders = new HashMap<>();
InstanceIdentity identity = postInstanceRegisterInformation(info, responseHeaders);
try {
lambdaIdentity.setX509Certificate(Crypto.loadX509Certificate(identity.getX509Certificate()));
} catch (CryptoException ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
lambdaIdentity.setCaCertificates(identity.getX509CertificateSigner());
return lambdaIdentity;
}
String getAWSLambdaAttestationData(final String athenzService, final String account) {
AWSAttestationData data = new AWSAttestationData();
data.setRole(athenzService);
Credentials awsCreds = assumeAWSRole(account, athenzService);
data.setAccess(awsCreds.getAccessKeyId());
data.setSecret(awsCreds.getSecretAccessKey());
data.setToken(awsCreds.getSessionToken());
ObjectMapper mapper = new ObjectMapper();
String jsonData = null;
try {
jsonData = mapper.writeValueAsString(data);
} catch (JsonProcessingException ex) {
LOG.error("Unable to generate attestation json data: {}", ex.getMessage());
}
return jsonData;
}
AssumeRoleRequest getAssumeRoleRequest(String account, String roleName) {
// assume the target role to get the credentials for the client
// aws format is arn:aws:iam::<account-id>:role/<role-name>
final String arn = "arn:aws:iam::" + account + ":role/" + roleName;
AssumeRoleRequest req = new AssumeRoleRequest();
req.setRoleArn(arn);
req.setRoleSessionName(roleName);
return req;
}
Credentials assumeAWSRole(String account, String roleName) {
try {
AssumeRoleRequest req = getAssumeRoleRequest(account, roleName);
return AWSSecurityTokenServiceClientBuilder.defaultClient().assumeRole(req).getCredentials();
} catch (Exception ex) {
LOG.error("assumeAWSRole - unable to assume role: {}", ex.getMessage());
return null;
}
}
/**
* AWSCredential Provider provides AWS Credentials which the caller can
* use to authorize an AWS request. It automatically refreshes the credentials
* when the current credentials become invalid.
* It uses ZTS client to refresh the AWS Credentials. So the ZTS Client must
* not be closed while the credential provider is being used.
* The caller should close the client when the provider is no longer required.
* For a given domain and role return AWS temporary credential provider
* @param domainName name of the domain
* @param roleName is the name of the role
* @return AWSCredentialsProvider AWS credential provider
*/
public AWSCredentialsProvider getAWSCredentialProvider(String domainName, String roleName) {
return new AWSCredentialsProviderImpl(this, domainName, roleName);
}
/**
* AWSCredential Provider provides AWS Credentials which the caller can
* use to authorize an AWS request. It automatically refreshes the credentials
* when the current credentials become invalid.
* It uses ZTS client to refresh the AWS Credentials. So the ZTS Client must
* not be closed while the credential provider is being used.
* The caller should close the client when the provider is no longer required.
* For a given domain and role return AWS temporary credential provider
* @param domainName name of the domain
* @param roleName is the name of the role
* @param externalId (optional) external id to satisfy configured assume role condition
* @param minExpiryTime (optional) specifies that the returned RoleToken must be
* at least valid (min/lower bound) for specified number of seconds,
* @param maxExpiryTime (optional) specifies that the returned RoleToken must be
* at most valid (max/upper bound) for specified number of seconds.
* @return AWSCredentialsProvider AWS credential provider
*/
public AWSCredentialsProvider getAWSCredentialProvider(String domainName, String roleName,
String externalId, Integer minExpiryTime, Integer maxExpiryTime) {
return new AWSCredentialsProviderImpl(this, domainName, roleName, externalId,
minExpiryTime, maxExpiryTime);
}
/**
* For a given domain and role return AWS temporary credentials
*
* @param domainName name of the domain
* @param roleName is the name of the role
* @return AWSTemporaryCredentials AWS credentials
*/
public AWSTemporaryCredentials getAWSTemporaryCredentials(String domainName, String roleName) {
return getAWSTemporaryCredentials(domainName, roleName, null, null, null, false);
}
public AWSTemporaryCredentials getAWSTemporaryCredentials(String domainName, String roleName,
boolean ignoreCache) {
return getAWSTemporaryCredentials(domainName, roleName, null, null, null, ignoreCache);
}
/**
* For a given domain and role return AWS temporary credentials
*
* @param domainName name of the domain
* @param roleName is the name of the role
* @param minExpiryTime (optional) specifies that the returned RoleToken must be
* at least valid (min/lower bound) for specified number of seconds,
* @param maxExpiryTime (optional) specifies that the returned RoleToken must be
* at most valid (max/upper bound) for specified number of seconds.
* @param externalId (optional) external id to satisfy configured assume role condition
* @return AWSTemporaryCredentials AWS credentials
*/
public AWSTemporaryCredentials getAWSTemporaryCredentials(String domainName, String roleName,
String externalId, Integer minExpiryTime, Integer maxExpiryTime) {
return getAWSTemporaryCredentials(domainName, roleName, externalId,
minExpiryTime, maxExpiryTime, false);
}
public AWSTemporaryCredentials getAWSTemporaryCredentials(String domainName, String roleName,
String externalId, Integer minExpiryTime, Integer maxExpiryTime, boolean ignoreCache) {
        // since our aws role name can contain path elements (and thus '/' characters)
        // we need to encode the value and use that instead
try {
roleName = URLEncoder.encode(roleName, "UTF-8");
} catch (UnsupportedEncodingException ex) {
LOG.error("Unable to encode {} - error {}", roleName, ex.getMessage());
}
// first lookup in our cache to see if it can be satisfied
// only if we're not asked to ignore the cache
AWSTemporaryCredentials awsCred;
String cacheKey = getRoleTokenCacheKey(domainName, roleName, null);
if (cacheKey != null && !ignoreCache) {
awsCred = lookupAwsCredInCache(cacheKey, minExpiryTime, maxExpiryTime);
if (awsCred != null) {
return awsCred;
}
// start prefetch for this token if prefetch is enabled
if (enablePrefetch && prefetchAutoEnable) {
if (prefetchAwsCreds(domainName, roleName, externalId, minExpiryTime, maxExpiryTime)) {
awsCred = lookupAwsCredInCache(cacheKey, minExpiryTime, maxExpiryTime);
}
if (awsCred != null) {
return awsCred;
}
LOG.error("GetAWSTemporaryCredentials: cache prefetch and lookup error");
}
}
// if no hit then we need to request a new token from ZTS
updateServicePrincipal();
try {
awsCred = ztsClient.getAWSTemporaryCredentials(domainName, roleName,
maxExpiryTime, externalId);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
// need to add the token to our cache. If our principal was
// updated then we need to retrieve a new cache key
if (awsCred != null) {
if (cacheKey == null) {
cacheKey = getRoleTokenCacheKey(domainName, roleName, null);
}
if (cacheKey != null) {
AWS_CREDS_CACHE.put(cacheKey, awsCred);
}
}
return awsCred;
}
/**
* Retrieve the list of all policies (not just names) from the ZTS Server that
     * is signed with both ZTS's and ZMS's private keys. It will pass an optional matchingTag
* so that ZTS can skip returning signed policies if no changes have taken
* place since that tag was issued.
* @param domainName name of the domain
* @param matchingTag name of the tag issued with last request
* @param responseHeaders contains the "tag" returned for modification
* time of the policies, map key = "tag", List should contain a single value
* @return list of policies signed by ZTS Server. ZTSClientException will be thrown in case of failure
*/
public DomainSignedPolicyData getDomainSignedPolicyData(String domainName, String matchingTag,
Map<String, List<String>> responseHeaders) {
try {
return ztsClient.getDomainSignedPolicyData(domainName, matchingTag, responseHeaders);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Verify if the given principal has access to the specified role in the
* domain or not.
* @param domainName name of the domain
* @param roleName name of the role
* @param principal name of the principal to check for
* @return Access object with grant true/false response. ZTSClientException will be thrown in case of failure
*/
public Access getAccess(String domainName, String roleName, String principal) {
updateServicePrincipal();
try {
return ztsClient.getAccess(domainName, roleName, principal);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
     * Requests the ZTS to indicate whether the specific request for the
     * specified resource with authentication details will be granted or not.
* @param action value of the action to be carried out (e.g. "UPDATE", "DELETE")
* @param resource resource YRN. YRN is defined as {ServiceName})?:({LocationName})?:)?{ResourceName}"
* @param trustDomain (optional) if the access checks involves cross domain check only
* check the specified trusted domain and ignore all others
* @param principal (optional) carry out the access check for specified principal
* @return ResourceAccess object indicating whether or not the request will be granted or not
*/
public ResourceAccess getResourceAccess(String action, String resource, String trustDomain, String principal) {
updateServicePrincipal();
try {
return ztsClient.getResourceAccess(action, resource, trustDomain, principal);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getMessage());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
     * Requests the ZTS to indicate whether the specific request for the
     * specified resource with authentication details will be granted or not.
* @param action value of the action to be carried out (e.g. "UPDATE", "DELETE")
* @param resource resource YRN. YRN is defined as {ServiceName})?:({LocationName})?:)?{ResourceName}"
* @param trustDomain (optional) if the access checks involves cross domain check only
* check the specified trusted domain and ignore all others
* @param principal (optional) carry out the access check for specified principal
* @return ResourceAccess object indicating whether or not the request will be granted or not
*/
public ResourceAccess getResourceAccessExt(String action, String resource, String trustDomain, String principal) {
updateServicePrincipal();
try {
return ztsClient.getResourceAccessExt(action, resource, trustDomain, principal);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getMessage());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
     * Caller may post a set of domain metric attributes for monitoring and logging.
* ZTSClientException will be thrown in case of failure
* @param domainName name of the domain
* @param req list of domain metrics with their values
*/
public void postDomainMetrics(String domainName, DomainMetrics req) {
updateServicePrincipal();
try {
ztsClient.postDomainMetrics(domainName, req);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Request by an instance to register itself based on its provider
* attestation.
* @param info InstanceRegisterInformation object for the request
* @param responseHeaders contains the "location" returned for post refresh requests
* List should contain a single value
* @return InstanceIdentity object that includes a x509 certificate for the service
*/
public InstanceIdentity postInstanceRegisterInformation(InstanceRegisterInformation info,
Map<String, List<String>> responseHeaders) {
updateServicePrincipal();
try {
return ztsClient.postInstanceRegisterInformation(info, responseHeaders);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Request by an instance to refresh its certificate. The instance must
* authenticate itself using the certificate it has received from the
* postInstanceRegisterInformation call.
* @param provider Provider Service name
* @param domain instance domain name
* @param service instance service name
* @param instanceId instance id as provided in the CSR
* @param info InstanceRegisterInformation object for the request
* @return InstanceIdentity object that includes a x509 certificate for the service
*/
public InstanceIdentity postInstanceRefreshInformation(String provider, String domain,
String service, String instanceId, InstanceRefreshInformation info) {
updateServicePrincipal();
try {
return ztsClient.postInstanceRefreshInformation(provider, domain, service, instanceId, info);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
/**
* Revoke an instance from refreshing its certificates.
* @param provider Provider Service name
* @param domain instance domain name
* @param service instance service name
* @param instanceId instance id as provided in the CSR
*/
public void deleteInstanceIdentity(String provider, String domain,
String service, String instanceId) {
updateServicePrincipal();
try {
ztsClient.deleteInstanceIdentity(provider, domain, service, instanceId);
} catch (ResourceException ex) {
throw new ZTSClientException(ex.getCode(), ex.getData());
} catch (Exception ex) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, ex.getMessage());
}
}
static class PrefetchRoleTokenScheduledItem {
boolean isRoleToken = true;
PrefetchRoleTokenScheduledItem isRoleToken(boolean isRole) {
isRoleToken = isRole;
return this;
}
boolean isRoleToken() {
return isRoleToken;
}
String providedZTSUrl;
PrefetchRoleTokenScheduledItem providedZTSUrl(String u) {
providedZTSUrl = u;
return this;
}
ServiceIdentityProvider siaProvider;
PrefetchRoleTokenScheduledItem siaIdentityProvider(ServiceIdentityProvider s) {
siaProvider = s;
return this;
}
ZTSRDLGeneratedClient ztsClient;
PrefetchRoleTokenScheduledItem ztsClient(ZTSRDLGeneratedClient z) {
ztsClient = z;
return this;
}
boolean isInvalid = false;
PrefetchRoleTokenScheduledItem isInvalid(boolean invalid) {
isInvalid = invalid;
return this;
}
String identityDomain;
PrefetchRoleTokenScheduledItem identityDomain(String d) {
identityDomain = d;
return this;
}
String identityName;
PrefetchRoleTokenScheduledItem identityName(String d) {
identityName = d;
return this;
}
String domainName;
PrefetchRoleTokenScheduledItem domainName(String d) {
domainName = d;
return this;
}
String roleName;
PrefetchRoleTokenScheduledItem roleName(String s) {
roleName = s;
return this;
}
String proxyForPrincipal;
PrefetchRoleTokenScheduledItem proxyForPrincipal(String u) {
proxyForPrincipal = u;
return this;
}
String externalId;
PrefetchRoleTokenScheduledItem externalId(String id) {
externalId = id;
return this;
}
Integer minDuration;
PrefetchRoleTokenScheduledItem minDuration(Integer min) {
minDuration = min;
return this;
}
Integer maxDuration;
PrefetchRoleTokenScheduledItem maxDuration(Integer max) {
maxDuration = max;
return this;
}
long expiresAtUTC;
PrefetchRoleTokenScheduledItem expiresAtUTC(long e) {
expiresAtUTC = e;
return this;
}
int tokenMinExpiryTime;
PrefetchRoleTokenScheduledItem tokenMinExpiryTime(int t) {
tokenMinExpiryTime = t;
return this;
}
SSLContext sslContext;
PrefetchRoleTokenScheduledItem sslContext(SSLContext ctx) {
sslContext = ctx;
return this;
}
String proxyUrl;
PrefetchRoleTokenScheduledItem proxyUrl(String url) {
proxyUrl = url;
return this;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((domainName == null) ? 0 : domainName.hashCode());
result = prime * result + ((identityDomain == null) ? 0 : identityDomain.hashCode());
result = prime * result + ((identityName == null) ? 0 : identityName.hashCode());
result = prime * result + ((roleName == null) ? 0 : roleName.hashCode());
result = prime * result + ((proxyForPrincipal == null) ? 0 : proxyForPrincipal.hashCode());
result = prime * result + ((externalId == null) ? 0 : externalId.hashCode());
result = prime * result + ((sslContext == null) ? 0 : sslContext.hashCode());
result = prime * result + ((proxyUrl == null) ? 0 : proxyUrl.hashCode());
result = prime * result + Boolean.hashCode(isRoleToken);
result = prime * result + Boolean.hashCode(isInvalid);
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
PrefetchRoleTokenScheduledItem other = (PrefetchRoleTokenScheduledItem) obj;
if (domainName == null) {
if (other.domainName != null) {
return false;
}
} else if (!domainName.equals(other.domainName)) {
return false;
}
if (identityDomain == null) {
if (other.identityDomain != null) {
return false;
}
} else if (!identityDomain.equals(other.identityDomain)) {
return false;
}
if (identityName == null) {
if (other.identityName != null) {
return false;
}
} else if (!identityName.equals(other.identityName)) {
return false;
}
if (roleName == null) {
if (other.roleName != null) {
return false;
}
} else if (!roleName.equals(other.roleName)) {
return false;
}
if (proxyForPrincipal == null) {
if (other.proxyForPrincipal != null) {
return false;
}
} else if (!proxyForPrincipal.equals(other.proxyForPrincipal)) {
return false;
}
if (externalId == null) {
if (other.externalId != null) {
return false;
}
} else if (!externalId.equals(other.externalId)) {
return false;
}
if (isInvalid != other.isInvalid) {
return false;
}
if (isRoleToken != other.isRoleToken) {
return false;
}
if (sslContext == null) {
return other.sslContext == null;
} else {
return sslContext.equals(other.sslContext);
}
}
}
public class AWSHostNameVerifier implements HostnameVerifier {
String dnsHostname;
public AWSHostNameVerifier(String hostname) {
dnsHostname = hostname;
}
@Override
public boolean verify(String hostname, SSLSession session) {
Certificate[] certs = null;
try {
certs = session.getPeerCertificates();
} catch (SSLPeerUnverifiedException ignored) {
}
if (certs == null) {
return false;
}
for (Certificate cert : certs) {
try {
X509Certificate x509Cert = (X509Certificate) cert;
if (matchDnsHostname(x509Cert.getSubjectAlternativeNames())) {
return true;
}
} catch (CertificateParsingException ignored) {
}
}
return false;
}
boolean matchDnsHostname(Collection<List<?>> altNames) {
if (altNames == null) {
return false;
}
// GeneralName ::= CHOICE {
// otherName [0] OtherName,
// rfc822Name [1] IA5String,
// dNSName [2] IA5String,
// x400Address [3] ORAddress,
// directoryName [4] Name,
// ediPartyName [5] EDIPartyName,
// uniformResourceIdentifier [6] IA5String,
// iPAddress [7] OCTET STRING,
// registeredID [8] OBJECT IDENTIFIER}
for (@SuppressWarnings("rawtypes") List item : altNames) {
Integer type = (Integer) item.get(0);
if (type == 2) {
String dns = (String) item.get(1);
if (dnsHostname.equalsIgnoreCase(dns)) {
return true;
}
}
}
return false;
}
}
private static Set<String> loadSvcProviderTokens() {
ztsTokenProviders = ServiceLoader.load(ZTSClientService.class);
svcLoaderCacheKeys = new AtomicReference<>();
        // if we have service loader implementations, then stuff role tokens into cache
// and keep track of these tokens so that they will get refreshed from
// service loader and not zts server
Set<String> cacheKeySet = new HashSet<>();
for (ZTSClientService provider: ztsTokenProviders) {
Collection<ZTSClientService.RoleTokenDescriptor> descs = provider.loadTokens();
if (descs == null) {
if (LOG.isInfoEnabled()) {
LOG.info("loadSvcProviderTokens: provider didn't return tokens: prov={}", provider);
}
continue;
}
for (ZTSClientService.RoleTokenDescriptor desc: descs) {
if (desc.signedToken != null) {
// stuff token in cache and record service loader key
String key = cacheSvcProvRoleToken(desc);
if (key != null) {
cacheKeySet.add(key);
}
}
}
}
svcLoaderCacheKeys.set(cacheKeySet);
return cacheKeySet;
}
/**
* returns a cache key for the given list of roles.
* if the list of roles contains multiple entries
* then we have to sort the array first and then
* generate the key based on the sorted list since
* there is no guarantee what order the ZTS Server
* might return the list of roles
*
* @param roles list of role names
* @return cache key for the list
*/
static String multipleRoleKey(List<String> roles) {
// first check to make sure we have valid data
if (roles == null || roles.isEmpty()) {
return null;
}
// if we have a single role then that's the key
if (roles.size() == 1) {
return roles.get(0);
}
// if we have multiple roles, then we have to
// sort the values and then generate the key
Collections.sort(roles);
return String.join(",", roles);
}
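    // Illustrative example (added for clarity): multipleRoleKey(Arrays.asList("writers", "readers"))
    // returns "readers,writers". Note that Collections.sort above sorts the caller's list
    // in place, so the list passed in must support sorting (i.e. must not be immutable).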
/**
* stuff pre-loaded service token in cache. in this model an external
     * service (proxy user) has retrieved the role tokens and added them to the
* client cache so it can run without the need to contact zts server.
* in this model we're going to look at the principal field only and
* ignore the proxy field since the client doesn't need to know anything
* about that detail.
*
* start prefetch task to reload to prevent expiry
* return the cache key used
*/
static String cacheSvcProvRoleToken(ZTSClientService.RoleTokenDescriptor desc) {
if (cacheDisabled) {
return null;
}
com.yahoo.athenz.auth.token.RoleToken rt = new com.yahoo.athenz.auth.token.RoleToken(desc.getSignedToken());
String domainName = rt.getDomain();
String principalName = rt.getPrincipal();
boolean completeRoleSet = rt.getDomainCompleteRoleSet();
// if the role token was for a complete set then we're not going
// to use the rolename field (it indicates that the original request
// was completed without the rolename field being specified)
final String roleName = (completeRoleSet) ? null : multipleRoleKey(rt.getRoles());
// parse principalName for the tenant domain and service name
// we must have valid components otherwise we'll just
// ignore the token - you can't have a principal without
// valid domain and service names
int index = principalName.lastIndexOf('.'); // ex: cities.burbank.mysvc
if (index == -1) {
LOG.error("cacheSvcProvRoleToken: Invalid principal in token: {}", rt.getSignedToken());
return null;
}
final String tenantDomain = principalName.substring(0, index);
final String tenantService = principalName.substring(index + 1);
Long expiryTime = rt.getExpiryTime();
RoleToken roleToken = new RoleToken().setToken(desc.getSignedToken()).setExpiryTime(expiryTime);
String key = getRoleTokenCacheKey(tenantDomain, tenantService, domainName, roleName, null);
if (LOG.isInfoEnabled()) {
LOG.info("cacheSvcProvRoleToken: cache-add key: {} expiry: {}", key, expiryTime);
}
ROLE_TOKEN_CACHE.put(key, roleToken);
// setup prefetch task
Long expiryTimeUTC = roleToken.getExpiryTime();
prefetchSvcProvTokens(tenantDomain, tenantService, domainName,
roleName, null, null, expiryTimeUTC, null);
return key;
}
static void prefetchSvcProvTokens(String domain, String service, String domainName,
String roleName, Integer minExpiryTime, Integer maxExpiryTime,
Long expiryTimeUTC, String proxyForPrincipal) {
if (domainName == null || domainName.trim().isEmpty()) {
throw new ZTSClientException(ZTSClientException.BAD_REQUEST, "Domain Name cannot be empty");
}
PrefetchRoleTokenScheduledItem item = new PrefetchRoleTokenScheduledItem()
.isRoleToken(true)
.domainName(domainName)
.roleName(roleName)
.proxyForPrincipal(proxyForPrincipal)
.minDuration(minExpiryTime)
.maxDuration(maxExpiryTime)
.expiresAtUTC(expiryTimeUTC)
.identityDomain(domain)
.identityName(service)
.tokenMinExpiryTime(ZTSClient.tokenMinExpiryTime);
//noinspection RedundantCollectionOperation
if (PREFETCH_SCHEDULED_ITEMS.contains(item)) {
            // the queue already contains an equal item - equality is based on fields such as
            // domainName, identityDomain, identityName, roleName, proxyForPrincipal and isRoleToken
//
// So need to remove and append since the new token expiry has changed
// .expiresAtUTC(token.getExpiryTime())
//
PREFETCH_SCHEDULED_ITEMS.remove(item);
}
PREFETCH_SCHEDULED_ITEMS.add(item);
startPrefetch();
}
static void startPrefetch() {
if (FETCH_TIMER != null) {
return;
}
synchronized (TIMER_LOCK) {
if (FETCH_TIMER == null) {
FETCH_TIMER = new Timer();
// check the fetch items every prefetchInterval seconds.
FETCH_TIMER.schedule(new RolePrefetchTask(), 0, prefetchInterval * 1000);
}
}
}
}
| [
"\"ROOT\""
]
| []
| [
"ROOT"
]
| [] | ["ROOT"] | java | 1 | 0 | |
utils.py | import os
import shutil
import time
import pprint
import torch
import torch.nn.functional as F
# data_split
root = './'
filename = 'RA_10wayRSI' # dataset folder name
filename_shot = 'shot_AR' # shot folder name
filename_query = 'query_AR' # query folder name
shot = 5
# train
def set_gpu(x):
os.environ['CUDA_VISIBLE_DEVICES'] = x
print('using gpu:', x)
def ensure_path(path):
if os.path.exists(path):
if input('{} exists, remove? ([y]/n)'.format(path)) != 'n':
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def delete_path(path):
shutil.rmtree(path)
os.makedirs(path)
class Averager():
def __init__(self):
self.n = 0
self.v = 0
def add(self, x):
self.v = (self.v * self.n + x) / (self.n + 1)
self.n += 1
def item(self):
return self.v
class Averager_vector():
def __init__(self,num):
self.n = 0
self.v = torch.zeros(num)
def add(self, x):
self.v = (self.v * self.n + x) / (self.n + 1)
self.n += 1
def item(self):
return self.v
class Averager_matrix():
def __init__(self,M, N):
self.n = 0
self.v = torch.zeros(M,N)
def add(self, x):
self.v = (self.v * self.n + x) / (self.n + 1)
self.n += 1
def item(self):
return self.v
def count_acc(logits, label):
pred = torch.argmax(logits, dim=1)
return (pred == label).type(torch.cuda.FloatTensor).mean().item()
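# Note (added for clarity): count_acc above casts to torch.cuda.FloatTensor, so it assumes
# logits and label are CUDA tensors; on a CPU-only setup the cast would need to be .float().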
def dot_metric(a, b):
return torch.mm(a, b.t())
def euclidean_metric(a, b):
n = a.shape[0]
m = b.shape[0]
a = a.unsqueeze(1).expand(n, m, -1)
b = b.unsqueeze(0).expand(n, m, -1)
logits = -((a - b)**2).sum(dim=2)
return logits
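# Illustrative usage (added for clarity, not in the original file): with a of shape (n, d)
# (e.g. query embeddings) and b of shape (m, d) (e.g. class prototypes), euclidean_metric
# returns an (n, m) tensor of negated squared distances, so larger values mean "closer":
#   proto = torch.randn(5, 64)     # hypothetical 5-way prototypes
#   query = torch.randn(75, 64)    # hypothetical query embeddings
#   logits = euclidean_metric(query, proto)   # shape (75, 5)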
def cos_metric(a, b):
n = a.shape[0]
m = b.shape[0]
a = a.unsqueeze(1).expand(n, m, -1)
b = b.unsqueeze(0).expand(n, m, -1)
ab = torch.mul(a,b)
ab = torch.sum(ab, dim=2)
a_norm = torch.norm(a,dim=2)
b_norm = torch.norm(b,dim=2)
ab_norm = torch.mul(a_norm,b_norm)
logits = ab/ab_norm
return logits
def K_euclidean_metric(a, b, k, shot):
n = a.shape[0]
m = b.shape[0]
a = a.unsqueeze(1).expand(n, m, -1)
b = b.unsqueeze(0).expand(n, m, -1)
logits = -((a - b)**2).sum(dim=2)
#logits_e = torch.exp(logits/100)
logits_e = logits
logits_zeros = torch.zeros_like(logits_e)
_, index = torch.topk(logits, k=k, dim=1, largest=True, sorted=False)
for num in range(logits_zeros.size(0)):
logits_zeros[num, index[num,:]] = 1
logits = torch.mul(logits_e, logits_zeros)
logits2 = logits.reshape(n,shot,-1).sum(dim=1)
return logits2
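# Note (added for clarity): K_euclidean_metric above keeps, for each query row, only the k
# largest (least negative) similarities, zeroes out the rest, then reshapes the support
# dimension into (shot, n_way) groups and sums over the shot axis, yielding one aggregated
# score per class (the support set is assumed to be ordered shot-major).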
class Timer():
def __init__(self):
self.o = time.time()
def measure(self, p=1):
x = (time.time() - self.o) / p
x = int(x)
if x >= 3600:
return '{:.1f}h'.format(x / 3600)
if x >= 60:
return '{}m'.format(round(x / 60))
return '{}s'.format(x)
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
_utils_pp.pprint(x)
def l2_loss(pred, label):
return ((pred - label)**2).sum() / len(pred) / 2
from sklearn.metrics import accuracy_score
NUM_CLASSES = 11
def cal_acc(gt_list, predict_list, num):
acc_sum = 0
for n in range(num):
y = []
pred_y = []
for i in range(len(gt_list)):
gt = gt_list[i]
predict = predict_list[i]
if gt == n:
y.append(gt)
pred_y.append(predict)
print ('{}: {:4f}'.format(n if n != (num - 1) else 'Unk', accuracy_score(y, pred_y)))
if n == (num - 1):
print ('Known Avg Acc: {:4f}'.format(acc_sum / (num - 1)))
acc_sum += accuracy_score(y, pred_y)
print ('Avg Acc: {:4f}'.format(acc_sum / num))
print ('Overall Acc : {:4f}'.format(accuracy_score(gt_list, predict_list)))
def cal_acc2(gt_list, predict_list, num):
acc_sum = 0
class_pred = torch.zeros(num)
for n in range(num):
y = []
pred_y = []
for i in range(len(gt_list)):
gt = gt_list[i]
predict = predict_list[i]
if gt == n:
y.append(gt)
pred_y.append(predict)
acc = accuracy_score(y, pred_y)
# print ('{}: {:4f}'.format(n if n != (num - 1) else 'Unk', acc))
# if n == (num - 1):
# print ('Known Avg Acc: {:4f}'.format(acc_sum / (num - 1)))
class_pred[n] = acc
# print ('Avg Acc: {:4f}'.format(acc_sum / num))
return class_pred
def find_index(a, b):
a_len = len(a)
index = []
for i in range(a_len):
if a[i] < b:
index.append(i)
return index
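# Find the indices whose predicted class is below label_eta and whose top
# probability exceeds pred_eta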
def find_p_less_index(pred, label_eta, pred_eta):
pred_label = pred.argmax(dim = 1)
pred_len = len(pred)
index = []
for i in range(pred_len):
position = pred_label[i]
if pred[i,position] > pred_eta and position < label_eta:
index.append(i)
return index
# Find the indices whose predicted class is greater than or equal to label_eta
# and whose top probability exceeds pred_eta
def find_p_greater_index(pred, label_eta, pred_eta):
pred_label = pred.argmax(dim = 1)
pred_len = len(pred)
index = []
for i in range(pred_len):
position = pred_label[i]
if pred[i,position] > pred_eta and position >= label_eta:
index.append(i)
return index
# input: the softmax probabilities, one row per sample (N*C)
# label_eta: class boundary between common classes and private classes
# output: the indices of samples predicted as common classes and as private classes
def find_know_and_unknow(pred, label_eta):
    index_common = []
    index_private = []
    pred_label = pred.argmax(dim = 1)
    pred_len = len(pred)
    for i in range(pred_len):
        if pred_label[i] < label_eta:
            index_common.append(i)
        else:
            index_private.append(i)
    return index_common, index_private
if __name__=='__main__':
output = torch.randn(10,6)
pred = F.softmax(output,dim=1)
print(pred)
index_common, index_private = find_know_and_unknow(pred, 3)
print(index_common)
print(index_private) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
demo_api/settings.py | """
Django settings for demo_api project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's#s-s3n5vo8+s@fu25vbul*h0x@2$qjep3nz^dj2dpu9*o$qlm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
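# Example (illustrative, not from the original project docs): disable debug mode
# for a run by exporting DEBUG=0 before starting the server, e.g.
#   DEBUG=0 python manage.py runserver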
ALLOWED_HOSTS = ['ec2-3-142-145-89.us-east-2.compute.amazonaws.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'demo_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'demo_app.UserProfile'
STATIC_ROOT = 'static/'
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
bin/phishing_kit_tracker.py | #!/opt/splunk/bin/python
"""
Description: Tracking threat actor emails in phishing kits.
Source: https://github.com/neonprimetime/PhishingKitTracker
Instructions: None
Rate Limit: None
Results Limit: None
Notes: None
Debugger: open("/tmp/splunk_script.txt", "a").write("{}: <MSG>\n".format(<VAR>))
"""
import time
from collections import OrderedDict
import glob
import os
import shutil
import sys
import zipfile
app_home = "{}/etc/apps/OSweep".format(os.environ["SPLUNK_HOME"])
tp_modules = "{}/bin/_tp_modules".format(app_home)
sys.path.insert(0, tp_modules)
import validators
import commons
date = time.strftime("%Y-%m")
csv = "https://raw.githubusercontent.com/neonprimetime/PhishingKitTracker/master/{}_PhishingKitTracker.csv"
def get_project():
"""Download the project to /tmp"""
session = commons.create_session()
project = "https://github.com/neonprimetime/PhishingKitTracker/archive/master.zip"
resp = session.get(project, timeout=180)
if not (resp.status_code == 200 and resp.content != ""):
return
with open("/tmp/master.zip", "wb") as repo:
repo.write(resp.content)
repo_zip = zipfile.ZipFile("/tmp/master.zip", "r")
repo_zip.extractall("/tmp/")
repo_zip.close()
# Remove current files
for csv in glob.glob("/{}/etc/apps/OSweep/lookups/2*_PhishingKitTracker.csv".format(os.environ["SPLUNK_HOME"])):
os.remove(csv)
# Add new files
for csv in glob.glob("/tmp/PhishingKitTracker-master/2*_PhishingKitTracker.csv"):
shutil.move(csv, "/{}/etc/apps/OSweep/lookups".format(os.environ["SPLUNK_HOME"]))
os.remove("/tmp/master.zip")
shutil.rmtree("/tmp/PhishingKitTracker-master")
return
def get_feed():
"""Return the latest report summaries from the feed."""
session = commons.create_session()
data_feed = get_file(session)
if data_feed == None:
return
return data_feed
def get_file(session):
"""Return a list of tags."""
resp = session.get(csv.format(date), timeout=180)
if resp.status_code == 200 and resp.content != "":
return resp.content.splitlines()
return
def write_file(data_feed, file_path):
"""Write data to a file."""
if data_feed == None:
return
with open(file_path, "w") as open_file:
header = data_feed[0]
open_file.write("{}\n".format(header))
for data in data_feed[1:]:
open_file.write("{}\n".format(data.encode("UTF-8")))
return
if __name__ == "__main__":
if sys.argv[1].lower() == "feed":
data_feed = get_feed()
lookup_path = "{}/lookups".format(app_home)
file_path = "{}/{}_PhishingKitTracker.csv".format(lookup_path, date)
write_file(data_feed, file_path)
elif sys.argv[1].lower() == "git":
get_project()
| []
| []
| [
"SPLUNK_HOME"
]
| [] | ["SPLUNK_HOME"] | python | 1 | 0 | |
trac/util/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <[email protected]>
# Copyright (C) 2006 Matthew Good <[email protected]>
# Copyright (C) 2005-2006 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <[email protected]>
# Matthew Good <[email protected]>
from cStringIO import StringIO
import csv
import errno
import functools
import hashlib
import inspect
from itertools import izip, tee
import locale
import os
from pkg_resources import find_distributions
import random
import re
import shutil
import sys
import string
import struct
import tempfile
from urllib import quote, unquote, urlencode
from trac.util.compat import any, md5, sha1, sorted # Remove in 1.3.1
from trac.util.datefmt import time_now, to_datetime, to_timestamp, utc
from trac.util.text import exception_to_unicode, to_unicode, \
getpreferredencoding
def get_reporter_id(req, arg_name=None):
"""Get most informative "reporter" identity out of a request.
That's the `Request`'s authname if not 'anonymous', or a `Request`
argument, or the session name and e-mail, or only the name or only
the e-mail, or 'anonymous' as last resort.
:param req: a `trac.web.api.Request`
:param arg_name: if given, a `Request` argument which may contain
the id for non-authentified users
"""
if req.authname != 'anonymous':
return req.authname
if arg_name:
r = req.args.get(arg_name)
if r:
return r
name = req.session.get('name')
email = req.session.get('email')
if name and email:
return '%s <%s>' % (name, email)
return name or email or req.authname # == 'anonymous'
def content_disposition(type=None, filename=None):
"""Generate a properly escaped Content-Disposition header."""
type = type or ''
if filename is not None:
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
if type:
type += '; '
type += 'filename=' + quote(filename, safe='')
return type
# -- os utilities
if os.name == 'nt':
from getpass import getuser
else:
import pwd
def getuser():
"""Retrieve the identity of the process owner"""
try:
return pwd.getpwuid(os.geteuid())[0]
except KeyError:
return 'unknown'
try:
WindowsError = WindowsError
except NameError:
class WindowsError(OSError):
"""Dummy exception replacing WindowsError on non-Windows platforms"""
can_rename_open_file = False
if os.name == 'nt':
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
MOVEFILE_REPLACE_EXISTING = 0x1
MOVEFILE_WRITE_THROUGH = 0x8
MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
if not isinstance(src, unicode):
src = unicode(src, sys.getfilesystemencoding())
if not isinstance(dst, unicode):
dst = unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
return MoveFileEx(src, dst, MOVEFILE_REPLACE_EXISTING
| MOVEFILE_WRITE_THROUGH)
CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = CreateTransaction(None, 0, 0, 0, 0, 10000, 'Trac rename')
if ta == -1:
return False
try:
return (MoveFileTransacted(src, dst, None, None,
MOVEFILE_REPLACE_EXISTING
| MOVEFILE_WRITE_THROUGH, ta)
and CommitTransaction(ta))
finally:
CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
class AtomicFile(object):
"""A file that appears atomically with its full content.
This file-like object writes to a temporary file in the same directory
as the final file. If the file is committed, the temporary file is renamed
atomically (on Unix, at least) to its final name. If it is rolled back,
the temporary file is removed.
"""
def __init__(self, path, mode='w', bufsize=-1):
self._file = None
self._path = os.path.realpath(path)
dir, name = os.path.split(self._path)
fd, self._temp = tempfile.mkstemp(prefix=name + '-', dir=dir)
self._file = os.fdopen(fd, mode, bufsize)
# Try to preserve permissions and group ownership, but failure
# should not be fatal
try:
st = os.stat(self._path)
if hasattr(os, 'chmod'):
os.chmod(self._temp, st.st_mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
os.chflags(self._temp, st.st_flags)
if hasattr(os, 'chown'):
os.chown(self._temp, -1, st.st_gid)
except OSError:
pass
def __getattr__(self, name):
return getattr(self._file, name)
def commit(self):
if self._file is None:
return
try:
f, self._file = self._file, None
f.close()
rename(self._temp, self._path)
except Exception:
os.unlink(self._temp)
raise
def rollback(self):
if self._file is None:
return
try:
f, self._file = self._file, None
f.close()
finally:
try:
os.unlink(self._temp)
except Exception:
pass
close = commit
__del__ = rollback
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
closed = property(lambda self: self._file is None or self._file.closed)
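# Minimal usage sketch for AtomicFile (illustrative; the path is a placeholder):
#
#     with AtomicFile('/tmp/example.txt') as f:
#         f.write('all-or-nothing content\n')
#
# Leaving the block calls close() (an alias of commit()), which renames the
# temporary file over the target; calling rollback() instead discards it.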
def read_file(path, mode='r'):
"""Read a file and return its content."""
with open(path, mode) as f:
return f.read()
def create_file(path, data='', mode='w'):
"""Create a new file with the given data.
:data: string or iterable of strings.
"""
with open(path, mode) as f:
if data:
if isinstance(data, basestring):
f.write(data)
else: # Assume iterable
f.writelines(data)
def create_unique_file(path):
"""Create a new file. An index is added if the path exists"""
parts = os.path.splitext(path)
idx = 1
while 1:
try:
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
if hasattr(os, 'O_BINARY'):
flags += os.O_BINARY
return path, os.fdopen(os.open(path, flags, 0666), 'w')
except OSError as e:
if e.errno != errno.EEXIST:
raise
idx += 1
# A sanity check
if idx > 100:
raise Exception('Failed to create unique name: ' + path)
path = '%s.%d%s' % (parts[0], idx, parts[1])
if os.name == 'nt':
def touch_file(filename):
"""Update modified time of the given file. The file is created if
missing."""
# Use f.truncate() to avoid low resolution of GetSystemTime()
# on Windows
with open(filename, 'ab') as f:
stat = os.fstat(f.fileno())
f.truncate(stat.st_size)
else:
def touch_file(filename):
"""Update modified time of the given file. The file is created if
missing."""
try:
os.utime(filename, None)
except OSError as e:
if e.errno == errno.ENOENT:
with open(filename, 'ab'):
pass
else:
raise
def create_zipinfo(filename, mtime=None, dir=False, executable=False, symlink=False,
comment=None):
"""Create a instance of `ZipInfo`.
:param filename: file name of the entry
:param mtime: modified time of the entry
:param dir: if `True`, the entry is a directory
:param executable: if `True`, the entry is a executable file
:param symlink: if `True`, the entry is a symbolic link
:param comment: comment of the entry
"""
from zipfile import ZipInfo, ZIP_DEFLATED, ZIP_STORED
zipinfo = ZipInfo()
# The general purpose bit flag 11 is used to denote
# UTF-8 encoding for path and comment. Only set it for
# non-ascii files for increased portability.
# See http://www.pkware.com/documents/casestudies/APPNOTE.TXT
if any(ord(c) >= 128 for c in filename):
zipinfo.flag_bits |= 0x0800
zipinfo.filename = filename.encode('utf-8')
if mtime is not None:
mtime = to_datetime(mtime, utc)
zipinfo.date_time = mtime.utctimetuple()[:6]
# The "extended-timestamp" extra field is used for the
# modified time of the entry in unix time. It avoids
# extracting wrong modified time if non-GMT timezone.
# See http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip
# /proginfo/extra.fld
zipinfo.extra += struct.pack(
'<hhBl',
0x5455, # extended-timestamp extra block type
1 + 4, # size of this block
1, # modification time is present
to_timestamp(mtime)) # time of last modification
# external_attr is 4 bytes in size. The high order two
# bytes represent UNIX permission and file type bits,
# while the low order two contain MS-DOS FAT file
# attributes, most notably bit 4 marking directories.
if dir:
if not zipinfo.filename.endswith('/'):
zipinfo.filename += '/'
zipinfo.compress_type = ZIP_STORED
zipinfo.external_attr = 040755 << 16L # permissions drwxr-xr-x
zipinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zipinfo.compress_type = ZIP_DEFLATED
zipinfo.external_attr = 0644 << 16L # permissions -r-wr--r--
if executable:
zipinfo.external_attr |= 0755 << 16L # -rwxr-xr-x
if symlink:
zipinfo.compress_type = ZIP_STORED
zipinfo.external_attr |= 0120000 << 16L # symlink file type
if comment:
zipinfo.comment = comment.encode('utf-8')
return zipinfo
class NaivePopen(object):
"""This is a deadlock-safe version of popen that returns an object with
errorlevel, out (a string) and err (a string).
The optional `input`, which must be a `str` object, is first written
to a temporary file from which the process will read.
(`capturestderr` may not work under Windows 9x.)
Example::
      print(NaivePopen('grep spam','\\n\\nhere spam\\n\\n').out)
"""
def __init__(self, command, input=None, capturestderr=None):
outfile = tempfile.mktemp()
command = '( %s ) > %s' % (command, outfile)
if input is not None:
infile = tempfile.mktemp()
tmp = open(infile, 'w')
tmp.write(input)
tmp.close()
command = command + ' <' + infile
if capturestderr:
errfile = tempfile.mktemp()
command = command + ' 2>' + errfile
try:
self.err = None
self.errorlevel = os.system(command) >> 8
self.out = read_file(outfile)
if capturestderr:
self.err = read_file(errfile)
finally:
if os.path.isfile(outfile):
os.remove(outfile)
if input and os.path.isfile(infile):
os.remove(infile)
if capturestderr and os.path.isfile(errfile):
os.remove(errfile)
def terminate(process):
"""Terminate the process.
If the process has already finished and has not been waited for,
the function does not raise OSError and WindowsError exceptions unlike
a terminate method of `subprocess.Popen`.
:param process: the integer id (`pid`) of the process.
"""
pid = process if isinstance(process, int) else process.pid
def terminate_win():
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE,
False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
def terminate_nix():
import signal
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
# If the process has already finished and has not been
# waited for, killing it raises an ESRCH error on Cygwin
if e.errno != errno.ESRCH:
raise
if sys.platform == 'win32':
return terminate_win()
return terminate_nix()
def makedirs(path, overwrite=False):
"""Create as many directories as necessary to make `path` exist.
If `overwrite` is `True`, don't raise an exception in case `path`
already exists.
"""
if overwrite and os.path.exists(path):
return
os.makedirs(path)
def copytree(src, dst, symlinks=False, skip=[], overwrite=False):
"""Recursively copy a directory tree using copy2() (from shutil.copytree.)
Added a `skip` parameter consisting of absolute paths
which we don't want to copy.
"""
def str_path(path):
if isinstance(path, unicode):
path = path.encode(sys.getfilesystemencoding() or
getpreferredencoding())
return path
def remove_if_overwriting(path):
if overwrite and os.path.exists(path):
os.unlink(path)
skip = [str_path(f) for f in skip]
def copytree_rec(src, dst):
names = os.listdir(src)
makedirs(dst, overwrite=overwrite)
errors = []
for name in names:
srcname = os.path.join(src, name)
if srcname in skip:
continue
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
remove_if_overwriting(dstname)
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree_rec(srcname, dstname)
else:
remove_if_overwriting(dstname)
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, OSError) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except shutil.Error as err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except WindowsError:
pass # Ignore errors due to limited Windows copystat support
except OSError as why:
errors.append((src, dst, str(why)))
if errors:
raise shutil.Error(errors)
copytree_rec(str_path(src), str_path(dst))
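# Illustrative call (paths are placeholders): copy a tree, skipping one
# subdirectory and replacing files that already exist at the destination:
#
#     copytree('/src/project', '/dst/project',
#              skip=['/src/project/.cache'], overwrite=True)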
def is_path_below(path, parent):
"""Return True iff `path` is equal to parent or is located below `parent`
at any level.
"""
def normalize(path):
if os.name == 'nt' and not isinstance(path, unicode):
path = path.decode('mbcs')
return os.path.normcase(os.path.abspath(path))
path = normalize(path)
parent = normalize(parent)
return path == parent or path.startswith(parent + os.sep)
class file_or_std(object):
"""Context manager for opening a file or using a standard stream
If `filename` is non-empty, open the file and close it when exiting the
block. Otherwise, use `sys.stdin` if opening for reading, or `sys.stdout`
if opening for writing or appending."""
file = None
def __init__(self, filename, mode='r', bufsize=-1):
self.filename = filename
self.mode = mode
self.bufsize = bufsize
def __enter__(self):
if not self.filename:
return sys.stdin if 'r' in self.mode else sys.stdout
self.file = open(self.filename, self.mode, self.bufsize)
return self.file
def __exit__(self, et, ev, tb):
if self.file is not None:
self.file.close()
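# Illustrative use of file_or_std (example only): read from a file when a name
# is given, otherwise fall back to stdin:
#
#     with file_or_std(filename, 'r') as stream:
#         data = stream.read()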
# -- sys utils
def fq_class_name(obj):
"""Return the fully qualified class name of given object."""
c = type(obj)
m, n = c.__module__, c.__name__
return n if m == '__builtin__' else '%s.%s' % (m, n)
def arity(f):
"""Return the number of arguments expected by the given function, unbound
or bound method.
"""
return f.func_code.co_argcount - bool(getattr(f, 'im_self', False))
def get_last_traceback():
"""Retrieve the last traceback as an `unicode` string."""
import traceback
tb = StringIO()
traceback.print_exc(file=tb)
return to_unicode(tb.getvalue())
_egg_path_re = re.compile(r'build/bdist\.[^/]+/egg/(.*)')
def get_lines_from_file(filename, lineno, context=0, globals=None):
"""Return `content` number of lines before and after the specified
`lineno` from the (source code) file identified by `filename`.
Returns a `(lines_before, line, lines_after)` tuple.
"""
# The linecache module can load source code from eggs since Python 2.6.
# Prior versions return lines from the wrong file, so we try locating
# the file in eggs manually first.
lines = []
match = _egg_path_re.match(filename)
if match:
import zipfile
for path in sys.path:
try:
zip = zipfile.ZipFile(path, 'r')
try:
lines = zip.read(match.group(1)).splitlines()
break
finally:
zip.close()
except Exception:
pass
if not lines:
import linecache
linecache.checkcache(filename)
lines = linecache.getlines(filename, globals)
if not 0 <= lineno < len(lines):
return (), None, ()
lbound = max(0, lineno - context)
ubound = lineno + 1 + context
charset = None
rep = re.compile('coding[=:]\s*([-\w.]+)')
for linestr in lines[:2]:
match = rep.search(linestr)
if match:
charset = match.group(1)
break
before = [to_unicode(l.rstrip('\n'), charset)
for l in lines[lbound:lineno]]
line = to_unicode(lines[lineno].rstrip('\n'), charset)
after = [to_unicode(l.rstrip('\n'), charset)
for l in lines[lineno + 1:ubound]]
return before, line, after
def get_frame_info(tb):
"""Return frame information for a traceback."""
frames = []
while tb:
tb_hide = tb.tb_frame.f_locals.get('__traceback_hide__')
if tb_hide in ('before', 'before_and_this'):
del frames[:]
tb_hide = tb_hide[6:]
if not tb_hide:
filename = tb.tb_frame.f_code.co_filename
filename = filename.replace('\\', '/')
lineno = tb.tb_lineno - 1
before, line, after = get_lines_from_file(filename, lineno, 5,
tb.tb_frame.f_globals)
frames.append({'traceback': tb, 'filename': filename,
'lineno': lineno, 'line': line,
'lines_before': before, 'lines_after': after,
'function': tb.tb_frame.f_code.co_name,
'vars': tb.tb_frame.f_locals})
tb = tb.tb_next
return frames
def safe__import__(module_name):
"""
Safe imports: rollback after a failed import.
Initially inspired from the RollbackImporter in PyUnit,
but it's now much simpler and works better for our needs.
See http://pyunit.sourceforge.net/notes/reloading.html
"""
already_imported = sys.modules.copy()
try:
return __import__(module_name, globals(), locals(), [])
except Exception as e:
for modname in sys.modules.copy():
if modname not in already_imported:
del(sys.modules[modname])
raise e
def safe_repr(x):
"""`repr` replacement which "never" breaks.
Make sure we always get a representation of the input `x`
without risking to trigger an exception (e.g. from a buggy
`x.__repr__`).
.. versionadded :: 1.0
"""
try:
return to_unicode(repr(x))
except Exception as e:
return "<%s object at 0x%X (repr() error: %s)>" % (
fq_class_name(x), id(x), exception_to_unicode(e))
def get_doc(obj):
"""Return the docstring of an object as a tuple `(summary, description)`,
where `summary` is the first paragraph and `description` is the remaining
text.
"""
doc = inspect.getdoc(obj)
if not doc:
return None, None
doc = to_unicode(doc).split('\n\n', 1)
summary = doc[0].replace('\n', ' ')
description = doc[1] if len(doc) > 1 else None
return summary, description
_dont_import = frozenset(['__file__', '__name__', '__package__'])
def import_namespace(globals_dict, module_name):
"""Import the namespace of a module into a globals dict.
This function is used in stub modules to import all symbols defined in
another module into the global namespace of the stub, usually for
backward compatibility.
"""
__import__(module_name)
module = sys.modules[module_name]
globals_dict.update(item for item in module.__dict__.iteritems()
if item[0] not in _dont_import)
globals_dict.pop('import_namespace', None)
# -- setuptools utils
def get_module_path(module):
"""Return the base path the given module is imported from"""
path = module.__file__
module_name = module.__name__
if path.endswith(('.pyc', '.pyo')):
path = path[:-1]
if os.path.basename(path) == '__init__.py':
path = os.path.dirname(path)
base_path = os.path.splitext(path)[0]
while base_path.replace(os.sep, '.').endswith(module_name):
base_path = os.path.dirname(base_path)
module_name = '.'.join(module_name.split('.')[:-1])
if not module_name:
break
return base_path
def get_sources(path):
"""Return a dictionary mapping Python module source paths to the
distributions that contain them.
"""
sources = {}
for dist in find_distributions(path, only=True):
if not dist.has_metadata('top_level.txt'):
continue
toplevels = dist.get_metadata_lines('top_level.txt')
toplevels = [top + '/' for top in toplevels]
if dist.has_metadata('SOURCES.txt'): # *.egg-info/SOURCES.txt
sources.update((src, dist)
for src in dist.get_metadata_lines('SOURCES.txt')
if any(src.startswith(top) for top in toplevels))
continue
if dist.has_metadata('RECORD'): # *.dist-info/RECORD
reader = csv.reader(StringIO(dist.get_metadata('RECORD')))
sources.update((row[0], dist)
for row in reader if any(row[0].startswith(top)
for top in toplevels))
continue
return sources
def get_pkginfo(dist):
"""Get a dictionary containing package information for a package
`dist` can be either a Distribution instance or, as a shortcut,
directly the module instance, if one can safely infer a Distribution
instance from it.
Always returns a dictionary but it will be empty if no Distribution
instance can be created for the given module.
"""
import email
import email.errors
import types
from trac.util.translation import _
def parse_pkginfo(dist, name):
return email.message_from_string(to_utf8(dist.get_metadata(name)))
if isinstance(dist, types.ModuleType):
def has_resource(dist, module, resource_name):
if dist.location.endswith('.egg'): # installed by easy_install
return dist.has_resource(resource_name)
if dist.has_metadata('installed-files.txt'): # installed by pip
resource_name = os.path.normpath('../' + resource_name)
return any(resource_name == os.path.normpath(name)
for name
in dist.get_metadata_lines('installed-files.txt'))
if dist.has_metadata('SOURCES.txt'):
resource_name = os.path.normpath(resource_name)
return any(resource_name == os.path.normpath(name)
for name in dist.get_metadata_lines('SOURCES.txt'))
if dist.has_metadata('RECORD'): # *.dist-info/RECORD
reader = csv.reader(StringIO(dist.get_metadata('RECORD')))
return any(resource_name == row[0] for row in reader)
if dist.has_metadata('PKG-INFO'):
try:
pkginfo = parse_pkginfo(dist, 'PKG-INFO')
provides = pkginfo.get_all('Provides', ())
names = module.__name__.split('.')
if any('.'.join(names[:n + 1]) in provides
for n in xrange(len(names))):
return True
                except (IOError, email.errors.MessageError):
pass
toplevel = resource_name.split('/')[0]
if dist.has_metadata('top_level.txt'):
return toplevel in dist.get_metadata_lines('top_level.txt')
return dist.key == toplevel.lower()
module = dist
module_path = get_module_path(module)
resource_name = module.__name__.replace('.', '/')
if os.path.basename(module.__file__) in ('__init__.py', '__init__.pyc',
'__init__.pyo'):
resource_name += '/__init__.py'
else:
resource_name += '.py'
for dist in find_distributions(module_path, only=True):
if os.path.isfile(module_path) or \
has_resource(dist, module, resource_name):
break
else:
return {}
attrs = ('author', 'author-email', 'license', 'home-page', 'summary',
'name', 'description', 'version')
info = {}
def normalize(attr):
return attr.lower().replace('-', '_')
metadata = 'METADATA' if dist.has_metadata('METADATA') else 'PKG-INFO'
try:
pkginfo = parse_pkginfo(dist, metadata)
for attr in [key for key in attrs if key in pkginfo]:
info[normalize(attr)] = pkginfo[attr]
except IOError as e:
err = _("Failed to read %(metadata)s file for %(dist)s: %(err)s",
metadata=metadata, dist=dist, err=to_unicode(e))
for attr in attrs:
info[normalize(attr)] = err
except email.errors.MessageError as e:
err = _("Failed to parse %(metadata)s file for %(dist)s: %(err)s",
metadata=metadata, dist=dist, err=to_unicode(e))
for attr in attrs:
info[normalize(attr)] = err
return info
def warn_setuptools_issue(out=None):
if not out:
out = sys.stderr
import setuptools
from pkg_resources import parse_version as parse
if parse('5.4') <= parse(setuptools.__version__) < parse('5.7') and \
not os.environ.get('PKG_RESOURCES_CACHE_ZIP_MANIFESTS'):
out.write("Warning: Detected setuptools version %s. The environment "
"variable 'PKG_RESOURCES_CACHE_ZIP_MANIFESTS' must be set "
"to avoid significant performance degradation.\n"
% setuptools.__version__)
# -- crypto utils
try:
os.urandom(16)
urandom = os.urandom
except NotImplementedError:
_entropy = random.Random()
def urandom(n):
result = []
hasher = hashlib.sha1(str(os.getpid()) + str(time_now()))
while len(result) * hasher.digest_size < n:
hasher.update(str(_entropy.random()))
result.append(hasher.digest())
result = ''.join(result)
return result[:n] if len(result) > n else result
def hex_entropy(digits=32):
"""Generate `digits` number of hex digits of entropy."""
result = ''.join('%.2x' % ord(v) for v in urandom((digits + 1) // 2))
return result[:digits] if len(result) > digits else result
def salt(length=2):
"""Returns a string of `length` random letters and numbers."""
return ''.join(random.choice(string.ascii_letters + string.digits + '/.')
for x in range(length))
# Original license for md5crypt:
# Based on FreeBSD src/lib/libcrypt/crypt.c 1.2
#
# "THE BEER-WARE LICENSE" (Revision 42):
# <[email protected]> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
def md5crypt(password, salt, magic='$1$'):
"""Based on FreeBSD src/lib/libcrypt/crypt.c 1.2
:param password: the plain text password to crypt
:param salt: the raw salt
:param magic: our magic string
"""
# /* The password first, since that is what is most unknown */
# /* Then our magic string */
# /* Then the raw salt */
m = hashlib.md5(password + magic + salt)
# /* Then just as many characters of the MD5(pw,salt,pw) */
mixin = hashlib.md5(password + salt + password).digest()
for i in range(0, len(password)):
m.update(mixin[i % 16])
# /* Then something really weird... */
# Also really broken, as far as I can tell. -m
i = len(password)
while i:
if i & 1:
m.update('\x00')
else:
m.update(password[0])
i >>= 1
final = m.digest()
# /* and now, just to make sure things don't run too fast */
for i in range(1000):
m2 = hashlib.md5()
if i & 1:
m2.update(password)
else:
m2.update(final)
if i % 3:
m2.update(salt)
if i % 7:
m2.update(password)
if i & 1:
m2.update(final)
else:
m2.update(password)
final = m2.digest()
# This is the bit that uses to64() in the original code.
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):
v = ord(final[a]) << 16 | ord(final[b]) << 8 | ord(final[c])
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord(final[11])
for i in range(2):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged
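# Example (illustrative, output shape only): md5crypt('secret', 'ab') returns
# a string of the form '$1$' + 'ab' + '$' + 22 characters drawn from itoa64.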
# -- data structures
class Ranges(object):
"""Holds information about ranges parsed from a string
:author: Tim Hatch
>>> x = Ranges("1,2,9-15")
>>> 1 in x
True
>>> 5 in x
False
>>> 10 in x
True
>>> 16 in x
False
>>> [i for i in range(20) if i in x]
[1, 2, 9, 10, 11, 12, 13, 14, 15]
Also supports iteration, which makes that last example a bit simpler:
>>> list(x)
[1, 2, 9, 10, 11, 12, 13, 14, 15]
Note that it automatically reduces the list and short-circuits when the
desired ranges are a relatively small portion of the entire set:
>>> x = Ranges("99")
>>> 1 in x # really fast
False
>>> x = Ranges("1, 2, 1-2, 2") # reduces this to 1-2
>>> x.pairs
[(1, 2)]
>>> x = Ranges("1-9,2-4") # handle ranges that completely overlap
>>> list(x)
[1, 2, 3, 4, 5, 6, 7, 8, 9]
The members 'a' and 'b' refer to the min and max value of the range, and
are None if the range is empty:
>>> x.a
1
>>> x.b
9
>>> e = Ranges()
>>> e.a, e.b
(None, None)
Empty ranges are ok, and ranges can be constructed in pieces, if you
so choose:
>>> x = Ranges()
>>> x.appendrange("1, 2, 3")
>>> x.appendrange("5-9")
>>> x.appendrange("2-3") # reduce'd away
>>> list(x)
[1, 2, 3, 5, 6, 7, 8, 9]
Reversed ranges are ignored, unless the Ranges has the `reorder` property
set.
>>> str(Ranges("20-10"))
''
>>> str(Ranges("20-10", reorder=True))
'10-20'
As rendered ranges are often using u',\u200b' (comma + Zero-width
space) to enable wrapping, we also support reading such ranges, as
they can be copy/pasted back.
>>> str(Ranges(u'1,\u200b3,\u200b5,\u200b6,\u200b7,\u200b9'))
'1,3,5-7,9'
"""
RE_STR = ur'[0-9]+(?:[-:][0-9]+)?(?:,\u200b?[0-9]+(?:[-:][0-9]+)?)*'
def __init__(self, r=None, reorder=False):
self.pairs = []
self.a = self.b = None
self.reorder = reorder
self.appendrange(r)
def appendrange(self, r):
"""Add ranges to the current one.
A range is specified as a string of the form "low-high", and
`r` can be a list of such strings, a string containing comma-separated
ranges, or `None`.
"""
if not r:
return
p = self.pairs
if isinstance(r, basestring):
r = re.split(u',\u200b?', r)
for x in r:
try:
a, b = map(int, x.split('-', 1))
except ValueError:
a, b = int(x), int(x)
if b >= a:
p.append((a, b))
elif self.reorder:
p.append((b, a))
self._reduce()
def _reduce(self):
"""Come up with the minimal representation of the ranges"""
p = self.pairs
p.sort()
i = 0
while i + 1 < len(p):
if p[i+1][0]-1 <= p[i][1]: # this item overlaps with the next
# make the first include the second
p[i] = (p[i][0], max(p[i][1], p[i+1][1]))
del p[i+1] # delete the second, after adjusting my endpoint
else:
i += 1
if p:
self.a = p[0][0] # min value
self.b = p[-1][1] # max value
else:
self.a = self.b = None
def __iter__(self):
"""
This is another way I came up with to do it. Is it faster?
from itertools import chain
return chain(*[xrange(a, b+1) for a, b in self.pairs])
"""
for a, b in self.pairs:
for i in range(a, b+1):
yield i
def __contains__(self, x):
"""
>>> 55 in Ranges()
False
"""
# short-circuit if outside the possible range
if self.a is not None and self.a <= x <= self.b:
for a, b in self.pairs:
if a <= x <= b:
return True
if b > x: # short-circuit if we've gone too far
break
return False
def __str__(self):
"""Provide a compact string representation of the range.
>>> (str(Ranges("1,2,3,5")), str(Ranges()), str(Ranges('2')))
('1-3,5', '', '2')
>>> str(Ranges('99-1')) # only nondecreasing ranges allowed
''
"""
r = []
for a, b in self.pairs:
if a == b:
r.append(str(a))
else:
r.append("%d-%d" % (a, b))
return ",".join(r)
def __len__(self):
"""The length of the entire span, ignoring holes.
>>> (len(Ranges('99')), len(Ranges('1-2')), len(Ranges('')))
(1, 2, 0)
"""
if self.a is None or self.b is None:
return 0
# Result must fit an int
return min(self.b - self.a + 1, sys.maxint)
def __nonzero__(self):
"""Return True iff the range is not empty.
>>> (bool(Ranges()), bool(Ranges('1-2')))
(False, True)
"""
return self.a is not None and self.b is not None
def truncate(self, max):
"""Truncate the Ranges by setting a maximal allowed value.
Note that this `max` can be a value in a gap, so the only guarantee
is that `self.b` will be lesser than or equal to `max`.
>>> r = Ranges("10-20,25-45")
>>> str(r.truncate(30))
'10-20,25-30'
>>> str(r.truncate(22))
'10-20'
>>> str(r.truncate(10))
'10'
"""
r = Ranges()
r.a, r.b, r.reorder = self.a, self.b, self.reorder
r.pairs = []
for a, b in self.pairs:
if a <= max:
if b > max:
r.pairs.append((a, max))
r.b = max
break
r.pairs.append((a, b))
else:
break
return r
def to_ranges(revs):
"""Converts a list of revisions to a minimal set of ranges.
>>> to_ranges([2, 12, 3, 6, 9, 1, 5, 11])
'1-3,5-6,9,11-12'
>>> to_ranges([])
''
"""
ranges = []
begin = end = None
def store():
if end == begin:
ranges.append(str(begin))
else:
ranges.append('%d-%d' % (begin, end))
for rev in sorted(revs):
if begin is None:
begin = end = rev
elif rev == end + 1:
end = rev
else:
store()
begin = end = rev
if begin is not None:
store()
return ','.join(ranges)
class lazy(object):
"""A lazily-evaluated attribute.
:since: 1.0
"""
def __init__(self, fn):
self.fn = fn
functools.update_wrapper(self, fn)
def __get__(self, instance, owner):
if instance is None:
return self
if self.fn.__name__ in instance.__dict__:
return instance.__dict__[self.fn.__name__]
result = self.fn(instance)
instance.__dict__[self.fn.__name__] = result
return result
def __set__(self, instance, value):
instance.__dict__[self.fn.__name__] = value
def __delete__(self, instance):
if self.fn.__name__ in instance.__dict__:
del instance.__dict__[self.fn.__name__]
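# Illustrative use of the lazy descriptor (example only; `expensive_parse` is a
# hypothetical helper):
#
#     class Config(object):
#         @lazy
#         def parsed(self):
#             return expensive_parse()  # evaluated on first access only
#
# The first access stores the result in the instance __dict__, so later
# accesses bypass the descriptor entirely.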
# -- algorithmic utilities
DIGITS = re.compile(r'(\d+)')
def embedded_numbers(s):
"""Comparison function for natural order sorting based on
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/214202."""
pieces = DIGITS.split(s)
pieces[1::2] = map(int, pieces[1::2])
return pieces
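# Illustrative use (example only): sorted(['a10', 'a2'], key=embedded_numbers)
# yields ['a2', 'a10'] instead of the plain lexicographic ['a10', 'a2'].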
def partition(iterable, order=None):
"""
>>> partition([(1, "a"), (2, "b"), (3, "a")])
{'a': [1, 3], 'b': [2]}
>>> partition([(1, "a"), (2, "b"), (3, "a")], "ab")
[[1, 3], [2]]
"""
result = {}
if order is not None:
for key in order:
result[key] = []
for item, category in iterable:
result.setdefault(category, []).append(item)
if order is None:
return result
return [result[key] for key in order]
def as_int(s, default, min=None, max=None):
"""Convert s to an int and limit it to the given range, or return default
if unsuccessful."""
try:
value = int(s)
except (TypeError, ValueError):
return default
if min is not None and value < min:
value = min
if max is not None and value > max:
value = max
return value
def as_bool(value, default=False):
"""Convert the given value to a `bool`.
If `value` is a string, return `True` for any of "yes", "true",
"enabled", "on" or non-zero numbers, ignoring case. For non-string
arguments, return the argument converted to a `bool`, or `default`
if the conversion fails.
:since 1.2: the `default` argument can be specified.
"""
if isinstance(value, basestring):
try:
return bool(float(value))
except ValueError:
value = value.strip().lower()
if value in ('yes', 'true', 'enabled', 'on'):
return True
elif value in ('no', 'false', 'disabled', 'off'):
return False
else:
return default
try:
return bool(value)
except (TypeError, ValueError):
return default
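# Illustrative checks derived from the implementation above (examples only):
def _as_bool_examples():
    assert as_bool('yes') is True
    assert as_bool('off') is False
    assert as_bool('0') is False              # numeric strings go through float()
    assert as_bool('maybe') is False          # unrecognised string -> default
    assert as_bool('maybe', default=True) is True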
def pathjoin(*args):
"""Strip `/` from the arguments and join them with a single `/`."""
return '/'.join(filter(None, (each.strip('/') for each in args if each)))
def to_list(splittable, sep=','):
"""Split a string at `sep` and return a list without any empty items.
>>> to_list('1,2, 3,4 ')
['1', '2', '3', '4']
>>> to_list('1;2; 3;4 ', sep=';')
['1', '2', '3', '4']
>>> to_list('')
[]
>>> to_list(None)
[]
>>> to_list([])
[]
"""
if not splittable:
return []
split = [x.strip() for x in splittable.split(sep)]
return [item for item in split if item]
def sub_val(the_list, item_to_remove, item_to_add):
"""Substitute an item if the item is found in a list, otherwise leave
the list unmodified.
"""
try:
index = the_list.index(item_to_remove)
except ValueError:
pass
else:
the_list[index] = item_to_add
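# Example (illustrative): sub_val(['a', 'b', 'c'], 'b', 'x') turns the list into
# ['a', 'x', 'c']; if 'b' were absent the list would be left unmodified.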
# Imports for backward compatibility (at bottom to avoid circular dependencies)
from trac.core import TracError
from trac.util.compat import reversed
from trac.util.html import escape, unescape, Markup, Deuglifier
from trac.util.text import CRLF, to_utf8, shorten_line, wrap, pretty_size
from trac.util.datefmt import pretty_timedelta, format_datetime, \
format_date, format_time, \
get_date_format_hint, \
get_datetime_format_hint, http_date, \
parse_date
__no_apidoc__ = 'compat presentation translation'
| []
| []
| [
"PKG_RESOURCES_CACHE_ZIP_MANIFESTS"
]
| [] | ["PKG_RESOURCES_CACHE_ZIP_MANIFESTS"] | python | 1 | 0 | |
pyglet/gl/gl_compat.py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Wrapper for https://raw.githubusercontent.com/KhronosGroup/OpenGL-Registry/master/xml/gl.xml
Generated by tools/gengl.py.
Do not modify this file.
"""
from ctypes import *
from pyglet.gl.lib import link_GL as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# END OF gl.template
# GL type definitions
GLenum = c_uint
GLboolean = c_ubyte
GLbitfield = c_uint
GLvoid = None
GLbyte = c_char
GLubyte = c_ubyte
GLshort = c_short
GLushort = c_ushort
GLint = c_int
GLuint = c_uint
GLclampx = c_uint
GLsizei = c_int
GLfloat = c_float
GLclampf = c_float
GLdouble = c_double
GLclampd = c_double
GLchar = c_char
GLintptr = c_ptrdiff_t
GLsizeiptr = c_ptrdiff_t
GLint64 = c_int64
GLuint64 = c_uint64
# GL enumerant (token) definitions
GL_FALSE = 0
GL_POINTS = 0
GL_ZERO = 0
GL_NONE = 0
GL_NO_ERROR = 0
GL_TRUE = 1
GL_LINES = 1
GL_ONE = 1
GL_CURRENT_BIT = 1
GL_CLIENT_PIXEL_STORE_BIT = 1
GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT = 1
GL_MAP_READ_BIT = 1
GL_CONTEXT_CORE_PROFILE_BIT = 1
GL_SYNC_FLUSH_COMMANDS_BIT = 1
GL_VERTEX_SHADER_BIT = 1
GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT = 1
GL_LINE_LOOP = 2
GL_POINT_BIT = 2
GL_CLIENT_VERTEX_ARRAY_BIT = 2
GL_MAP_WRITE_BIT = 2
GL_CONTEXT_COMPATIBILITY_PROFILE_BIT = 2
GL_FRAGMENT_SHADER_BIT = 2
GL_ELEMENT_ARRAY_BARRIER_BIT = 2
GL_CONTEXT_FLAG_DEBUG_BIT = 2
GL_LINE_STRIP = 3
GL_TRIANGLES = 4
GL_LINE_BIT = 4
GL_MAP_INVALIDATE_RANGE_BIT = 4
GL_GEOMETRY_SHADER_BIT = 4
GL_UNIFORM_BARRIER_BIT = 4
GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT = 4
GL_TRIANGLE_STRIP = 5
GL_TRIANGLE_FAN = 6
GL_QUADS = 7
GL_POLYGON_BIT = 8
GL_QUAD_STRIP = 8
GL_MAP_INVALIDATE_BUFFER_BIT = 8
GL_TESS_CONTROL_SHADER_BIT = 8
GL_TEXTURE_FETCH_BARRIER_BIT = 8
GL_CONTEXT_FLAG_NO_ERROR_BIT = 8
GL_POLYGON = 9
GL_LINES_ADJACENCY = 10
GL_LINE_STRIP_ADJACENCY = 11
GL_TRIANGLES_ADJACENCY = 12
GL_TRIANGLE_STRIP_ADJACENCY = 13
GL_PATCHES = 14
GL_POLYGON_STIPPLE_BIT = 16
GL_MAP_FLUSH_EXPLICIT_BIT = 16
GL_TESS_EVALUATION_SHADER_BIT = 16
GL_PIXEL_MODE_BIT = 32
GL_MAP_UNSYNCHRONIZED_BIT = 32
GL_SHADER_IMAGE_ACCESS_BARRIER_BIT = 32
GL_COMPUTE_SHADER_BIT = 32
GL_LIGHTING_BIT = 64
GL_COMMAND_BARRIER_BIT = 64
GL_MAP_PERSISTENT_BIT = 64
GL_FOG_BIT = 128
GL_PIXEL_BUFFER_BARRIER_BIT = 128
GL_MAP_COHERENT_BIT = 128
GL_DEPTH_BUFFER_BIT = 256
GL_ACCUM = 256
GL_TEXTURE_UPDATE_BARRIER_BIT = 256
GL_DYNAMIC_STORAGE_BIT = 256
GL_LOAD = 257
GL_RETURN = 258
GL_MULT = 259
GL_ADD = 260
GL_NEVER = 512
GL_ACCUM_BUFFER_BIT = 512
GL_BUFFER_UPDATE_BARRIER_BIT = 512
GL_CLIENT_STORAGE_BIT = 512
GL_LESS = 513
GL_EQUAL = 514
GL_LEQUAL = 515
GL_GREATER = 516
GL_NOTEQUAL = 517
GL_GEQUAL = 518
GL_ALWAYS = 519
GL_SRC_COLOR = 768
GL_ONE_MINUS_SRC_COLOR = 769
GL_SRC_ALPHA = 770
GL_ONE_MINUS_SRC_ALPHA = 771
GL_DST_ALPHA = 772
GL_ONE_MINUS_DST_ALPHA = 773
GL_DST_COLOR = 774
GL_ONE_MINUS_DST_COLOR = 775
GL_SRC_ALPHA_SATURATE = 776
GL_STENCIL_BUFFER_BIT = 1024
GL_FRONT_LEFT = 1024
GL_FRAMEBUFFER_BARRIER_BIT = 1024
GL_FRONT_RIGHT = 1025
GL_BACK_LEFT = 1026
GL_BACK_RIGHT = 1027
GL_FRONT = 1028
GL_BACK = 1029
GL_LEFT = 1030
GL_RIGHT = 1031
GL_FRONT_AND_BACK = 1032
GL_AUX0 = 1033
GL_AUX1 = 1034
GL_AUX2 = 1035
GL_AUX3 = 1036
GL_INVALID_ENUM = 1280
GL_INVALID_VALUE = 1281
GL_INVALID_OPERATION = 1282
GL_STACK_OVERFLOW = 1283
GL_STACK_UNDERFLOW = 1284
GL_OUT_OF_MEMORY = 1285
GL_INVALID_FRAMEBUFFER_OPERATION = 1286
GL_INVALID_FRAMEBUFFER_OPERATION_EXT = 1286
GL_CONTEXT_LOST = 1287
GL_2D = 1536
GL_3D = 1537
GL_3D_COLOR = 1538
GL_3D_COLOR_TEXTURE = 1539
GL_4D_COLOR_TEXTURE = 1540
GL_PASS_THROUGH_TOKEN = 1792
GL_POINT_TOKEN = 1793
GL_LINE_TOKEN = 1794
GL_POLYGON_TOKEN = 1795
GL_BITMAP_TOKEN = 1796
GL_DRAW_PIXEL_TOKEN = 1797
GL_COPY_PIXEL_TOKEN = 1798
GL_LINE_RESET_TOKEN = 1799
GL_VIEWPORT_BIT = 2048
GL_EXP = 2048
GL_TRANSFORM_FEEDBACK_BARRIER_BIT = 2048
GL_EXP2 = 2049
GL_CW = 2304
GL_CCW = 2305
GL_COEFF = 2560
GL_ORDER = 2561
GL_DOMAIN = 2562
GL_CURRENT_COLOR = 2816
GL_CURRENT_INDEX = 2817
GL_CURRENT_NORMAL = 2818
GL_CURRENT_TEXTURE_COORDS = 2819
GL_CURRENT_RASTER_COLOR = 2820
GL_CURRENT_RASTER_INDEX = 2821
GL_CURRENT_RASTER_TEXTURE_COORDS = 2822
GL_CURRENT_RASTER_POSITION = 2823
GL_CURRENT_RASTER_POSITION_VALID = 2824
GL_CURRENT_RASTER_DISTANCE = 2825
GL_POINT_SMOOTH = 2832
GL_POINT_SIZE = 2833
GL_POINT_SIZE_RANGE = 2834
GL_SMOOTH_POINT_SIZE_RANGE = 2834
GL_POINT_SIZE_GRANULARITY = 2835
GL_SMOOTH_POINT_SIZE_GRANULARITY = 2835
GL_LINE_SMOOTH = 2848
GL_LINE_WIDTH = 2849
GL_LINE_WIDTH_RANGE = 2850
GL_SMOOTH_LINE_WIDTH_RANGE = 2850
GL_LINE_WIDTH_GRANULARITY = 2851
GL_SMOOTH_LINE_WIDTH_GRANULARITY = 2851
GL_LINE_STIPPLE = 2852
GL_LINE_STIPPLE_PATTERN = 2853
GL_LINE_STIPPLE_REPEAT = 2854
GL_LIST_MODE = 2864
GL_MAX_LIST_NESTING = 2865
GL_LIST_BASE = 2866
GL_LIST_INDEX = 2867
GL_POLYGON_MODE = 2880
GL_POLYGON_SMOOTH = 2881
GL_POLYGON_STIPPLE = 2882
GL_EDGE_FLAG = 2883
GL_CULL_FACE = 2884
GL_CULL_FACE_MODE = 2885
GL_FRONT_FACE = 2886
GL_LIGHTING = 2896
GL_LIGHT_MODEL_LOCAL_VIEWER = 2897
GL_LIGHT_MODEL_TWO_SIDE = 2898
GL_LIGHT_MODEL_AMBIENT = 2899
GL_SHADE_MODEL = 2900
GL_COLOR_MATERIAL_FACE = 2901
GL_COLOR_MATERIAL_PARAMETER = 2902
GL_COLOR_MATERIAL = 2903
GL_FOG = 2912
GL_FOG_INDEX = 2913
GL_FOG_DENSITY = 2914
GL_FOG_START = 2915
GL_FOG_END = 2916
GL_FOG_MODE = 2917
GL_FOG_COLOR = 2918
GL_DEPTH_RANGE = 2928
GL_DEPTH_TEST = 2929
GL_DEPTH_WRITEMASK = 2930
GL_DEPTH_CLEAR_VALUE = 2931
GL_DEPTH_FUNC = 2932
GL_ACCUM_CLEAR_VALUE = 2944
GL_STENCIL_TEST = 2960
GL_STENCIL_CLEAR_VALUE = 2961
GL_STENCIL_FUNC = 2962
GL_STENCIL_VALUE_MASK = 2963
GL_STENCIL_FAIL = 2964
GL_STENCIL_PASS_DEPTH_FAIL = 2965
GL_STENCIL_PASS_DEPTH_PASS = 2966
GL_STENCIL_REF = 2967
GL_STENCIL_WRITEMASK = 2968
GL_MATRIX_MODE = 2976
GL_NORMALIZE = 2977
GL_VIEWPORT = 2978
GL_MODELVIEW_STACK_DEPTH = 2979
GL_PROJECTION_STACK_DEPTH = 2980
GL_TEXTURE_STACK_DEPTH = 2981
GL_MODELVIEW_MATRIX = 2982
GL_PROJECTION_MATRIX = 2983
GL_TEXTURE_MATRIX = 2984
GL_ATTRIB_STACK_DEPTH = 2992
GL_CLIENT_ATTRIB_STACK_DEPTH = 2993
GL_ALPHA_TEST = 3008
GL_ALPHA_TEST_FUNC = 3009
GL_ALPHA_TEST_REF = 3010
GL_DITHER = 3024
GL_BLEND_DST = 3040
GL_BLEND_SRC = 3041
GL_BLEND = 3042
GL_LOGIC_OP_MODE = 3056
GL_LOGIC_OP = 3057
GL_INDEX_LOGIC_OP = 3057
GL_COLOR_LOGIC_OP = 3058
GL_AUX_BUFFERS = 3072
GL_DRAW_BUFFER = 3073
GL_READ_BUFFER = 3074
GL_SCISSOR_BOX = 3088
GL_SCISSOR_TEST = 3089
GL_INDEX_CLEAR_VALUE = 3104
GL_INDEX_WRITEMASK = 3105
GL_COLOR_CLEAR_VALUE = 3106
GL_COLOR_WRITEMASK = 3107
GL_INDEX_MODE = 3120
GL_RGBA_MODE = 3121
GL_DOUBLEBUFFER = 3122
GL_STEREO = 3123
GL_RENDER_MODE = 3136
GL_PERSPECTIVE_CORRECTION_HINT = 3152
GL_POINT_SMOOTH_HINT = 3153
GL_LINE_SMOOTH_HINT = 3154
GL_POLYGON_SMOOTH_HINT = 3155
GL_FOG_HINT = 3156
GL_TEXTURE_GEN_S = 3168
GL_TEXTURE_GEN_T = 3169
GL_TEXTURE_GEN_R = 3170
GL_TEXTURE_GEN_Q = 3171
GL_PIXEL_MAP_I_TO_I = 3184
GL_PIXEL_MAP_S_TO_S = 3185
GL_PIXEL_MAP_I_TO_R = 3186
GL_PIXEL_MAP_I_TO_G = 3187
GL_PIXEL_MAP_I_TO_B = 3188
GL_PIXEL_MAP_I_TO_A = 3189
GL_PIXEL_MAP_R_TO_R = 3190
GL_PIXEL_MAP_G_TO_G = 3191
GL_PIXEL_MAP_B_TO_B = 3192
GL_PIXEL_MAP_A_TO_A = 3193
GL_PIXEL_MAP_I_TO_I_SIZE = 3248
GL_PIXEL_MAP_S_TO_S_SIZE = 3249
GL_PIXEL_MAP_I_TO_R_SIZE = 3250
GL_PIXEL_MAP_I_TO_G_SIZE = 3251
GL_PIXEL_MAP_I_TO_B_SIZE = 3252
GL_PIXEL_MAP_I_TO_A_SIZE = 3253
GL_PIXEL_MAP_R_TO_R_SIZE = 3254
GL_PIXEL_MAP_G_TO_G_SIZE = 3255
GL_PIXEL_MAP_B_TO_B_SIZE = 3256
GL_PIXEL_MAP_A_TO_A_SIZE = 3257
GL_UNPACK_SWAP_BYTES = 3312
GL_UNPACK_LSB_FIRST = 3313
GL_UNPACK_ROW_LENGTH = 3314
GL_UNPACK_SKIP_ROWS = 3315
GL_UNPACK_SKIP_PIXELS = 3316
GL_UNPACK_ALIGNMENT = 3317
GL_PACK_SWAP_BYTES = 3328
GL_PACK_LSB_FIRST = 3329
GL_PACK_ROW_LENGTH = 3330
GL_PACK_SKIP_ROWS = 3331
GL_PACK_SKIP_PIXELS = 3332
GL_PACK_ALIGNMENT = 3333
GL_MAP_COLOR = 3344
GL_MAP_STENCIL = 3345
GL_INDEX_SHIFT = 3346
GL_INDEX_OFFSET = 3347
GL_RED_SCALE = 3348
GL_RED_BIAS = 3349
GL_ZOOM_X = 3350
GL_ZOOM_Y = 3351
GL_GREEN_SCALE = 3352
GL_GREEN_BIAS = 3353
GL_BLUE_SCALE = 3354
GL_BLUE_BIAS = 3355
GL_ALPHA_SCALE = 3356
GL_ALPHA_BIAS = 3357
GL_DEPTH_SCALE = 3358
GL_DEPTH_BIAS = 3359
GL_MAX_EVAL_ORDER = 3376
GL_MAX_LIGHTS = 3377
GL_MAX_CLIP_PLANES = 3378
GL_MAX_CLIP_DISTANCES = 3378
GL_MAX_TEXTURE_SIZE = 3379
GL_MAX_PIXEL_MAP_TABLE = 3380
GL_MAX_ATTRIB_STACK_DEPTH = 3381
GL_MAX_MODELVIEW_STACK_DEPTH = 3382
GL_MAX_NAME_STACK_DEPTH = 3383
GL_MAX_PROJECTION_STACK_DEPTH = 3384
GL_MAX_TEXTURE_STACK_DEPTH = 3385
GL_MAX_VIEWPORT_DIMS = 3386
GL_MAX_CLIENT_ATTRIB_STACK_DEPTH = 3387
GL_SUBPIXEL_BITS = 3408
GL_INDEX_BITS = 3409
GL_RED_BITS = 3410
GL_GREEN_BITS = 3411
GL_BLUE_BITS = 3412
GL_ALPHA_BITS = 3413
GL_DEPTH_BITS = 3414
GL_STENCIL_BITS = 3415
GL_ACCUM_RED_BITS = 3416
GL_ACCUM_GREEN_BITS = 3417
GL_ACCUM_BLUE_BITS = 3418
GL_ACCUM_ALPHA_BITS = 3419
GL_NAME_STACK_DEPTH = 3440
GL_AUTO_NORMAL = 3456
GL_MAP1_COLOR_4 = 3472
GL_MAP1_INDEX = 3473
GL_MAP1_NORMAL = 3474
GL_MAP1_TEXTURE_COORD_1 = 3475
GL_MAP1_TEXTURE_COORD_2 = 3476
GL_MAP1_TEXTURE_COORD_3 = 3477
GL_MAP1_TEXTURE_COORD_4 = 3478
GL_MAP1_VERTEX_3 = 3479
GL_MAP1_VERTEX_4 = 3480
GL_MAP2_COLOR_4 = 3504
GL_MAP2_INDEX = 3505
GL_MAP2_NORMAL = 3506
GL_MAP2_TEXTURE_COORD_1 = 3507
GL_MAP2_TEXTURE_COORD_2 = 3508
GL_MAP2_TEXTURE_COORD_3 = 3509
GL_MAP2_TEXTURE_COORD_4 = 3510
GL_MAP2_VERTEX_3 = 3511
GL_MAP2_VERTEX_4 = 3512
GL_MAP1_GRID_DOMAIN = 3536
GL_MAP1_GRID_SEGMENTS = 3537
GL_MAP2_GRID_DOMAIN = 3538
GL_MAP2_GRID_SEGMENTS = 3539
GL_TEXTURE_1D = 3552
GL_TEXTURE_2D = 3553
GL_FEEDBACK_BUFFER_POINTER = 3568
GL_FEEDBACK_BUFFER_SIZE = 3569
GL_FEEDBACK_BUFFER_TYPE = 3570
GL_SELECTION_BUFFER_POINTER = 3571
GL_SELECTION_BUFFER_SIZE = 3572
GL_TEXTURE_WIDTH = 4096
GL_TRANSFORM_BIT = 4096
GL_ATOMIC_COUNTER_BARRIER_BIT = 4096
GL_TEXTURE_HEIGHT = 4097
GL_TEXTURE_COMPONENTS = 4099
GL_TEXTURE_INTERNAL_FORMAT = 4099
GL_TEXTURE_BORDER_COLOR = 4100
GL_TEXTURE_BORDER = 4101
GL_TEXTURE_TARGET = 4102
GL_DONT_CARE = 4352
GL_FASTEST = 4353
GL_NICEST = 4354
GL_AMBIENT = 4608
GL_DIFFUSE = 4609
GL_SPECULAR = 4610
GL_POSITION = 4611
GL_SPOT_DIRECTION = 4612
GL_SPOT_EXPONENT = 4613
GL_SPOT_CUTOFF = 4614
GL_CONSTANT_ATTENUATION = 4615
GL_LINEAR_ATTENUATION = 4616
GL_QUADRATIC_ATTENUATION = 4617
GL_COMPILE = 4864
GL_COMPILE_AND_EXECUTE = 4865
GL_BYTE = 5120
GL_UNSIGNED_BYTE = 5121
GL_SHORT = 5122
GL_UNSIGNED_SHORT = 5123
GL_INT = 5124
GL_UNSIGNED_INT = 5125
GL_FLOAT = 5126
GL_2_BYTES = 5127
GL_3_BYTES = 5128
GL_4_BYTES = 5129
GL_DOUBLE = 5130
GL_HALF_FLOAT = 5131
GL_FIXED = 5132
GL_CLEAR = 5376
GL_AND = 5377
GL_AND_REVERSE = 5378
GL_COPY = 5379
GL_AND_INVERTED = 5380
GL_NOOP = 5381
GL_XOR = 5382
GL_OR = 5383
GL_NOR = 5384
GL_EQUIV = 5385
GL_INVERT = 5386
GL_OR_REVERSE = 5387
GL_COPY_INVERTED = 5388
GL_OR_INVERTED = 5389
GL_NAND = 5390
GL_SET = 5391
GL_EMISSION = 5632
GL_SHININESS = 5633
GL_AMBIENT_AND_DIFFUSE = 5634
GL_COLOR_INDEXES = 5635
GL_MODELVIEW = 5888
GL_PROJECTION = 5889
GL_TEXTURE = 5890
GL_COLOR = 6144
GL_DEPTH = 6145
GL_STENCIL = 6146
GL_COLOR_INDEX = 6400
GL_STENCIL_INDEX = 6401
GL_DEPTH_COMPONENT = 6402
GL_RED = 6403
GL_GREEN = 6404
GL_BLUE = 6405
GL_ALPHA = 6406
GL_RGB = 6407
GL_RGBA = 6408
GL_LUMINANCE = 6409
GL_LUMINANCE_ALPHA = 6410
GL_BITMAP = 6656
GL_POINT = 6912
GL_LINE = 6913
GL_FILL = 6914
GL_RENDER = 7168
GL_FEEDBACK = 7169
GL_SELECT = 7170
GL_FLAT = 7424
GL_SMOOTH = 7425
GL_KEEP = 7680
GL_REPLACE = 7681
GL_INCR = 7682
GL_DECR = 7683
GL_VENDOR = 7936
GL_RENDERER = 7937
GL_VERSION = 7938
GL_EXTENSIONS = 7939
GL_ENABLE_BIT = 8192
GL_S = 8192
GL_SHADER_STORAGE_BARRIER_BIT = 8192
GL_T = 8193
GL_R = 8194
GL_Q = 8195
GL_MODULATE = 8448
GL_DECAL = 8449
GL_TEXTURE_ENV_MODE = 8704
GL_TEXTURE_ENV_COLOR = 8705
GL_TEXTURE_ENV = 8960
GL_EYE_LINEAR = 9216
GL_OBJECT_LINEAR = 9217
GL_SPHERE_MAP = 9218
GL_TEXTURE_GEN_MODE = 9472
GL_OBJECT_PLANE = 9473
GL_EYE_PLANE = 9474
GL_NEAREST = 9728
GL_LINEAR = 9729
GL_NEAREST_MIPMAP_NEAREST = 9984
GL_LINEAR_MIPMAP_NEAREST = 9985
GL_NEAREST_MIPMAP_LINEAR = 9986
GL_LINEAR_MIPMAP_LINEAR = 9987
GL_TEXTURE_MAG_FILTER = 10240
GL_TEXTURE_MIN_FILTER = 10241
GL_TEXTURE_WRAP_S = 10242
GL_TEXTURE_WRAP_T = 10243
GL_CLAMP = 10496
GL_REPEAT = 10497
GL_POLYGON_OFFSET_UNITS = 10752
GL_POLYGON_OFFSET_POINT = 10753
GL_POLYGON_OFFSET_LINE = 10754
GL_R3_G3_B2 = 10768
GL_V2F = 10784
GL_V3F = 10785
GL_C4UB_V2F = 10786
GL_C4UB_V3F = 10787
GL_C3F_V3F = 10788
GL_N3F_V3F = 10789
GL_C4F_N3F_V3F = 10790
GL_T2F_V3F = 10791
GL_T4F_V4F = 10792
GL_T2F_C4UB_V3F = 10793
GL_T2F_C3F_V3F = 10794
GL_T2F_N3F_V3F = 10795
GL_T2F_C4F_N3F_V3F = 10796
GL_T4F_C4F_N3F_V4F = 10797
GL_CLIP_PLANE0 = 12288
GL_CLIP_DISTANCE0 = 12288
GL_CLIP_PLANE1 = 12289
GL_CLIP_DISTANCE1 = 12289
GL_CLIP_PLANE2 = 12290
GL_CLIP_DISTANCE2 = 12290
GL_CLIP_PLANE3 = 12291
GL_CLIP_DISTANCE3 = 12291
GL_CLIP_PLANE4 = 12292
GL_CLIP_DISTANCE4 = 12292
GL_CLIP_PLANE5 = 12293
GL_CLIP_DISTANCE5 = 12293
GL_CLIP_DISTANCE6 = 12294
GL_CLIP_DISTANCE7 = 12295
GL_COLOR_BUFFER_BIT = 16384
GL_LIGHT0 = 16384
GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT = 16384
GL_LIGHT1 = 16385
GL_LIGHT2 = 16386
GL_LIGHT3 = 16387
GL_LIGHT4 = 16388
GL_LIGHT5 = 16389
GL_LIGHT6 = 16390
GL_LIGHT7 = 16391
GL_HINT_BIT = 32768
GL_QUERY_BUFFER_BARRIER_BIT = 32768
GL_CONSTANT_COLOR = 32769
GL_ONE_MINUS_CONSTANT_COLOR = 32770
GL_CONSTANT_ALPHA = 32771
GL_ONE_MINUS_CONSTANT_ALPHA = 32772
GL_BLEND_COLOR = 32773
GL_FUNC_ADD = 32774
GL_MIN = 32775
GL_MAX = 32776
GL_BLEND_EQUATION = 32777
GL_BLEND_EQUATION_RGB = 32777
GL_FUNC_SUBTRACT = 32778
GL_FUNC_REVERSE_SUBTRACT = 32779
GL_CONVOLUTION_1D = 32784
GL_CONVOLUTION_2D = 32785
GL_SEPARABLE_2D = 32786
GL_HISTOGRAM = 32804
GL_PROXY_HISTOGRAM = 32805
GL_MINMAX = 32814
GL_UNSIGNED_BYTE_3_3_2 = 32818
GL_UNSIGNED_SHORT_4_4_4_4 = 32819
GL_UNSIGNED_SHORT_5_5_5_1 = 32820
GL_UNSIGNED_INT_8_8_8_8 = 32821
GL_UNSIGNED_INT_10_10_10_2 = 32822
GL_POLYGON_OFFSET_FILL = 32823
GL_POLYGON_OFFSET_FACTOR = 32824
GL_RESCALE_NORMAL = 32826
GL_ALPHA4 = 32827
GL_ALPHA8 = 32828
GL_ALPHA12 = 32829
GL_ALPHA16 = 32830
GL_LUMINANCE4 = 32831
GL_LUMINANCE8 = 32832
GL_LUMINANCE12 = 32833
GL_LUMINANCE16 = 32834
GL_LUMINANCE4_ALPHA4 = 32835
GL_LUMINANCE6_ALPHA2 = 32836
GL_LUMINANCE8_ALPHA8 = 32837
GL_LUMINANCE12_ALPHA4 = 32838
GL_LUMINANCE12_ALPHA12 = 32839
GL_LUMINANCE16_ALPHA16 = 32840
GL_INTENSITY = 32841
GL_INTENSITY4 = 32842
GL_INTENSITY8 = 32843
GL_INTENSITY12 = 32844
GL_INTENSITY16 = 32845
GL_RGB4 = 32847
GL_RGB5 = 32848
GL_RGB8 = 32849
GL_RGB10 = 32850
GL_RGB12 = 32851
GL_RGB16 = 32852
GL_RGBA2 = 32853
GL_RGBA4 = 32854
GL_RGB5_A1 = 32855
GL_RGBA8 = 32856
GL_RGB10_A2 = 32857
GL_RGBA12 = 32858
GL_RGBA16 = 32859
GL_TEXTURE_RED_SIZE = 32860
GL_TEXTURE_GREEN_SIZE = 32861
GL_TEXTURE_BLUE_SIZE = 32862
GL_TEXTURE_ALPHA_SIZE = 32863
GL_TEXTURE_LUMINANCE_SIZE = 32864
GL_TEXTURE_INTENSITY_SIZE = 32865
GL_PROXY_TEXTURE_1D = 32867
GL_PROXY_TEXTURE_2D = 32868
GL_TEXTURE_PRIORITY = 32870
GL_TEXTURE_RESIDENT = 32871
GL_TEXTURE_BINDING_1D = 32872
GL_TEXTURE_BINDING_2D = 32873
GL_TEXTURE_BINDING_3D = 32874
GL_PACK_SKIP_IMAGES = 32875
GL_PACK_IMAGE_HEIGHT = 32876
GL_UNPACK_SKIP_IMAGES = 32877
GL_UNPACK_IMAGE_HEIGHT = 32878
GL_TEXTURE_3D = 32879
GL_PROXY_TEXTURE_3D = 32880
GL_TEXTURE_DEPTH = 32881
GL_TEXTURE_WRAP_R = 32882
GL_MAX_3D_TEXTURE_SIZE = 32883
GL_VERTEX_ARRAY = 32884
GL_NORMAL_ARRAY = 32885
GL_COLOR_ARRAY = 32886
GL_INDEX_ARRAY = 32887
GL_TEXTURE_COORD_ARRAY = 32888
GL_EDGE_FLAG_ARRAY = 32889
GL_VERTEX_ARRAY_SIZE = 32890
GL_VERTEX_ARRAY_TYPE = 32891
GL_VERTEX_ARRAY_STRIDE = 32892
GL_NORMAL_ARRAY_TYPE = 32894
GL_NORMAL_ARRAY_STRIDE = 32895
GL_COLOR_ARRAY_SIZE = 32897
GL_COLOR_ARRAY_TYPE = 32898
GL_COLOR_ARRAY_STRIDE = 32899
GL_INDEX_ARRAY_TYPE = 32901
GL_INDEX_ARRAY_STRIDE = 32902
GL_TEXTURE_COORD_ARRAY_SIZE = 32904
GL_TEXTURE_COORD_ARRAY_TYPE = 32905
GL_TEXTURE_COORD_ARRAY_STRIDE = 32906
GL_EDGE_FLAG_ARRAY_STRIDE = 32908
GL_VERTEX_ARRAY_POINTER = 32910
GL_NORMAL_ARRAY_POINTER = 32911
GL_COLOR_ARRAY_POINTER = 32912
GL_INDEX_ARRAY_POINTER = 32913
GL_TEXTURE_COORD_ARRAY_POINTER = 32914
GL_EDGE_FLAG_ARRAY_POINTER = 32915
GL_MULTISAMPLE = 32925
GL_MULTISAMPLE_ARB = 32925
GL_SAMPLE_ALPHA_TO_COVERAGE = 32926
GL_SAMPLE_ALPHA_TO_COVERAGE_ARB = 32926
GL_SAMPLE_ALPHA_TO_ONE = 32927
GL_SAMPLE_ALPHA_TO_ONE_ARB = 32927
GL_SAMPLE_COVERAGE = 32928
GL_SAMPLE_COVERAGE_ARB = 32928
GL_SAMPLE_BUFFERS = 32936
GL_SAMPLE_BUFFERS_ARB = 32936
GL_SAMPLES = 32937
GL_SAMPLES_ARB = 32937
GL_SAMPLE_COVERAGE_VALUE = 32938
GL_SAMPLE_COVERAGE_VALUE_ARB = 32938
GL_SAMPLE_COVERAGE_INVERT = 32939
GL_SAMPLE_COVERAGE_INVERT_ARB = 32939
GL_BLEND_DST_RGB = 32968
GL_BLEND_SRC_RGB = 32969
GL_BLEND_DST_ALPHA = 32970
GL_BLEND_SRC_ALPHA = 32971
GL_COLOR_TABLE = 32976
GL_POST_CONVOLUTION_COLOR_TABLE = 32977
GL_POST_COLOR_MATRIX_COLOR_TABLE = 32978
GL_PROXY_COLOR_TABLE = 32979
GL_PROXY_POST_CONVOLUTION_COLOR_TABLE = 32980
GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE = 32981
GL_BGR = 32992
GL_BGRA = 32993
GL_MAX_ELEMENTS_VERTICES = 33000
GL_MAX_ELEMENTS_INDICES = 33001
GL_PARAMETER_BUFFER = 33006
GL_PARAMETER_BUFFER_BINDING = 33007
GL_POINT_SIZE_MIN = 33062
GL_POINT_SIZE_MAX = 33063
GL_POINT_FADE_THRESHOLD_SIZE = 33064
GL_POINT_DISTANCE_ATTENUATION = 33065
GL_CLAMP_TO_BORDER = 33069
GL_CLAMP_TO_EDGE = 33071
GL_TEXTURE_MIN_LOD = 33082
GL_TEXTURE_MAX_LOD = 33083
GL_TEXTURE_BASE_LEVEL = 33084
GL_TEXTURE_MAX_LEVEL = 33085
GL_GENERATE_MIPMAP = 33169
GL_GENERATE_MIPMAP_HINT = 33170
GL_DEPTH_COMPONENT16 = 33189
GL_DEPTH_COMPONENT24 = 33190
GL_DEPTH_COMPONENT32 = 33191
GL_LIGHT_MODEL_COLOR_CONTROL = 33272
GL_SINGLE_COLOR = 33273
GL_SEPARATE_SPECULAR_COLOR = 33274
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING = 33296
GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE = 33297
GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE = 33298
GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE = 33299
GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE = 33300
GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE = 33301
GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE = 33302
GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE = 33303
GL_FRAMEBUFFER_DEFAULT = 33304
GL_FRAMEBUFFER_UNDEFINED = 33305
GL_DEPTH_STENCIL_ATTACHMENT = 33306
GL_MAJOR_VERSION = 33307
GL_MINOR_VERSION = 33308
GL_NUM_EXTENSIONS = 33309
GL_CONTEXT_FLAGS = 33310
GL_BUFFER_IMMUTABLE_STORAGE = 33311
GL_BUFFER_STORAGE_FLAGS = 33312
GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED = 33313
GL_INDEX = 33314
GL_COMPRESSED_RED = 33317
GL_COMPRESSED_RG = 33318
GL_RG = 33319
GL_RG_INTEGER = 33320
GL_R8 = 33321
GL_R16 = 33322
GL_RG8 = 33323
GL_RG16 = 33324
GL_R16F = 33325
GL_R32F = 33326
GL_RG16F = 33327
GL_RG32F = 33328
GL_R8I = 33329
GL_R8UI = 33330
GL_R16I = 33331
GL_R16UI = 33332
GL_R32I = 33333
GL_R32UI = 33334
GL_RG8I = 33335
GL_RG8UI = 33336
GL_RG16I = 33337
GL_RG16UI = 33338
GL_RG32I = 33339
GL_RG32UI = 33340
GL_DEBUG_OUTPUT_SYNCHRONOUS = 33346
GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH = 33347
GL_DEBUG_CALLBACK_FUNCTION = 33348
GL_DEBUG_CALLBACK_USER_PARAM = 33349
GL_DEBUG_SOURCE_API = 33350
GL_DEBUG_SOURCE_WINDOW_SYSTEM = 33351
GL_DEBUG_SOURCE_SHADER_COMPILER = 33352
GL_DEBUG_SOURCE_THIRD_PARTY = 33353
GL_DEBUG_SOURCE_APPLICATION = 33354
GL_DEBUG_SOURCE_OTHER = 33355
GL_DEBUG_TYPE_ERROR = 33356
GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR = 33357
GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR = 33358
GL_DEBUG_TYPE_PORTABILITY = 33359
GL_DEBUG_TYPE_PERFORMANCE = 33360
GL_DEBUG_TYPE_OTHER = 33361
GL_LOSE_CONTEXT_ON_RESET = 33362
GL_GUILTY_CONTEXT_RESET = 33363
GL_INNOCENT_CONTEXT_RESET = 33364
GL_UNKNOWN_CONTEXT_RESET = 33365
GL_RESET_NOTIFICATION_STRATEGY = 33366
GL_PROGRAM_BINARY_RETRIEVABLE_HINT = 33367
GL_PROGRAM_SEPARABLE = 33368
GL_ACTIVE_PROGRAM = 33369
GL_PROGRAM_PIPELINE_BINDING = 33370
GL_MAX_VIEWPORTS = 33371
GL_VIEWPORT_SUBPIXEL_BITS = 33372
GL_VIEWPORT_BOUNDS_RANGE = 33373
GL_LAYER_PROVOKING_VERTEX = 33374
GL_VIEWPORT_INDEX_PROVOKING_VERTEX = 33375
GL_UNDEFINED_VERTEX = 33376
GL_NO_RESET_NOTIFICATION = 33377
GL_MAX_COMPUTE_SHARED_MEMORY_SIZE = 33378
GL_MAX_COMPUTE_UNIFORM_COMPONENTS = 33379
GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS = 33380
GL_MAX_COMPUTE_ATOMIC_COUNTERS = 33381
GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS = 33382
GL_COMPUTE_WORK_GROUP_SIZE = 33383
GL_DEBUG_TYPE_MARKER = 33384
GL_DEBUG_TYPE_PUSH_GROUP = 33385
GL_DEBUG_TYPE_POP_GROUP = 33386
GL_DEBUG_SEVERITY_NOTIFICATION = 33387
GL_MAX_DEBUG_GROUP_STACK_DEPTH = 33388
GL_DEBUG_GROUP_STACK_DEPTH = 33389
GL_MAX_UNIFORM_LOCATIONS = 33390
GL_INTERNALFORMAT_SUPPORTED = 33391
GL_INTERNALFORMAT_PREFERRED = 33392
GL_INTERNALFORMAT_RED_SIZE = 33393
GL_INTERNALFORMAT_GREEN_SIZE = 33394
GL_INTERNALFORMAT_BLUE_SIZE = 33395
GL_INTERNALFORMAT_ALPHA_SIZE = 33396
GL_INTERNALFORMAT_DEPTH_SIZE = 33397
GL_INTERNALFORMAT_STENCIL_SIZE = 33398
GL_INTERNALFORMAT_SHARED_SIZE = 33399
GL_INTERNALFORMAT_RED_TYPE = 33400
GL_INTERNALFORMAT_GREEN_TYPE = 33401
GL_INTERNALFORMAT_BLUE_TYPE = 33402
GL_INTERNALFORMAT_ALPHA_TYPE = 33403
GL_INTERNALFORMAT_DEPTH_TYPE = 33404
GL_INTERNALFORMAT_STENCIL_TYPE = 33405
GL_MAX_WIDTH = 33406
GL_MAX_HEIGHT = 33407
GL_MAX_DEPTH = 33408
GL_MAX_LAYERS = 33409
GL_MAX_COMBINED_DIMENSIONS = 33410
GL_COLOR_COMPONENTS = 33411
GL_DEPTH_COMPONENTS = 33412
GL_STENCIL_COMPONENTS = 33413
GL_COLOR_RENDERABLE = 33414
GL_DEPTH_RENDERABLE = 33415
GL_STENCIL_RENDERABLE = 33416
GL_FRAMEBUFFER_RENDERABLE = 33417
GL_FRAMEBUFFER_RENDERABLE_LAYERED = 33418
GL_FRAMEBUFFER_BLEND = 33419
GL_READ_PIXELS = 33420
GL_READ_PIXELS_FORMAT = 33421
GL_READ_PIXELS_TYPE = 33422
GL_TEXTURE_IMAGE_FORMAT = 33423
GL_TEXTURE_IMAGE_TYPE = 33424
GL_GET_TEXTURE_IMAGE_FORMAT = 33425
GL_GET_TEXTURE_IMAGE_TYPE = 33426
GL_MIPMAP = 33427
GL_MANUAL_GENERATE_MIPMAP = 33428
GL_AUTO_GENERATE_MIPMAP = 33429
GL_COLOR_ENCODING = 33430
GL_SRGB_READ = 33431
GL_SRGB_WRITE = 33432
GL_FILTER = 33434
GL_VERTEX_TEXTURE = 33435
GL_TESS_CONTROL_TEXTURE = 33436
GL_TESS_EVALUATION_TEXTURE = 33437
GL_GEOMETRY_TEXTURE = 33438
GL_FRAGMENT_TEXTURE = 33439
GL_COMPUTE_TEXTURE = 33440
GL_TEXTURE_SHADOW = 33441
GL_TEXTURE_GATHER = 33442
GL_TEXTURE_GATHER_SHADOW = 33443
GL_SHADER_IMAGE_LOAD = 33444
GL_SHADER_IMAGE_STORE = 33445
GL_SHADER_IMAGE_ATOMIC = 33446
GL_IMAGE_TEXEL_SIZE = 33447
GL_IMAGE_COMPATIBILITY_CLASS = 33448
GL_IMAGE_PIXEL_FORMAT = 33449
GL_IMAGE_PIXEL_TYPE = 33450
GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST = 33452
GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST = 33453
GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE = 33454
GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE = 33455
GL_TEXTURE_COMPRESSED_BLOCK_WIDTH = 33457
GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT = 33458
GL_TEXTURE_COMPRESSED_BLOCK_SIZE = 33459
GL_CLEAR_BUFFER = 33460
GL_TEXTURE_VIEW = 33461
GL_VIEW_COMPATIBILITY_CLASS = 33462
GL_FULL_SUPPORT = 33463
GL_CAVEAT_SUPPORT = 33464
GL_IMAGE_CLASS_4_X_32 = 33465
GL_IMAGE_CLASS_2_X_32 = 33466
GL_IMAGE_CLASS_1_X_32 = 33467
GL_IMAGE_CLASS_4_X_16 = 33468
GL_IMAGE_CLASS_2_X_16 = 33469
GL_IMAGE_CLASS_1_X_16 = 33470
GL_IMAGE_CLASS_4_X_8 = 33471
GL_IMAGE_CLASS_2_X_8 = 33472
GL_IMAGE_CLASS_1_X_8 = 33473
GL_IMAGE_CLASS_11_11_10 = 33474
GL_IMAGE_CLASS_10_10_10_2 = 33475
GL_VIEW_CLASS_128_BITS = 33476
GL_VIEW_CLASS_96_BITS = 33477
GL_VIEW_CLASS_64_BITS = 33478
GL_VIEW_CLASS_48_BITS = 33479
GL_VIEW_CLASS_32_BITS = 33480
GL_VIEW_CLASS_24_BITS = 33481
GL_VIEW_CLASS_16_BITS = 33482
GL_VIEW_CLASS_8_BITS = 33483
GL_VIEW_CLASS_S3TC_DXT1_RGB = 33484
GL_VIEW_CLASS_S3TC_DXT1_RGBA = 33485
GL_VIEW_CLASS_S3TC_DXT3_RGBA = 33486
GL_VIEW_CLASS_S3TC_DXT5_RGBA = 33487
GL_VIEW_CLASS_RGTC1_RED = 33488
GL_VIEW_CLASS_RGTC2_RG = 33489
GL_VIEW_CLASS_BPTC_UNORM = 33490
GL_VIEW_CLASS_BPTC_FLOAT = 33491
GL_VERTEX_ATTRIB_BINDING = 33492
GL_VERTEX_ATTRIB_RELATIVE_OFFSET = 33493
GL_VERTEX_BINDING_DIVISOR = 33494
GL_VERTEX_BINDING_OFFSET = 33495
GL_VERTEX_BINDING_STRIDE = 33496
GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET = 33497
GL_MAX_VERTEX_ATTRIB_BINDINGS = 33498
GL_TEXTURE_VIEW_MIN_LEVEL = 33499
GL_TEXTURE_VIEW_NUM_LEVELS = 33500
GL_TEXTURE_VIEW_MIN_LAYER = 33501
GL_TEXTURE_VIEW_NUM_LAYERS = 33502
GL_TEXTURE_IMMUTABLE_LEVELS = 33503
GL_BUFFER = 33504
GL_SHADER = 33505
GL_PROGRAM = 33506
GL_QUERY = 33507
GL_PROGRAM_PIPELINE = 33508
GL_MAX_VERTEX_ATTRIB_STRIDE = 33509
GL_SAMPLER = 33510
GL_DISPLAY_LIST = 33511
GL_MAX_LABEL_LENGTH = 33512
GL_NUM_SHADING_LANGUAGE_VERSIONS = 33513
GL_QUERY_TARGET = 33514
GL_TRANSFORM_FEEDBACK_OVERFLOW = 33516
GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW = 33517
GL_VERTICES_SUBMITTED = 33518
GL_PRIMITIVES_SUBMITTED = 33519
GL_VERTEX_SHADER_INVOCATIONS = 33520
GL_TESS_CONTROL_SHADER_PATCHES = 33521
GL_TESS_EVALUATION_SHADER_INVOCATIONS = 33522
GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED = 33523
GL_FRAGMENT_SHADER_INVOCATIONS = 33524
GL_COMPUTE_SHADER_INVOCATIONS = 33525
GL_CLIPPING_INPUT_PRIMITIVES = 33526
GL_CLIPPING_OUTPUT_PRIMITIVES = 33527
GL_MAX_CULL_DISTANCES = 33529
GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES = 33530
GL_CONTEXT_RELEASE_BEHAVIOR = 33531
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH = 33532
GL_UNSIGNED_BYTE_2_3_3_REV = 33634
GL_UNSIGNED_SHORT_5_6_5 = 33635
GL_UNSIGNED_SHORT_5_6_5_REV = 33636
GL_UNSIGNED_SHORT_4_4_4_4_REV = 33637
GL_UNSIGNED_SHORT_1_5_5_5_REV = 33638
GL_UNSIGNED_INT_8_8_8_8_REV = 33639
GL_UNSIGNED_INT_2_10_10_10_REV = 33640
GL_MIRRORED_REPEAT = 33648
GL_COMPRESSED_RGB_S3TC_DXT1_EXT = 33776
GL_COMPRESSED_RGBA_S3TC_DXT1_EXT = 33777
GL_COMPRESSED_RGBA_S3TC_DXT3_EXT = 33778
GL_COMPRESSED_RGBA_S3TC_DXT5_EXT = 33779
GL_FOG_COORDINATE_SOURCE = 33872
GL_FOG_COORD_SRC = 33872
GL_FOG_COORDINATE = 33873
GL_FOG_COORD = 33873
GL_FRAGMENT_DEPTH = 33874
GL_CURRENT_FOG_COORDINATE = 33875
GL_CURRENT_FOG_COORD = 33875
GL_FOG_COORDINATE_ARRAY_TYPE = 33876
GL_FOG_COORD_ARRAY_TYPE = 33876
GL_FOG_COORDINATE_ARRAY_STRIDE = 33877
GL_FOG_COORD_ARRAY_STRIDE = 33877
GL_FOG_COORDINATE_ARRAY_POINTER = 33878
GL_FOG_COORD_ARRAY_POINTER = 33878
GL_FOG_COORDINATE_ARRAY = 33879
GL_FOG_COORD_ARRAY = 33879
GL_COLOR_SUM = 33880
GL_CURRENT_SECONDARY_COLOR = 33881
GL_SECONDARY_COLOR_ARRAY_SIZE = 33882
GL_SECONDARY_COLOR_ARRAY_TYPE = 33883
GL_SECONDARY_COLOR_ARRAY_STRIDE = 33884
GL_SECONDARY_COLOR_ARRAY_POINTER = 33885
GL_SECONDARY_COLOR_ARRAY = 33886
GL_CURRENT_RASTER_SECONDARY_COLOR = 33887
GL_ALIASED_POINT_SIZE_RANGE = 33901
GL_ALIASED_LINE_WIDTH_RANGE = 33902
GL_TEXTURE0 = 33984
GL_TEXTURE1 = 33985
GL_TEXTURE2 = 33986
GL_TEXTURE3 = 33987
GL_TEXTURE4 = 33988
GL_TEXTURE5 = 33989
GL_TEXTURE6 = 33990
GL_TEXTURE7 = 33991
GL_TEXTURE8 = 33992
GL_TEXTURE9 = 33993
GL_TEXTURE10 = 33994
GL_TEXTURE11 = 33995
GL_TEXTURE12 = 33996
GL_TEXTURE13 = 33997
GL_TEXTURE14 = 33998
GL_TEXTURE15 = 33999
GL_TEXTURE16 = 34000
GL_TEXTURE17 = 34001
GL_TEXTURE18 = 34002
GL_TEXTURE19 = 34003
GL_TEXTURE20 = 34004
GL_TEXTURE21 = 34005
GL_TEXTURE22 = 34006
GL_TEXTURE23 = 34007
GL_TEXTURE24 = 34008
GL_TEXTURE25 = 34009
GL_TEXTURE26 = 34010
GL_TEXTURE27 = 34011
GL_TEXTURE28 = 34012
GL_TEXTURE29 = 34013
GL_TEXTURE30 = 34014
GL_TEXTURE31 = 34015
GL_ACTIVE_TEXTURE = 34016
GL_CLIENT_ACTIVE_TEXTURE = 34017
GL_MAX_TEXTURE_UNITS = 34018
GL_TRANSPOSE_MODELVIEW_MATRIX = 34019
GL_TRANSPOSE_PROJECTION_MATRIX = 34020
GL_TRANSPOSE_TEXTURE_MATRIX = 34021
GL_TRANSPOSE_COLOR_MATRIX = 34022
GL_SUBTRACT = 34023
GL_MAX_RENDERBUFFER_SIZE = 34024
GL_MAX_RENDERBUFFER_SIZE_EXT = 34024
GL_COMPRESSED_ALPHA = 34025
GL_COMPRESSED_LUMINANCE = 34026
GL_COMPRESSED_LUMINANCE_ALPHA = 34027
GL_COMPRESSED_INTENSITY = 34028
GL_COMPRESSED_RGB = 34029
GL_COMPRESSED_RGBA = 34030
GL_TEXTURE_COMPRESSION_HINT = 34031
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER = 34032
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER = 34033
GL_TEXTURE_RECTANGLE = 34037
GL_TEXTURE_BINDING_RECTANGLE = 34038
GL_PROXY_TEXTURE_RECTANGLE = 34039
GL_MAX_RECTANGLE_TEXTURE_SIZE = 34040
GL_DEPTH_STENCIL = 34041
GL_UNSIGNED_INT_24_8 = 34042
GL_MAX_TEXTURE_LOD_BIAS = 34045
GL_TEXTURE_MAX_ANISOTROPY = 34046
GL_MAX_TEXTURE_MAX_ANISOTROPY = 34047
GL_TEXTURE_FILTER_CONTROL = 34048
GL_TEXTURE_LOD_BIAS = 34049
GL_INCR_WRAP = 34055
GL_DECR_WRAP = 34056
GL_NORMAL_MAP = 34065
GL_REFLECTION_MAP = 34066
GL_TEXTURE_CUBE_MAP = 34067
GL_TEXTURE_BINDING_CUBE_MAP = 34068
GL_TEXTURE_CUBE_MAP_POSITIVE_X = 34069
GL_TEXTURE_CUBE_MAP_NEGATIVE_X = 34070
GL_TEXTURE_CUBE_MAP_POSITIVE_Y = 34071
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y = 34072
GL_TEXTURE_CUBE_MAP_POSITIVE_Z = 34073
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z = 34074
GL_PROXY_TEXTURE_CUBE_MAP = 34075
GL_MAX_CUBE_MAP_TEXTURE_SIZE = 34076
GL_COMBINE = 34160
GL_COMBINE_RGB = 34161
GL_COMBINE_ALPHA = 34162
GL_RGB_SCALE = 34163
GL_ADD_SIGNED = 34164
GL_INTERPOLATE = 34165
GL_CONSTANT = 34166
GL_PRIMARY_COLOR = 34167
GL_PREVIOUS = 34168
GL_SOURCE0_RGB = 34176
GL_SRC0_RGB = 34176
GL_SOURCE1_RGB = 34177
GL_SRC1_RGB = 34177
GL_SOURCE2_RGB = 34178
GL_SRC2_RGB = 34178
GL_SOURCE0_ALPHA = 34184
GL_SRC0_ALPHA = 34184
GL_SOURCE1_ALPHA = 34185
GL_SRC1_ALPHA = 34185
GL_SOURCE2_ALPHA = 34186
GL_SRC2_ALPHA = 34186
GL_OPERAND0_RGB = 34192
GL_OPERAND1_RGB = 34193
GL_OPERAND2_RGB = 34194
GL_OPERAND0_ALPHA = 34200
GL_OPERAND1_ALPHA = 34201
GL_OPERAND2_ALPHA = 34202
GL_VERTEX_ARRAY_BINDING = 34229
GL_VERTEX_ATTRIB_ARRAY_ENABLED = 34338
GL_VERTEX_ATTRIB_ARRAY_SIZE = 34339
GL_VERTEX_ATTRIB_ARRAY_STRIDE = 34340
GL_VERTEX_ATTRIB_ARRAY_TYPE = 34341
GL_CURRENT_VERTEX_ATTRIB = 34342
GL_VERTEX_PROGRAM_POINT_SIZE = 34370
GL_PROGRAM_POINT_SIZE = 34370
GL_VERTEX_PROGRAM_TWO_SIDE = 34371
GL_VERTEX_ATTRIB_ARRAY_POINTER = 34373
GL_DEPTH_CLAMP = 34383
GL_TEXTURE_COMPRESSED_IMAGE_SIZE = 34464
GL_TEXTURE_COMPRESSED = 34465
GL_NUM_COMPRESSED_TEXTURE_FORMATS = 34466
GL_COMPRESSED_TEXTURE_FORMATS = 34467
GL_DOT3_RGB = 34478
GL_DOT3_RGBA = 34479
GL_PROGRAM_BINARY_LENGTH = 34625
GL_MIRROR_CLAMP_TO_EDGE = 34627
GL_VERTEX_ATTRIB_ARRAY_LONG = 34638
GL_BUFFER_SIZE = 34660
GL_BUFFER_USAGE = 34661
GL_NUM_PROGRAM_BINARY_FORMATS = 34814
GL_PROGRAM_BINARY_FORMATS = 34815
GL_STENCIL_BACK_FUNC = 34816
GL_STENCIL_BACK_FAIL = 34817
GL_STENCIL_BACK_PASS_DEPTH_FAIL = 34818
GL_STENCIL_BACK_PASS_DEPTH_PASS = 34819
GL_RGBA32F = 34836
GL_RGB32F = 34837
GL_RGBA16F = 34842
GL_RGB16F = 34843
GL_MAX_DRAW_BUFFERS = 34852
GL_DRAW_BUFFER0 = 34853
GL_DRAW_BUFFER1 = 34854
GL_DRAW_BUFFER2 = 34855
GL_DRAW_BUFFER3 = 34856
GL_DRAW_BUFFER4 = 34857
GL_DRAW_BUFFER5 = 34858
GL_DRAW_BUFFER6 = 34859
GL_DRAW_BUFFER7 = 34860
GL_DRAW_BUFFER8 = 34861
GL_DRAW_BUFFER9 = 34862
GL_DRAW_BUFFER10 = 34863
GL_DRAW_BUFFER11 = 34864
GL_DRAW_BUFFER12 = 34865
GL_DRAW_BUFFER13 = 34866
GL_DRAW_BUFFER14 = 34867
GL_DRAW_BUFFER15 = 34868
GL_BLEND_EQUATION_ALPHA = 34877
GL_TEXTURE_DEPTH_SIZE = 34890
GL_DEPTH_TEXTURE_MODE = 34891
GL_TEXTURE_COMPARE_MODE = 34892
GL_TEXTURE_COMPARE_FUNC = 34893
GL_COMPARE_R_TO_TEXTURE = 34894
GL_COMPARE_REF_TO_TEXTURE = 34894
GL_TEXTURE_CUBE_MAP_SEAMLESS = 34895
GL_POINT_SPRITE = 34913
GL_COORD_REPLACE = 34914
GL_QUERY_COUNTER_BITS = 34916
GL_CURRENT_QUERY = 34917
GL_QUERY_RESULT = 34918
GL_QUERY_RESULT_AVAILABLE = 34919
GL_MAX_VERTEX_ATTRIBS = 34921
GL_VERTEX_ATTRIB_ARRAY_NORMALIZED = 34922
GL_MAX_TESS_CONTROL_INPUT_COMPONENTS = 34924
GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS = 34925
GL_MAX_TEXTURE_COORDS = 34929
GL_MAX_TEXTURE_IMAGE_UNITS = 34930
GL_GEOMETRY_SHADER_INVOCATIONS = 34943
GL_ARRAY_BUFFER = 34962
GL_ELEMENT_ARRAY_BUFFER = 34963
GL_ARRAY_BUFFER_BINDING = 34964
GL_ELEMENT_ARRAY_BUFFER_BINDING = 34965
GL_VERTEX_ARRAY_BUFFER_BINDING = 34966
GL_NORMAL_ARRAY_BUFFER_BINDING = 34967
GL_COLOR_ARRAY_BUFFER_BINDING = 34968
GL_INDEX_ARRAY_BUFFER_BINDING = 34969
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING = 34970
GL_EDGE_FLAG_ARRAY_BUFFER_BINDING = 34971
GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING = 34972
GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING = 34973
GL_FOG_COORD_ARRAY_BUFFER_BINDING = 34973
GL_WEIGHT_ARRAY_BUFFER_BINDING = 34974
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING = 34975
GL_READ_ONLY = 35000
GL_WRITE_ONLY = 35001
GL_READ_WRITE = 35002
GL_BUFFER_ACCESS = 35003
GL_BUFFER_MAPPED = 35004
GL_BUFFER_MAP_POINTER = 35005
GL_TIME_ELAPSED = 35007
GL_STREAM_DRAW = 35040
GL_STREAM_READ = 35041
GL_STREAM_COPY = 35042
GL_STATIC_DRAW = 35044
GL_STATIC_READ = 35045
GL_STATIC_COPY = 35046
GL_DYNAMIC_DRAW = 35048
GL_DYNAMIC_READ = 35049
GL_DYNAMIC_COPY = 35050
GL_PIXEL_PACK_BUFFER = 35051
GL_PIXEL_UNPACK_BUFFER = 35052
GL_PIXEL_PACK_BUFFER_BINDING = 35053
GL_PIXEL_UNPACK_BUFFER_BINDING = 35055
GL_DEPTH24_STENCIL8 = 35056
GL_TEXTURE_STENCIL_SIZE = 35057
GL_SRC1_COLOR = 35065
GL_ONE_MINUS_SRC1_COLOR = 35066
GL_ONE_MINUS_SRC1_ALPHA = 35067
GL_MAX_DUAL_SOURCE_DRAW_BUFFERS = 35068
GL_VERTEX_ATTRIB_ARRAY_INTEGER = 35069
GL_VERTEX_ATTRIB_ARRAY_DIVISOR = 35070
GL_MAX_ARRAY_TEXTURE_LAYERS = 35071
GL_MIN_PROGRAM_TEXEL_OFFSET = 35076
GL_MAX_PROGRAM_TEXEL_OFFSET = 35077
GL_SAMPLES_PASSED = 35092
GL_GEOMETRY_VERTICES_OUT = 35094
GL_GEOMETRY_INPUT_TYPE = 35095
GL_GEOMETRY_OUTPUT_TYPE = 35096
GL_SAMPLER_BINDING = 35097
GL_CLAMP_VERTEX_COLOR = 35098
GL_CLAMP_FRAGMENT_COLOR = 35099
GL_CLAMP_READ_COLOR = 35100
GL_FIXED_ONLY = 35101
GL_UNIFORM_BUFFER = 35345
GL_UNIFORM_BUFFER_BINDING = 35368
GL_UNIFORM_BUFFER_START = 35369
GL_UNIFORM_BUFFER_SIZE = 35370
GL_MAX_VERTEX_UNIFORM_BLOCKS = 35371
GL_MAX_GEOMETRY_UNIFORM_BLOCKS = 35372
GL_MAX_FRAGMENT_UNIFORM_BLOCKS = 35373
GL_MAX_COMBINED_UNIFORM_BLOCKS = 35374
GL_MAX_UNIFORM_BUFFER_BINDINGS = 35375
GL_MAX_UNIFORM_BLOCK_SIZE = 35376
GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS = 35377
GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS = 35378
GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS = 35379
GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT = 35380
GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH = 35381
GL_ACTIVE_UNIFORM_BLOCKS = 35382
GL_UNIFORM_TYPE = 35383
GL_UNIFORM_SIZE = 35384
GL_UNIFORM_NAME_LENGTH = 35385
GL_UNIFORM_BLOCK_INDEX = 35386
GL_UNIFORM_OFFSET = 35387
GL_UNIFORM_ARRAY_STRIDE = 35388
GL_UNIFORM_MATRIX_STRIDE = 35389
GL_UNIFORM_IS_ROW_MAJOR = 35390
GL_UNIFORM_BLOCK_BINDING = 35391
GL_UNIFORM_BLOCK_DATA_SIZE = 35392
GL_UNIFORM_BLOCK_NAME_LENGTH = 35393
GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS = 35394
GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES = 35395
GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER = 35396
GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER = 35397
GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER = 35398
GL_FRAGMENT_SHADER = 35632
GL_VERTEX_SHADER = 35633
GL_MAX_FRAGMENT_UNIFORM_COMPONENTS = 35657
GL_MAX_VERTEX_UNIFORM_COMPONENTS = 35658
GL_MAX_VARYING_FLOATS = 35659
GL_MAX_VARYING_COMPONENTS = 35659
GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS = 35660
GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS = 35661
GL_SHADER_TYPE = 35663
GL_FLOAT_VEC2 = 35664
GL_FLOAT_VEC3 = 35665
GL_FLOAT_VEC4 = 35666
GL_INT_VEC2 = 35667
GL_INT_VEC3 = 35668
GL_INT_VEC4 = 35669
GL_BOOL = 35670
GL_BOOL_VEC2 = 35671
GL_BOOL_VEC3 = 35672
GL_BOOL_VEC4 = 35673
GL_FLOAT_MAT2 = 35674
GL_FLOAT_MAT3 = 35675
GL_FLOAT_MAT4 = 35676
GL_SAMPLER_1D = 35677
GL_SAMPLER_2D = 35678
GL_SAMPLER_3D = 35679
GL_SAMPLER_CUBE = 35680
GL_SAMPLER_1D_SHADOW = 35681
GL_SAMPLER_2D_SHADOW = 35682
GL_SAMPLER_2D_RECT = 35683
GL_SAMPLER_2D_RECT_SHADOW = 35684
GL_FLOAT_MAT2x3 = 35685
GL_FLOAT_MAT2x4 = 35686
GL_FLOAT_MAT3x2 = 35687
GL_FLOAT_MAT3x4 = 35688
GL_FLOAT_MAT4x2 = 35689
GL_FLOAT_MAT4x3 = 35690
GL_DELETE_STATUS = 35712
GL_COMPILE_STATUS = 35713
GL_LINK_STATUS = 35714
GL_VALIDATE_STATUS = 35715
GL_INFO_LOG_LENGTH = 35716
GL_ATTACHED_SHADERS = 35717
GL_ACTIVE_UNIFORMS = 35718
GL_ACTIVE_UNIFORM_MAX_LENGTH = 35719
GL_SHADER_SOURCE_LENGTH = 35720
GL_ACTIVE_ATTRIBUTES = 35721
GL_ACTIVE_ATTRIBUTE_MAX_LENGTH = 35722
GL_FRAGMENT_SHADER_DERIVATIVE_HINT = 35723
GL_SHADING_LANGUAGE_VERSION = 35724
GL_CURRENT_PROGRAM = 35725
GL_IMPLEMENTATION_COLOR_READ_TYPE = 35738
GL_IMPLEMENTATION_COLOR_READ_FORMAT = 35739
GL_TEXTURE_RED_TYPE = 35856
GL_TEXTURE_GREEN_TYPE = 35857
GL_TEXTURE_BLUE_TYPE = 35858
GL_TEXTURE_ALPHA_TYPE = 35859
GL_TEXTURE_LUMINANCE_TYPE = 35860
GL_TEXTURE_INTENSITY_TYPE = 35861
GL_TEXTURE_DEPTH_TYPE = 35862
GL_UNSIGNED_NORMALIZED = 35863
GL_TEXTURE_1D_ARRAY = 35864
GL_PROXY_TEXTURE_1D_ARRAY = 35865
GL_TEXTURE_2D_ARRAY = 35866
GL_PROXY_TEXTURE_2D_ARRAY = 35867
GL_TEXTURE_BINDING_1D_ARRAY = 35868
GL_TEXTURE_BINDING_2D_ARRAY = 35869
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS = 35881
GL_TEXTURE_BUFFER = 35882
GL_TEXTURE_BUFFER_BINDING = 35882
GL_MAX_TEXTURE_BUFFER_SIZE = 35883
GL_TEXTURE_BINDING_BUFFER = 35884
GL_TEXTURE_BUFFER_DATA_STORE_BINDING = 35885
GL_ANY_SAMPLES_PASSED = 35887
GL_SAMPLE_SHADING = 35894
GL_MIN_SAMPLE_SHADING_VALUE = 35895
GL_R11F_G11F_B10F = 35898
GL_UNSIGNED_INT_10F_11F_11F_REV = 35899
GL_RGB9_E5 = 35901
GL_UNSIGNED_INT_5_9_9_9_REV = 35902
GL_TEXTURE_SHARED_SIZE = 35903
GL_SRGB = 35904
GL_SRGB8 = 35905
GL_SRGB_ALPHA = 35906
GL_SRGB8_ALPHA8 = 35907
GL_SLUMINANCE_ALPHA = 35908
GL_SLUMINANCE8_ALPHA8 = 35909
GL_SLUMINANCE = 35910
GL_SLUMINANCE8 = 35911
GL_COMPRESSED_SRGB = 35912
GL_COMPRESSED_SRGB_ALPHA = 35913
GL_COMPRESSED_SLUMINANCE = 35914
GL_COMPRESSED_SLUMINANCE_ALPHA = 35915
GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH = 35958
GL_TRANSFORM_FEEDBACK_BUFFER_MODE = 35967
GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS = 35968
GL_TRANSFORM_FEEDBACK_VARYINGS = 35971
GL_TRANSFORM_FEEDBACK_BUFFER_START = 35972
GL_TRANSFORM_FEEDBACK_BUFFER_SIZE = 35973
GL_PRIMITIVES_GENERATED = 35975
GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN = 35976
GL_RASTERIZER_DISCARD = 35977
GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS = 35978
GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS = 35979
GL_INTERLEAVED_ATTRIBS = 35980
GL_SEPARATE_ATTRIBS = 35981
GL_TRANSFORM_FEEDBACK_BUFFER = 35982
GL_TRANSFORM_FEEDBACK_BUFFER_BINDING = 35983
GL_POINT_SPRITE_COORD_ORIGIN = 36000
GL_LOWER_LEFT = 36001
GL_UPPER_LEFT = 36002
GL_STENCIL_BACK_REF = 36003
GL_STENCIL_BACK_VALUE_MASK = 36004
GL_STENCIL_BACK_WRITEMASK = 36005
GL_FRAMEBUFFER_BINDING = 36006
GL_DRAW_FRAMEBUFFER_BINDING = 36006
GL_FRAMEBUFFER_BINDING_EXT = 36006
GL_RENDERBUFFER_BINDING = 36007
GL_RENDERBUFFER_BINDING_EXT = 36007
GL_READ_FRAMEBUFFER = 36008
GL_DRAW_FRAMEBUFFER = 36009
GL_READ_FRAMEBUFFER_BINDING = 36010
GL_RENDERBUFFER_SAMPLES = 36011
GL_DEPTH_COMPONENT32F = 36012
GL_DEPTH32F_STENCIL8 = 36013
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE = 36048
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT = 36048
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME = 36049
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT = 36049
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL = 36050
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT = 36050
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE = 36051
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT = 36051
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER = 36052
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT = 36052
GL_FRAMEBUFFER_COMPLETE = 36053
GL_FRAMEBUFFER_COMPLETE_EXT = 36053
GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT = 36054
GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT = 36054
GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT = 36055
GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT = 36055
GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT = 36057
GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT = 36058
GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER = 36059
GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT = 36059
GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER = 36060
GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT = 36060
GL_FRAMEBUFFER_UNSUPPORTED = 36061
GL_FRAMEBUFFER_UNSUPPORTED_EXT = 36061
GL_MAX_COLOR_ATTACHMENTS = 36063
GL_MAX_COLOR_ATTACHMENTS_EXT = 36063
GL_COLOR_ATTACHMENT0 = 36064
GL_COLOR_ATTACHMENT0_EXT = 36064
GL_COLOR_ATTACHMENT1 = 36065
GL_COLOR_ATTACHMENT1_EXT = 36065
GL_COLOR_ATTACHMENT2 = 36066
GL_COLOR_ATTACHMENT2_EXT = 36066
GL_COLOR_ATTACHMENT3 = 36067
GL_COLOR_ATTACHMENT3_EXT = 36067
GL_COLOR_ATTACHMENT4 = 36068
GL_COLOR_ATTACHMENT4_EXT = 36068
GL_COLOR_ATTACHMENT5 = 36069
GL_COLOR_ATTACHMENT5_EXT = 36069
GL_COLOR_ATTACHMENT6 = 36070
GL_COLOR_ATTACHMENT6_EXT = 36070
GL_COLOR_ATTACHMENT7 = 36071
GL_COLOR_ATTACHMENT7_EXT = 36071
GL_COLOR_ATTACHMENT8 = 36072
GL_COLOR_ATTACHMENT8_EXT = 36072
GL_COLOR_ATTACHMENT9 = 36073
GL_COLOR_ATTACHMENT9_EXT = 36073
GL_COLOR_ATTACHMENT10 = 36074
GL_COLOR_ATTACHMENT10_EXT = 36074
GL_COLOR_ATTACHMENT11 = 36075
GL_COLOR_ATTACHMENT11_EXT = 36075
GL_COLOR_ATTACHMENT12 = 36076
GL_COLOR_ATTACHMENT12_EXT = 36076
GL_COLOR_ATTACHMENT13 = 36077
GL_COLOR_ATTACHMENT13_EXT = 36077
GL_COLOR_ATTACHMENT14 = 36078
GL_COLOR_ATTACHMENT14_EXT = 36078
GL_COLOR_ATTACHMENT15 = 36079
GL_COLOR_ATTACHMENT15_EXT = 36079
GL_COLOR_ATTACHMENT16 = 36080
GL_COLOR_ATTACHMENT17 = 36081
GL_COLOR_ATTACHMENT18 = 36082
GL_COLOR_ATTACHMENT19 = 36083
GL_COLOR_ATTACHMENT20 = 36084
GL_COLOR_ATTACHMENT21 = 36085
GL_COLOR_ATTACHMENT22 = 36086
GL_COLOR_ATTACHMENT23 = 36087
GL_COLOR_ATTACHMENT24 = 36088
GL_COLOR_ATTACHMENT25 = 36089
GL_COLOR_ATTACHMENT26 = 36090
GL_COLOR_ATTACHMENT27 = 36091
GL_COLOR_ATTACHMENT28 = 36092
GL_COLOR_ATTACHMENT29 = 36093
GL_COLOR_ATTACHMENT30 = 36094
GL_COLOR_ATTACHMENT31 = 36095
GL_DEPTH_ATTACHMENT = 36096
GL_DEPTH_ATTACHMENT_EXT = 36096
GL_STENCIL_ATTACHMENT = 36128
GL_STENCIL_ATTACHMENT_EXT = 36128
GL_FRAMEBUFFER = 36160
GL_FRAMEBUFFER_EXT = 36160
GL_RENDERBUFFER = 36161
GL_RENDERBUFFER_EXT = 36161
GL_RENDERBUFFER_WIDTH = 36162
GL_RENDERBUFFER_WIDTH_EXT = 36162
GL_RENDERBUFFER_HEIGHT = 36163
GL_RENDERBUFFER_HEIGHT_EXT = 36163
GL_RENDERBUFFER_INTERNAL_FORMAT = 36164
GL_RENDERBUFFER_INTERNAL_FORMAT_EXT = 36164
GL_STENCIL_INDEX1 = 36166
GL_STENCIL_INDEX1_EXT = 36166
GL_STENCIL_INDEX4 = 36167
GL_STENCIL_INDEX4_EXT = 36167
GL_STENCIL_INDEX8 = 36168
GL_STENCIL_INDEX8_EXT = 36168
GL_STENCIL_INDEX16 = 36169
GL_STENCIL_INDEX16_EXT = 36169
GL_RENDERBUFFER_RED_SIZE = 36176
GL_RENDERBUFFER_RED_SIZE_EXT = 36176
GL_RENDERBUFFER_GREEN_SIZE = 36177
GL_RENDERBUFFER_GREEN_SIZE_EXT = 36177
GL_RENDERBUFFER_BLUE_SIZE = 36178
GL_RENDERBUFFER_BLUE_SIZE_EXT = 36178
GL_RENDERBUFFER_ALPHA_SIZE = 36179
GL_RENDERBUFFER_ALPHA_SIZE_EXT = 36179
GL_RENDERBUFFER_DEPTH_SIZE = 36180
GL_RENDERBUFFER_DEPTH_SIZE_EXT = 36180
GL_RENDERBUFFER_STENCIL_SIZE = 36181
GL_RENDERBUFFER_STENCIL_SIZE_EXT = 36181
GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE = 36182
GL_MAX_SAMPLES = 36183
GL_RGB565 = 36194
GL_PRIMITIVE_RESTART_FIXED_INDEX = 36201
GL_ANY_SAMPLES_PASSED_CONSERVATIVE = 36202
GL_MAX_ELEMENT_INDEX = 36203
GL_RGBA32UI = 36208
GL_RGB32UI = 36209
GL_RGBA16UI = 36214
GL_RGB16UI = 36215
GL_RGBA8UI = 36220
GL_RGB8UI = 36221
GL_RGBA32I = 36226
GL_RGB32I = 36227
GL_RGBA16I = 36232
GL_RGB16I = 36233
GL_RGBA8I = 36238
GL_RGB8I = 36239
GL_RED_INTEGER = 36244
GL_GREEN_INTEGER = 36245
GL_BLUE_INTEGER = 36246
GL_ALPHA_INTEGER = 36247
GL_RGB_INTEGER = 36248
GL_RGBA_INTEGER = 36249
GL_BGR_INTEGER = 36250
GL_BGRA_INTEGER = 36251
GL_INT_2_10_10_10_REV = 36255
GL_FRAMEBUFFER_ATTACHMENT_LAYERED = 36263
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS = 36264
GL_FLOAT_32_UNSIGNED_INT_24_8_REV = 36269
GL_FRAMEBUFFER_SRGB = 36281
GL_COMPRESSED_RED_RGTC1 = 36283
GL_COMPRESSED_SIGNED_RED_RGTC1 = 36284
GL_COMPRESSED_RG_RGTC2 = 36285
GL_COMPRESSED_SIGNED_RG_RGTC2 = 36286
GL_SAMPLER_1D_ARRAY = 36288
GL_SAMPLER_2D_ARRAY = 36289
GL_SAMPLER_BUFFER = 36290
GL_SAMPLER_1D_ARRAY_SHADOW = 36291
GL_SAMPLER_2D_ARRAY_SHADOW = 36292
GL_SAMPLER_CUBE_SHADOW = 36293
GL_UNSIGNED_INT_VEC2 = 36294
GL_UNSIGNED_INT_VEC3 = 36295
GL_UNSIGNED_INT_VEC4 = 36296
GL_INT_SAMPLER_1D = 36297
GL_INT_SAMPLER_2D = 36298
GL_INT_SAMPLER_3D = 36299
GL_INT_SAMPLER_CUBE = 36300
GL_INT_SAMPLER_2D_RECT = 36301
GL_INT_SAMPLER_1D_ARRAY = 36302
GL_INT_SAMPLER_2D_ARRAY = 36303
GL_INT_SAMPLER_BUFFER = 36304
GL_UNSIGNED_INT_SAMPLER_1D = 36305
GL_UNSIGNED_INT_SAMPLER_2D = 36306
GL_UNSIGNED_INT_SAMPLER_3D = 36307
GL_UNSIGNED_INT_SAMPLER_CUBE = 36308
GL_UNSIGNED_INT_SAMPLER_2D_RECT = 36309
GL_UNSIGNED_INT_SAMPLER_1D_ARRAY = 36310
GL_UNSIGNED_INT_SAMPLER_2D_ARRAY = 36311
GL_UNSIGNED_INT_SAMPLER_BUFFER = 36312
GL_GEOMETRY_SHADER = 36313
GL_MAX_GEOMETRY_UNIFORM_COMPONENTS = 36319
GL_MAX_GEOMETRY_OUTPUT_VERTICES = 36320
GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS = 36321
GL_ACTIVE_SUBROUTINES = 36325
GL_ACTIVE_SUBROUTINE_UNIFORMS = 36326
GL_MAX_SUBROUTINES = 36327
GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS = 36328
GL_LOW_FLOAT = 36336
GL_MEDIUM_FLOAT = 36337
GL_HIGH_FLOAT = 36338
GL_LOW_INT = 36339
GL_MEDIUM_INT = 36340
GL_HIGH_INT = 36341
GL_SHADER_BINARY_FORMATS = 36344
GL_NUM_SHADER_BINARY_FORMATS = 36345
GL_SHADER_COMPILER = 36346
GL_MAX_VERTEX_UNIFORM_VECTORS = 36347
GL_MAX_VARYING_VECTORS = 36348
GL_MAX_FRAGMENT_UNIFORM_VECTORS = 36349
GL_QUERY_WAIT = 36371
GL_QUERY_NO_WAIT = 36372
GL_QUERY_BY_REGION_WAIT = 36373
GL_QUERY_BY_REGION_NO_WAIT = 36374
GL_QUERY_WAIT_INVERTED = 36375
GL_QUERY_NO_WAIT_INVERTED = 36376
GL_QUERY_BY_REGION_WAIT_INVERTED = 36377
GL_QUERY_BY_REGION_NO_WAIT_INVERTED = 36378
GL_POLYGON_OFFSET_CLAMP = 36379
GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS = 36382
GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS = 36383
GL_TRANSFORM_FEEDBACK = 36386
GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED = 36387
GL_TRANSFORM_FEEDBACK_PAUSED = 36387
GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE = 36388
GL_TRANSFORM_FEEDBACK_ACTIVE = 36388
GL_TRANSFORM_FEEDBACK_BINDING = 36389
GL_TIMESTAMP = 36392
GL_TEXTURE_SWIZZLE_R = 36418
GL_TEXTURE_SWIZZLE_G = 36419
GL_TEXTURE_SWIZZLE_B = 36420
GL_TEXTURE_SWIZZLE_A = 36421
GL_TEXTURE_SWIZZLE_RGBA = 36422
GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS = 36423
GL_ACTIVE_SUBROUTINE_MAX_LENGTH = 36424
GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH = 36425
GL_NUM_COMPATIBLE_SUBROUTINES = 36426
GL_COMPATIBLE_SUBROUTINES = 36427
GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION = 36428
GL_FIRST_VERTEX_CONVENTION = 36429
GL_LAST_VERTEX_CONVENTION = 36430
GL_PROVOKING_VERTEX = 36431
GL_SAMPLE_POSITION = 36432
GL_SAMPLE_MASK = 36433
GL_SAMPLE_MASK_VALUE = 36434
GL_MAX_SAMPLE_MASK_WORDS = 36441
GL_MAX_GEOMETRY_SHADER_INVOCATIONS = 36442
GL_MIN_FRAGMENT_INTERPOLATION_OFFSET = 36443
GL_MAX_FRAGMENT_INTERPOLATION_OFFSET = 36444
GL_FRAGMENT_INTERPOLATION_OFFSET_BITS = 36445
GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET = 36446
GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET = 36447
GL_MAX_TRANSFORM_FEEDBACK_BUFFERS = 36464
GL_MAX_VERTEX_STREAMS = 36465
GL_PATCH_VERTICES = 36466
GL_PATCH_DEFAULT_INNER_LEVEL = 36467
GL_PATCH_DEFAULT_OUTER_LEVEL = 36468
GL_TESS_CONTROL_OUTPUT_VERTICES = 36469
GL_TESS_GEN_MODE = 36470
GL_TESS_GEN_SPACING = 36471
GL_TESS_GEN_VERTEX_ORDER = 36472
GL_TESS_GEN_POINT_MODE = 36473
GL_ISOLINES = 36474
GL_FRACTIONAL_ODD = 36475
GL_FRACTIONAL_EVEN = 36476
GL_MAX_PATCH_VERTICES = 36477
GL_MAX_TESS_GEN_LEVEL = 36478
GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS = 36479
GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS = 36480
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS = 36481
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS = 36482
GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS = 36483
GL_MAX_TESS_PATCH_COMPONENTS = 36484
GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS = 36485
GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS = 36486
GL_TESS_EVALUATION_SHADER = 36487
GL_TESS_CONTROL_SHADER = 36488
GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS = 36489
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS = 36490
GL_COMPRESSED_RGBA_BPTC_UNORM = 36492
GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 36493
GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 36494
GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 36495
GL_COPY_READ_BUFFER = 36662
GL_COPY_READ_BUFFER_BINDING = 36662
GL_COPY_WRITE_BUFFER = 36663
GL_COPY_WRITE_BUFFER_BINDING = 36663
GL_MAX_IMAGE_UNITS = 36664
GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS = 36665
GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES = 36665
GL_IMAGE_BINDING_NAME = 36666
GL_IMAGE_BINDING_LEVEL = 36667
GL_IMAGE_BINDING_LAYERED = 36668
GL_IMAGE_BINDING_LAYER = 36669
GL_IMAGE_BINDING_ACCESS = 36670
GL_DRAW_INDIRECT_BUFFER = 36671
GL_DRAW_INDIRECT_BUFFER_BINDING = 36675
GL_DOUBLE_MAT2 = 36678
GL_DOUBLE_MAT3 = 36679
GL_DOUBLE_MAT4 = 36680
GL_DOUBLE_MAT2x3 = 36681
GL_DOUBLE_MAT2x4 = 36682
GL_DOUBLE_MAT3x2 = 36683
GL_DOUBLE_MAT3x4 = 36684
GL_DOUBLE_MAT4x2 = 36685
GL_DOUBLE_MAT4x3 = 36686
GL_VERTEX_BINDING_BUFFER = 36687
GL_R8_SNORM = 36756
GL_RG8_SNORM = 36757
GL_RGB8_SNORM = 36758
GL_RGBA8_SNORM = 36759
GL_R16_SNORM = 36760
GL_RG16_SNORM = 36761
GL_RGB16_SNORM = 36762
GL_RGBA16_SNORM = 36763
GL_SIGNED_NORMALIZED = 36764
GL_PRIMITIVE_RESTART = 36765
GL_PRIMITIVE_RESTART_INDEX = 36766
GL_DOUBLE_VEC2 = 36860
GL_DOUBLE_VEC3 = 36861
GL_DOUBLE_VEC4 = 36862
GL_TEXTURE_CUBE_MAP_ARRAY = 36873
GL_TEXTURE_BINDING_CUBE_MAP_ARRAY = 36874
GL_PROXY_TEXTURE_CUBE_MAP_ARRAY = 36875
GL_SAMPLER_CUBE_MAP_ARRAY = 36876
GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW = 36877
GL_INT_SAMPLER_CUBE_MAP_ARRAY = 36878
GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY = 36879
GL_IMAGE_1D = 36940
GL_IMAGE_2D = 36941
GL_IMAGE_3D = 36942
GL_IMAGE_2D_RECT = 36943
GL_IMAGE_CUBE = 36944
GL_IMAGE_BUFFER = 36945
GL_IMAGE_1D_ARRAY = 36946
GL_IMAGE_2D_ARRAY = 36947
GL_IMAGE_CUBE_MAP_ARRAY = 36948
GL_IMAGE_2D_MULTISAMPLE = 36949
GL_IMAGE_2D_MULTISAMPLE_ARRAY = 36950
GL_INT_IMAGE_1D = 36951
GL_INT_IMAGE_2D = 36952
GL_INT_IMAGE_3D = 36953
GL_INT_IMAGE_2D_RECT = 36954
GL_INT_IMAGE_CUBE = 36955
GL_INT_IMAGE_BUFFER = 36956
GL_INT_IMAGE_1D_ARRAY = 36957
GL_INT_IMAGE_2D_ARRAY = 36958
GL_INT_IMAGE_CUBE_MAP_ARRAY = 36959
GL_INT_IMAGE_2D_MULTISAMPLE = 36960
GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY = 36961
GL_UNSIGNED_INT_IMAGE_1D = 36962
GL_UNSIGNED_INT_IMAGE_2D = 36963
GL_UNSIGNED_INT_IMAGE_3D = 36964
GL_UNSIGNED_INT_IMAGE_2D_RECT = 36965
GL_UNSIGNED_INT_IMAGE_CUBE = 36966
GL_UNSIGNED_INT_IMAGE_BUFFER = 36967
GL_UNSIGNED_INT_IMAGE_1D_ARRAY = 36968
GL_UNSIGNED_INT_IMAGE_2D_ARRAY = 36969
GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY = 36970
GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE = 36971
GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY = 36972
GL_MAX_IMAGE_SAMPLES = 36973
GL_IMAGE_BINDING_FORMAT = 36974
GL_RGB10_A2UI = 36975
GL_MIN_MAP_BUFFER_ALIGNMENT = 37052
GL_IMAGE_FORMAT_COMPATIBILITY_TYPE = 37063
GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE = 37064
GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS = 37065
GL_MAX_VERTEX_IMAGE_UNIFORMS = 37066
GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS = 37067
GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS = 37068
GL_MAX_GEOMETRY_IMAGE_UNIFORMS = 37069
GL_MAX_FRAGMENT_IMAGE_UNIFORMS = 37070
GL_MAX_COMBINED_IMAGE_UNIFORMS = 37071
GL_SHADER_STORAGE_BUFFER = 37074
GL_SHADER_STORAGE_BUFFER_BINDING = 37075
GL_SHADER_STORAGE_BUFFER_START = 37076
GL_SHADER_STORAGE_BUFFER_SIZE = 37077
GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS = 37078
GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS = 37079
GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS = 37080
GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS = 37081
GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS = 37082
GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS = 37083
GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS = 37084
GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS = 37085
GL_MAX_SHADER_STORAGE_BLOCK_SIZE = 37086
GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT = 37087
GL_DEPTH_STENCIL_TEXTURE_MODE = 37098
GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS = 37099
GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER = 37100
GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER = 37101
GL_DISPATCH_INDIRECT_BUFFER = 37102
GL_DISPATCH_INDIRECT_BUFFER_BINDING = 37103
GL_TEXTURE_2D_MULTISAMPLE = 37120
GL_PROXY_TEXTURE_2D_MULTISAMPLE = 37121
GL_TEXTURE_2D_MULTISAMPLE_ARRAY = 37122
GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY = 37123
GL_TEXTURE_BINDING_2D_MULTISAMPLE = 37124
GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY = 37125
GL_TEXTURE_SAMPLES = 37126
GL_TEXTURE_FIXED_SAMPLE_LOCATIONS = 37127
GL_SAMPLER_2D_MULTISAMPLE = 37128
GL_INT_SAMPLER_2D_MULTISAMPLE = 37129
GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE = 37130
GL_SAMPLER_2D_MULTISAMPLE_ARRAY = 37131
GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY = 37132
GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY = 37133
GL_MAX_COLOR_TEXTURE_SAMPLES = 37134
GL_MAX_DEPTH_TEXTURE_SAMPLES = 37135
GL_MAX_INTEGER_SAMPLES = 37136
GL_MAX_SERVER_WAIT_TIMEOUT = 37137
GL_OBJECT_TYPE = 37138
GL_SYNC_CONDITION = 37139
GL_SYNC_STATUS = 37140
GL_SYNC_FLAGS = 37141
GL_SYNC_FENCE = 37142
GL_SYNC_GPU_COMMANDS_COMPLETE = 37143
GL_UNSIGNALED = 37144
GL_SIGNALED = 37145
GL_ALREADY_SIGNALED = 37146
GL_TIMEOUT_EXPIRED = 37147
GL_CONDITION_SATISFIED = 37148
GL_WAIT_FAILED = 37149
GL_BUFFER_ACCESS_FLAGS = 37151
GL_BUFFER_MAP_LENGTH = 37152
GL_BUFFER_MAP_OFFSET = 37153
GL_MAX_VERTEX_OUTPUT_COMPONENTS = 37154
GL_MAX_GEOMETRY_INPUT_COMPONENTS = 37155
GL_MAX_GEOMETRY_OUTPUT_COMPONENTS = 37156
GL_MAX_FRAGMENT_INPUT_COMPONENTS = 37157
GL_CONTEXT_PROFILE_MASK = 37158
GL_UNPACK_COMPRESSED_BLOCK_WIDTH = 37159
GL_UNPACK_COMPRESSED_BLOCK_HEIGHT = 37160
GL_UNPACK_COMPRESSED_BLOCK_DEPTH = 37161
GL_UNPACK_COMPRESSED_BLOCK_SIZE = 37162
GL_PACK_COMPRESSED_BLOCK_WIDTH = 37163
GL_PACK_COMPRESSED_BLOCK_HEIGHT = 37164
GL_PACK_COMPRESSED_BLOCK_DEPTH = 37165
GL_PACK_COMPRESSED_BLOCK_SIZE = 37166
GL_TEXTURE_IMMUTABLE_FORMAT = 37167
GL_MAX_DEBUG_MESSAGE_LENGTH = 37187
GL_MAX_DEBUG_LOGGED_MESSAGES = 37188
GL_DEBUG_LOGGED_MESSAGES = 37189
GL_DEBUG_SEVERITY_HIGH = 37190
GL_DEBUG_SEVERITY_MEDIUM = 37191
GL_DEBUG_SEVERITY_LOW = 37192
GL_QUERY_BUFFER = 37266
GL_QUERY_BUFFER_BINDING = 37267
GL_QUERY_RESULT_NO_WAIT = 37268
GL_TEXTURE_BUFFER_OFFSET = 37277
GL_TEXTURE_BUFFER_SIZE = 37278
GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT = 37279
GL_COMPUTE_SHADER = 37305
GL_MAX_COMPUTE_UNIFORM_BLOCKS = 37307
GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS = 37308
GL_MAX_COMPUTE_IMAGE_UNIFORMS = 37309
GL_MAX_COMPUTE_WORK_GROUP_COUNT = 37310
GL_MAX_COMPUTE_WORK_GROUP_SIZE = 37311
GL_COMPRESSED_R11_EAC = 37488
GL_COMPRESSED_SIGNED_R11_EAC = 37489
GL_COMPRESSED_RG11_EAC = 37490
GL_COMPRESSED_SIGNED_RG11_EAC = 37491
GL_COMPRESSED_RGB8_ETC2 = 37492
GL_COMPRESSED_SRGB8_ETC2 = 37493
GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 37494
GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 37495
GL_COMPRESSED_RGBA8_ETC2_EAC = 37496
GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 37497
GL_ATOMIC_COUNTER_BUFFER = 37568
GL_ATOMIC_COUNTER_BUFFER_BINDING = 37569
GL_ATOMIC_COUNTER_BUFFER_START = 37570
GL_ATOMIC_COUNTER_BUFFER_SIZE = 37571
GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE = 37572
GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS = 37573
GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES = 37574
GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER = 37575
GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER = 37576
GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER = 37577
GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER = 37578
GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER = 37579
GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS = 37580
GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS = 37581
GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS = 37582
GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS = 37583
GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS = 37584
GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS = 37585
GL_MAX_VERTEX_ATOMIC_COUNTERS = 37586
GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS = 37587
GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS = 37588
GL_MAX_GEOMETRY_ATOMIC_COUNTERS = 37589
GL_MAX_FRAGMENT_ATOMIC_COUNTERS = 37590
GL_MAX_COMBINED_ATOMIC_COUNTERS = 37591
GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE = 37592
GL_ACTIVE_ATOMIC_COUNTER_BUFFERS = 37593
GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX = 37594
GL_UNSIGNED_INT_ATOMIC_COUNTER = 37595
GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS = 37596
GL_DEBUG_OUTPUT = 37600
GL_UNIFORM = 37601
GL_UNIFORM_BLOCK = 37602
GL_PROGRAM_INPUT = 37603
GL_PROGRAM_OUTPUT = 37604
GL_BUFFER_VARIABLE = 37605
GL_SHADER_STORAGE_BLOCK = 37606
GL_IS_PER_PATCH = 37607
GL_VERTEX_SUBROUTINE = 37608
GL_TESS_CONTROL_SUBROUTINE = 37609
GL_TESS_EVALUATION_SUBROUTINE = 37610
GL_GEOMETRY_SUBROUTINE = 37611
GL_FRAGMENT_SUBROUTINE = 37612
GL_COMPUTE_SUBROUTINE = 37613
GL_VERTEX_SUBROUTINE_UNIFORM = 37614
GL_TESS_CONTROL_SUBROUTINE_UNIFORM = 37615
GL_TESS_EVALUATION_SUBROUTINE_UNIFORM = 37616
GL_GEOMETRY_SUBROUTINE_UNIFORM = 37617
GL_FRAGMENT_SUBROUTINE_UNIFORM = 37618
GL_COMPUTE_SUBROUTINE_UNIFORM = 37619
GL_TRANSFORM_FEEDBACK_VARYING = 37620
GL_ACTIVE_RESOURCES = 37621
GL_MAX_NAME_LENGTH = 37622
GL_MAX_NUM_ACTIVE_VARIABLES = 37623
GL_MAX_NUM_COMPATIBLE_SUBROUTINES = 37624
GL_NAME_LENGTH = 37625
GL_TYPE = 37626
GL_ARRAY_SIZE = 37627
GL_OFFSET = 37628
GL_BLOCK_INDEX = 37629
GL_ARRAY_STRIDE = 37630
GL_MATRIX_STRIDE = 37631
GL_IS_ROW_MAJOR = 37632
GL_ATOMIC_COUNTER_BUFFER_INDEX = 37633
GL_BUFFER_BINDING = 37634
GL_BUFFER_DATA_SIZE = 37635
GL_NUM_ACTIVE_VARIABLES = 37636
GL_ACTIVE_VARIABLES = 37637
GL_REFERENCED_BY_VERTEX_SHADER = 37638
GL_REFERENCED_BY_TESS_CONTROL_SHADER = 37639
GL_REFERENCED_BY_TESS_EVALUATION_SHADER = 37640
GL_REFERENCED_BY_GEOMETRY_SHADER = 37641
GL_REFERENCED_BY_FRAGMENT_SHADER = 37642
GL_REFERENCED_BY_COMPUTE_SHADER = 37643
GL_TOP_LEVEL_ARRAY_SIZE = 37644
GL_TOP_LEVEL_ARRAY_STRIDE = 37645
GL_LOCATION = 37646
GL_LOCATION_INDEX = 37647
GL_FRAMEBUFFER_DEFAULT_WIDTH = 37648
GL_FRAMEBUFFER_DEFAULT_HEIGHT = 37649
GL_FRAMEBUFFER_DEFAULT_LAYERS = 37650
GL_FRAMEBUFFER_DEFAULT_SAMPLES = 37651
GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS = 37652
GL_MAX_FRAMEBUFFER_WIDTH = 37653
GL_MAX_FRAMEBUFFER_HEIGHT = 37654
GL_MAX_FRAMEBUFFER_LAYERS = 37655
GL_MAX_FRAMEBUFFER_SAMPLES = 37656
GL_LOCATION_COMPONENT = 37706
GL_TRANSFORM_FEEDBACK_BUFFER_INDEX = 37707
GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE = 37708
GL_CLIP_ORIGIN = 37724
GL_CLIP_DEPTH_MODE = 37725
GL_NEGATIVE_ONE_TO_ONE = 37726
GL_ZERO_TO_ONE = 37727
GL_CLEAR_TEXTURE = 37733
GL_NUM_SAMPLE_COUNTS = 37760
GL_SHADER_BINARY_FORMAT_SPIR_V = 38225
GL_SPIR_V_BINARY = 38226
GL_SPIR_V_EXTENSIONS = 38227
GL_NUM_SPIR_V_EXTENSIONS = 38228
GL_EVAL_BIT = 65536
GL_LIST_BIT = 131072
GL_TEXTURE_BIT = 262144
GL_SCISSOR_BIT = 524288
GL_MULTISAMPLE_BIT = 536870912
GL_MULTISAMPLE_BIT_ARB = 536870912
GL_ALL_ATTRIB_BITS = 4294967295
GL_CLIENT_ALL_ATTRIB_BITS = 4294967295
GL_INVALID_INDEX = 4294967295
GL_ALL_SHADER_BITS = 4294967295
GL_ALL_BARRIER_BITS = 4294967295
GL_TIMEOUT_IGNORED = 18446744073709551615
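# Usage note (illustrative only, not part of the generated bindings): the enum and
# bitfield values above are plain Python ints. Bitfield constants such as
# GL_COLOR_BUFFER_BIT can be OR-ed together wherever a command takes a GLbitfield
# mask; a minimal sketch, assuming a current GL context has already been created:
#
#     glClearColor(0.0, 0.0, 0.0, 1.0)
#     glClear(GL_COLOR_BUFFER_BIT)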
# GL command definitions
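# Each name below is bound through _link_function (made available earlier in this
# file): the second argument is the ctypes return type (None for void), the third
# is the list of argument types, and `requires` records the minimum OpenGL version
# or extension needed. Pointer parameters expect ctypes objects. A minimal sketch
# of typical use (an assumption, not generated code; glCreateBuffers needs an
# OpenGL 4.5 context):
#
#     buf = GLuint()
#     glCreateBuffers(1, byref(buf))   # ctypes.byref satisfies POINTER(GLuint)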
glAccum = _link_function('glAccum', None, [GLenum, GLfloat], requires='OpenGL 1.0')
glActiveShaderProgram = _link_function('glActiveShaderProgram', None, [GLuint, GLuint], requires='OpenGL 4.1')
glActiveTexture = _link_function('glActiveTexture', None, [GLenum], requires='OpenGL 1.3')
glAlphaFunc = _link_function('glAlphaFunc', None, [GLenum, GLfloat], requires='OpenGL 1.0')
glAreTexturesResident = _link_function('glAreTexturesResident', GLboolean, [GLsizei, POINTER(GLuint), POINTER(GLboolean)], requires='OpenGL 1.1')
glArrayElement = _link_function('glArrayElement', None, [GLint], requires='OpenGL 1.1')
glAttachShader = _link_function('glAttachShader', None, [GLuint, GLuint], requires='OpenGL 2.0')
glBegin = _link_function('glBegin', None, [GLenum], requires='OpenGL 1.0')
glBeginConditionalRender = _link_function('glBeginConditionalRender', None, [GLuint, GLenum], requires='OpenGL 3.0')
glBeginQuery = _link_function('glBeginQuery', None, [GLenum, GLuint], requires='OpenGL 1.5')
glBeginQueryIndexed = _link_function('glBeginQueryIndexed', None, [GLenum, GLuint, GLuint], requires='OpenGL 4.0')
glBeginTransformFeedback = _link_function('glBeginTransformFeedback', None, [GLenum], requires='OpenGL 3.0')
glBindAttribLocation = _link_function('glBindAttribLocation', None, [GLuint, GLuint, POINTER(GLchar)], requires='OpenGL 2.0')
glBindBuffer = _link_function('glBindBuffer', None, [GLenum, GLuint], requires='OpenGL 1.5')
glBindBufferBase = _link_function('glBindBufferBase', None, [GLenum, GLuint, GLuint], requires='OpenGL 3.1')
glBindBufferRange = _link_function('glBindBufferRange', None, [GLenum, GLuint, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 3.1')
glBindBuffersBase = _link_function('glBindBuffersBase', None, [GLenum, GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4')
glBindBuffersRange = _link_function('glBindBuffersRange', None, [GLenum, GLuint, GLsizei, POINTER(GLuint), POINTER(GLintptr), POINTER(GLsizeiptr)], requires='OpenGL 4.4')
glBindFragDataLocation = _link_function('glBindFragDataLocation', None, [GLuint, GLuint, POINTER(GLchar)], requires='OpenGL 3.0')
glBindFragDataLocationIndexed = _link_function('glBindFragDataLocationIndexed', None, [GLuint, GLuint, GLuint, POINTER(GLchar)], requires='OpenGL 3.3')
glBindFramebuffer = _link_function('glBindFramebuffer', None, [GLenum, GLuint], requires='OpenGL 3.0')
glBindFramebufferEXT = _link_function('glBindFramebufferEXT', None, [GLenum, GLuint], requires='None')
glBindImageTexture = _link_function('glBindImageTexture', None, [GLuint, GLuint, GLint, GLboolean, GLint, GLenum, GLenum], requires='OpenGL 4.2')
glBindImageTextures = _link_function('glBindImageTextures', None, [GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4')
glBindProgramPipeline = _link_function('glBindProgramPipeline', None, [GLuint], requires='OpenGL 4.1')
glBindRenderbuffer = _link_function('glBindRenderbuffer', None, [GLenum, GLuint], requires='OpenGL 3.0')
glBindRenderbufferEXT = _link_function('glBindRenderbufferEXT', None, [GLenum, GLuint], requires='None')
glBindSampler = _link_function('glBindSampler', None, [GLuint, GLuint], requires='OpenGL 3.3')
glBindSamplers = _link_function('glBindSamplers', None, [GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4')
glBindTexture = _link_function('glBindTexture', None, [GLenum, GLuint], requires='OpenGL 1.1')
glBindTextureUnit = _link_function('glBindTextureUnit', None, [GLuint, GLuint], requires='OpenGL 4.5')
glBindTextures = _link_function('glBindTextures', None, [GLuint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.4')
glBindTransformFeedback = _link_function('glBindTransformFeedback', None, [GLenum, GLuint], requires='OpenGL 4.0')
glBindVertexArray = _link_function('glBindVertexArray', None, [GLuint], requires='OpenGL 3.0')
glBindVertexBuffer = _link_function('glBindVertexBuffer', None, [GLuint, GLuint, GLintptr, GLsizei], requires='OpenGL 4.3')
glBindVertexBuffers = _link_function('glBindVertexBuffers', None, [GLuint, GLsizei, POINTER(GLuint), POINTER(GLintptr), POINTER(GLsizei)], requires='OpenGL 4.4')
glBitmap = _link_function('glBitmap', None, [GLsizei, GLsizei, GLfloat, GLfloat, GLfloat, GLfloat, POINTER(GLubyte)], requires='OpenGL 1.0')
glBlendColor = _link_function('glBlendColor', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.4')
glBlendEquation = _link_function('glBlendEquation', None, [GLenum], requires='OpenGL 1.4')
glBlendEquationSeparate = _link_function('glBlendEquationSeparate', None, [GLenum, GLenum], requires='OpenGL 2.0')
glBlendEquationSeparatei = _link_function('glBlendEquationSeparatei', None, [GLuint, GLenum, GLenum], requires='OpenGL 4.0')
glBlendEquationi = _link_function('glBlendEquationi', None, [GLuint, GLenum], requires='OpenGL 4.0')
glBlendFunc = _link_function('glBlendFunc', None, [GLenum, GLenum], requires='OpenGL 1.0')
glBlendFuncSeparate = _link_function('glBlendFuncSeparate', None, [GLenum, GLenum, GLenum, GLenum], requires='OpenGL 1.4')
glBlendFuncSeparatei = _link_function('glBlendFuncSeparatei', None, [GLuint, GLenum, GLenum, GLenum, GLenum], requires='OpenGL 4.0')
glBlendFunci = _link_function('glBlendFunci', None, [GLuint, GLenum, GLenum], requires='OpenGL 4.0')
glBlitFramebuffer = _link_function('glBlitFramebuffer', None, [GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum], requires='OpenGL 3.0')
glBlitNamedFramebuffer = _link_function('glBlitNamedFramebuffer', None, [GLuint, GLuint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLint, GLbitfield, GLenum], requires='OpenGL 4.5')
glBufferData = _link_function('glBufferData', None, [GLenum, GLsizeiptr, POINTER(GLvoid), GLenum], requires='OpenGL 1.5')
glBufferStorage = _link_function('glBufferStorage', None, [GLenum, GLsizeiptr, POINTER(GLvoid), GLbitfield], requires='OpenGL 4.4')
glBufferSubData = _link_function('glBufferSubData', None, [GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 1.5')
glCallList = _link_function('glCallList', None, [GLuint], requires='OpenGL 1.0')
glCallLists = _link_function('glCallLists', None, [GLsizei, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0')
glCheckFramebufferStatus = _link_function('glCheckFramebufferStatus', GLenum, [GLenum], requires='OpenGL 3.0')
glCheckFramebufferStatusEXT = _link_function('glCheckFramebufferStatusEXT', GLenum, [GLenum], requires='None')
glCheckNamedFramebufferStatus = _link_function('glCheckNamedFramebufferStatus', GLenum, [GLuint, GLenum], requires='OpenGL 4.5')
glClampColor = _link_function('glClampColor', None, [GLenum, GLenum], requires='OpenGL 3.0')
glClear = _link_function('glClear', None, [GLbitfield], requires='OpenGL 1.0')
glClearAccum = _link_function('glClearAccum', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glClearBufferData = _link_function('glClearBufferData', None, [GLenum, GLenum, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.3')
glClearBufferSubData = _link_function('glClearBufferSubData', None, [GLenum, GLenum, GLintptr, GLsizeiptr, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.3')
glClearBufferfi = _link_function('glClearBufferfi', None, [GLenum, GLint, GLfloat, GLint], requires='OpenGL 3.0')
glClearBufferfv = _link_function('glClearBufferfv', None, [GLenum, GLint, POINTER(GLfloat)], requires='OpenGL 3.0')
glClearBufferiv = _link_function('glClearBufferiv', None, [GLenum, GLint, POINTER(GLint)], requires='OpenGL 3.0')
glClearBufferuiv = _link_function('glClearBufferuiv', None, [GLenum, GLint, POINTER(GLuint)], requires='OpenGL 3.0')
glClearColor = _link_function('glClearColor', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glClearDepth = _link_function('glClearDepth', None, [GLdouble], requires='OpenGL 1.0')
glClearDepthf = _link_function('glClearDepthf', None, [GLfloat], requires='OpenGL 4.1')
glClearIndex = _link_function('glClearIndex', None, [GLfloat], requires='OpenGL 1.0')
glClearNamedBufferData = _link_function('glClearNamedBufferData', None, [GLuint, GLenum, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5')
glClearNamedBufferSubData = _link_function('glClearNamedBufferSubData', None, [GLuint, GLenum, GLintptr, GLsizeiptr, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5')
glClearNamedFramebufferfi = _link_function('glClearNamedFramebufferfi', None, [GLuint, GLenum, GLint, GLfloat, GLint], requires='OpenGL 4.5')
glClearNamedFramebufferfv = _link_function('glClearNamedFramebufferfv', None, [GLuint, GLenum, GLint, POINTER(GLfloat)], requires='OpenGL 4.5')
glClearNamedFramebufferiv = _link_function('glClearNamedFramebufferiv', None, [GLuint, GLenum, GLint, POINTER(GLint)], requires='OpenGL 4.5')
glClearNamedFramebufferuiv = _link_function('glClearNamedFramebufferuiv', None, [GLuint, GLenum, GLint, POINTER(GLuint)], requires='OpenGL 4.5')
glClearStencil = _link_function('glClearStencil', None, [GLint], requires='OpenGL 1.0')
glClearTexImage = _link_function('glClearTexImage', None, [GLuint, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.4')
glClearTexSubImage = _link_function('glClearTexSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.4')
glClientActiveTexture = _link_function('glClientActiveTexture', None, [GLenum], requires='OpenGL 1.3')
glClipControl = _link_function('glClipControl', None, [GLenum, GLenum], requires='OpenGL 4.5')
glClipPlane = _link_function('glClipPlane', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.0')
glColor3b = _link_function('glColor3b', None, [GLbyte, GLbyte, GLbyte], requires='OpenGL 1.0')
glColor3bv = _link_function('glColor3bv', None, [POINTER(GLbyte)], requires='OpenGL 1.0')
glColor3d = _link_function('glColor3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glColor3dv = _link_function('glColor3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glColor3f = _link_function('glColor3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glColor3fv = _link_function('glColor3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glColor3i = _link_function('glColor3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0')
glColor3iv = _link_function('glColor3iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glColor3s = _link_function('glColor3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glColor3sv = _link_function('glColor3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glColor3ub = _link_function('glColor3ub', None, [GLubyte, GLubyte, GLubyte], requires='OpenGL 1.0')
glColor3ubv = _link_function('glColor3ubv', None, [POINTER(GLubyte)], requires='OpenGL 1.0')
glColor3ui = _link_function('glColor3ui', None, [GLuint, GLuint, GLuint], requires='OpenGL 1.0')
glColor3uiv = _link_function('glColor3uiv', None, [POINTER(GLuint)], requires='OpenGL 1.0')
glColor3us = _link_function('glColor3us', None, [GLushort, GLushort, GLushort], requires='OpenGL 1.0')
glColor3usv = _link_function('glColor3usv', None, [POINTER(GLushort)], requires='OpenGL 1.0')
glColor4b = _link_function('glColor4b', None, [GLbyte, GLbyte, GLbyte, GLbyte], requires='OpenGL 1.0')
glColor4bv = _link_function('glColor4bv', None, [POINTER(GLbyte)], requires='OpenGL 1.0')
glColor4d = _link_function('glColor4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glColor4dv = _link_function('glColor4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glColor4f = _link_function('glColor4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glColor4fv = _link_function('glColor4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glColor4i = _link_function('glColor4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0')
glColor4iv = _link_function('glColor4iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glColor4s = _link_function('glColor4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glColor4sv = _link_function('glColor4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glColor4ub = _link_function('glColor4ub', None, [GLubyte, GLubyte, GLubyte, GLubyte], requires='OpenGL 1.0')
glColor4ubv = _link_function('glColor4ubv', None, [POINTER(GLubyte)], requires='OpenGL 1.0')
glColor4ui = _link_function('glColor4ui', None, [GLuint, GLuint, GLuint, GLuint], requires='OpenGL 1.0')
glColor4uiv = _link_function('glColor4uiv', None, [POINTER(GLuint)], requires='OpenGL 1.0')
glColor4us = _link_function('glColor4us', None, [GLushort, GLushort, GLushort, GLushort], requires='OpenGL 1.0')
glColor4usv = _link_function('glColor4usv', None, [POINTER(GLushort)], requires='OpenGL 1.0')
glColorMask = _link_function('glColorMask', None, [GLboolean, GLboolean, GLboolean, GLboolean], requires='OpenGL 1.0')
glColorMaski = _link_function('glColorMaski', None, [GLuint, GLboolean, GLboolean, GLboolean, GLboolean], requires='OpenGL 3.0')
glColorMaterial = _link_function('glColorMaterial', None, [GLenum, GLenum], requires='OpenGL 1.0')
glColorP3ui = _link_function('glColorP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glColorP3uiv = _link_function('glColorP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glColorP4ui = _link_function('glColorP4ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glColorP4uiv = _link_function('glColorP4uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glColorPointer = _link_function('glColorPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glCompileShader = _link_function('glCompileShader', None, [GLuint], requires='OpenGL 2.0')
glCompressedTexImage1D = _link_function('glCompressedTexImage1D', None, [GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3')
glCompressedTexImage2D = _link_function('glCompressedTexImage2D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3')
glCompressedTexImage3D = _link_function('glCompressedTexImage3D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3')
glCompressedTexSubImage1D = _link_function('glCompressedTexSubImage1D', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3')
glCompressedTexSubImage2D = _link_function('glCompressedTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3')
glCompressedTexSubImage3D = _link_function('glCompressedTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.3')
glCompressedTextureSubImage1D = _link_function('glCompressedTextureSubImage1D', None, [GLuint, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glCompressedTextureSubImage2D = _link_function('glCompressedTextureSubImage2D', None, [GLuint, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glCompressedTextureSubImage3D = _link_function('glCompressedTextureSubImage3D', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glCopyBufferSubData = _link_function('glCopyBufferSubData', None, [GLenum, GLenum, GLintptr, GLintptr, GLsizeiptr], requires='OpenGL 3.1')
glCopyImageSubData = _link_function('glCopyImageSubData', None, [GLuint, GLenum, GLint, GLint, GLint, GLint, GLuint, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.3')
glCopyNamedBufferSubData = _link_function('glCopyNamedBufferSubData', None, [GLuint, GLuint, GLintptr, GLintptr, GLsizeiptr], requires='OpenGL 4.5')
glCopyPixels = _link_function('glCopyPixels', None, [GLint, GLint, GLsizei, GLsizei, GLenum], requires='OpenGL 1.0')
glCopyTexImage1D = _link_function('glCopyTexImage1D', None, [GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLint], requires='OpenGL 1.1')
glCopyTexImage2D = _link_function('glCopyTexImage2D', None, [GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLsizei, GLint], requires='OpenGL 1.1')
glCopyTexSubImage1D = _link_function('glCopyTexSubImage1D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei], requires='OpenGL 1.1')
glCopyTexSubImage2D = _link_function('glCopyTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.1')
glCopyTexSubImage3D = _link_function('glCopyTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.2')
glCopyTextureSubImage1D = _link_function('glCopyTextureSubImage1D', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei], requires='OpenGL 4.5')
glCopyTextureSubImage2D = _link_function('glCopyTextureSubImage2D', None, [GLuint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.5')
glCopyTextureSubImage3D = _link_function('glCopyTextureSubImage3D', None, [GLuint, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.5')
glCreateBuffers = _link_function('glCreateBuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateFramebuffers = _link_function('glCreateFramebuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateProgram = _link_function('glCreateProgram', GLuint, [], requires='OpenGL 2.0')
glCreateProgramPipelines = _link_function('glCreateProgramPipelines', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateQueries = _link_function('glCreateQueries', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateRenderbuffers = _link_function('glCreateRenderbuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateSamplers = _link_function('glCreateSamplers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateShader = _link_function('glCreateShader', GLuint, [GLenum], requires='OpenGL 2.0')
glCreateShaderProgramv = _link_function('glCreateShaderProgramv', GLuint, [GLenum, GLsizei, POINTER(POINTER(GLchar))], requires='OpenGL 4.1')
glCreateTextures = _link_function('glCreateTextures', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateTransformFeedbacks = _link_function('glCreateTransformFeedbacks', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCreateVertexArrays = _link_function('glCreateVertexArrays', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glCullFace = _link_function('glCullFace', None, [GLenum], requires='OpenGL 1.0')
glDebugMessageControl = _link_function('glDebugMessageControl', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLuint), GLboolean], requires='OpenGL 4.3')
glDebugMessageInsert = _link_function('glDebugMessageInsert', None, [GLenum, GLenum, GLuint, GLenum, GLsizei, POINTER(GLchar)], requires='OpenGL 4.3')
glDeleteBuffers = _link_function('glDeleteBuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5')
glDeleteFramebuffers = _link_function('glDeleteFramebuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glDeleteFramebuffersEXT = _link_function('glDeleteFramebuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None')
glDeleteLists = _link_function('glDeleteLists', None, [GLuint, GLsizei], requires='OpenGL 1.0')
glDeleteProgram = _link_function('glDeleteProgram', None, [GLuint], requires='OpenGL 2.0')
glDeleteProgramPipelines = _link_function('glDeleteProgramPipelines', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.1')
glDeleteQueries = _link_function('glDeleteQueries', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5')
glDeleteRenderbuffers = _link_function('glDeleteRenderbuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glDeleteRenderbuffersEXT = _link_function('glDeleteRenderbuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None')
glDeleteSamplers = _link_function('glDeleteSamplers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.3')
glDeleteShader = _link_function('glDeleteShader', None, [GLuint], requires='OpenGL 2.0')
glDeleteTextures = _link_function('glDeleteTextures', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.1')
glDeleteTransformFeedbacks = _link_function('glDeleteTransformFeedbacks', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.0')
glDeleteVertexArrays = _link_function('glDeleteVertexArrays', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glDepthFunc = _link_function('glDepthFunc', None, [GLenum], requires='OpenGL 1.0')
glDepthMask = _link_function('glDepthMask', None, [GLboolean], requires='OpenGL 1.0')
glDepthRange = _link_function('glDepthRange', None, [GLdouble, GLdouble], requires='OpenGL 1.0')
glDepthRangeArrayv = _link_function('glDepthRangeArrayv', None, [GLuint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1')
glDepthRangeIndexed = _link_function('glDepthRangeIndexed', None, [GLuint, GLdouble, GLdouble], requires='OpenGL 4.1')
glDepthRangef = _link_function('glDepthRangef', None, [GLfloat, GLfloat], requires='OpenGL 4.1')
glDetachShader = _link_function('glDetachShader', None, [GLuint, GLuint], requires='OpenGL 2.0')
glDisable = _link_function('glDisable', None, [GLenum], requires='OpenGL 1.0')
glDisableClientState = _link_function('glDisableClientState', None, [GLenum], requires='OpenGL 1.1')
glDisableVertexArrayAttrib = _link_function('glDisableVertexArrayAttrib', None, [GLuint, GLuint], requires='OpenGL 4.5')
glDisableVertexAttribArray = _link_function('glDisableVertexAttribArray', None, [GLuint], requires='OpenGL 2.0')
glDisablei = _link_function('glDisablei', None, [GLenum, GLuint], requires='OpenGL 3.0')
glDispatchCompute = _link_function('glDispatchCompute', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.3')
glDispatchComputeIndirect = _link_function('glDispatchComputeIndirect', None, [GLintptr], requires='OpenGL 4.3')
glDrawArrays = _link_function('glDrawArrays', None, [GLenum, GLint, GLsizei], requires='OpenGL 1.1')
glDrawArraysIndirect = _link_function('glDrawArraysIndirect', None, [GLenum, POINTER(GLvoid)], requires='OpenGL 4.0')
glDrawArraysInstanced = _link_function('glDrawArraysInstanced', None, [GLenum, GLint, GLsizei, GLsizei], requires='OpenGL 3.1')
glDrawArraysInstancedBaseInstance = _link_function('glDrawArraysInstancedBaseInstance', None, [GLenum, GLint, GLsizei, GLsizei, GLuint], requires='OpenGL 4.2')
glDrawBuffer = _link_function('glDrawBuffer', None, [GLenum], requires='OpenGL 1.0')
glDrawBuffers = _link_function('glDrawBuffers', None, [GLsizei, POINTER(GLenum)], requires='OpenGL 2.0')
glDrawElements = _link_function('glDrawElements', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid)], requires='OpenGL 1.1')
glDrawElementsBaseVertex = _link_function('glDrawElementsBaseVertex', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLint], requires='OpenGL 3.2')
glDrawElementsIndirect = _link_function('glDrawElementsIndirect', None, [GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.0')
glDrawElementsInstanced = _link_function('glDrawElementsInstanced', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 3.1')
glDrawElementsInstancedBaseInstance = _link_function('glDrawElementsInstancedBaseInstance', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei, GLuint], requires='OpenGL 4.2')
glDrawElementsInstancedBaseVertex = _link_function('glDrawElementsInstancedBaseVertex', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei, GLint], requires='OpenGL 3.2')
glDrawElementsInstancedBaseVertexBaseInstance = _link_function('glDrawElementsInstancedBaseVertexBaseInstance', None, [GLenum, GLsizei, GLenum, POINTER(GLvoid), GLsizei, GLint, GLuint], requires='OpenGL 4.2')
glDrawPixels = _link_function('glDrawPixels', None, [GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0')
glDrawRangeElements = _link_function('glDrawRangeElements', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)], requires='OpenGL 1.2')
glDrawRangeElementsBaseVertex = _link_function('glDrawRangeElementsBaseVertex', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid), GLint], requires='OpenGL 3.2')
glDrawTransformFeedback = _link_function('glDrawTransformFeedback', None, [GLenum, GLuint], requires='OpenGL 4.0')
glDrawTransformFeedbackInstanced = _link_function('glDrawTransformFeedbackInstanced', None, [GLenum, GLuint, GLsizei], requires='OpenGL 4.2')
glDrawTransformFeedbackStream = _link_function('glDrawTransformFeedbackStream', None, [GLenum, GLuint, GLuint], requires='OpenGL 4.0')
glDrawTransformFeedbackStreamInstanced = _link_function('glDrawTransformFeedbackStreamInstanced', None, [GLenum, GLuint, GLuint, GLsizei], requires='OpenGL 4.2')
glEdgeFlag = _link_function('glEdgeFlag', None, [GLboolean], requires='OpenGL 1.0')
glEdgeFlagPointer = _link_function('glEdgeFlagPointer', None, [GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glEdgeFlagv = _link_function('glEdgeFlagv', None, [POINTER(GLboolean)], requires='OpenGL 1.0')
glEnable = _link_function('glEnable', None, [GLenum], requires='OpenGL 1.0')
glEnableClientState = _link_function('glEnableClientState', None, [GLenum], requires='OpenGL 1.1')
glEnableVertexArrayAttrib = _link_function('glEnableVertexArrayAttrib', None, [GLuint, GLuint], requires='OpenGL 4.5')
glEnableVertexAttribArray = _link_function('glEnableVertexAttribArray', None, [GLuint], requires='OpenGL 2.0')
glEnablei = _link_function('glEnablei', None, [GLenum, GLuint], requires='OpenGL 3.0')
glEnd = _link_function('glEnd', None, [], requires='OpenGL 1.0')
glEndConditionalRender = _link_function('glEndConditionalRender', None, [], requires='OpenGL 3.0')
glEndList = _link_function('glEndList', None, [], requires='OpenGL 1.0')
glEndQuery = _link_function('glEndQuery', None, [GLenum], requires='OpenGL 1.5')
glEndQueryIndexed = _link_function('glEndQueryIndexed', None, [GLenum, GLuint], requires='OpenGL 4.0')
glEndTransformFeedback = _link_function('glEndTransformFeedback', None, [], requires='OpenGL 3.0')
glEvalCoord1d = _link_function('glEvalCoord1d', None, [GLdouble], requires='OpenGL 1.0')
glEvalCoord1dv = _link_function('glEvalCoord1dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glEvalCoord1f = _link_function('glEvalCoord1f', None, [GLfloat], requires='OpenGL 1.0')
glEvalCoord1fv = _link_function('glEvalCoord1fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glEvalCoord2d = _link_function('glEvalCoord2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0')
glEvalCoord2dv = _link_function('glEvalCoord2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glEvalCoord2f = _link_function('glEvalCoord2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0')
glEvalCoord2fv = _link_function('glEvalCoord2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glEvalMesh1 = _link_function('glEvalMesh1', None, [GLenum, GLint, GLint], requires='OpenGL 1.0')
glEvalMesh2 = _link_function('glEvalMesh2', None, [GLenum, GLint, GLint, GLint, GLint], requires='OpenGL 1.0')
glEvalPoint1 = _link_function('glEvalPoint1', None, [GLint], requires='OpenGL 1.0')
glEvalPoint2 = _link_function('glEvalPoint2', None, [GLint, GLint], requires='OpenGL 1.0')
glFeedbackBuffer = _link_function('glFeedbackBuffer', None, [GLsizei, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glFinish = _link_function('glFinish', None, [], requires='OpenGL 1.0')
glFlush = _link_function('glFlush', None, [], requires='OpenGL 1.0')
glFlushMappedBufferRange = _link_function('glFlushMappedBufferRange', None, [GLenum, GLintptr, GLsizeiptr], requires='OpenGL 3.0')
glFlushMappedNamedBufferRange = _link_function('glFlushMappedNamedBufferRange', None, [GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.5')
glFogCoordPointer = _link_function('glFogCoordPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.4')
glFogCoordd = _link_function('glFogCoordd', None, [GLdouble], requires='OpenGL 1.4')
glFogCoorddv = _link_function('glFogCoorddv', None, [POINTER(GLdouble)], requires='OpenGL 1.4')
glFogCoordf = _link_function('glFogCoordf', None, [GLfloat], requires='OpenGL 1.4')
glFogCoordfv = _link_function('glFogCoordfv', None, [POINTER(GLfloat)], requires='OpenGL 1.4')
glFogf = _link_function('glFogf', None, [GLenum, GLfloat], requires='OpenGL 1.0')
glFogfv = _link_function('glFogfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glFogi = _link_function('glFogi', None, [GLenum, GLint], requires='OpenGL 1.0')
glFogiv = _link_function('glFogiv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glFramebufferParameteri = _link_function('glFramebufferParameteri', None, [GLenum, GLenum, GLint], requires='OpenGL 4.3')
glFramebufferRenderbuffer = _link_function('glFramebufferRenderbuffer', None, [GLenum, GLenum, GLenum, GLuint], requires='OpenGL 3.0')
glFramebufferRenderbufferEXT = _link_function('glFramebufferRenderbufferEXT', None, [GLenum, GLenum, GLenum, GLuint], requires='None')
glFramebufferTexture = _link_function('glFramebufferTexture', None, [GLenum, GLenum, GLuint, GLint], requires='OpenGL 3.2')
glFramebufferTexture1D = _link_function('glFramebufferTexture1D', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='OpenGL 3.0')
glFramebufferTexture1DEXT = _link_function('glFramebufferTexture1DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='None')
glFramebufferTexture2D = _link_function('glFramebufferTexture2D', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='OpenGL 3.0')
glFramebufferTexture2DEXT = _link_function('glFramebufferTexture2DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint], requires='None')
glFramebufferTexture3D = _link_function('glFramebufferTexture3D', None, [GLenum, GLenum, GLenum, GLuint, GLint, GLint], requires='OpenGL 3.0')
glFramebufferTexture3DEXT = _link_function('glFramebufferTexture3DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint, GLint], requires='None')
glFramebufferTextureLayer = _link_function('glFramebufferTextureLayer', None, [GLenum, GLenum, GLuint, GLint, GLint], requires='OpenGL 3.0')
glFrontFace = _link_function('glFrontFace', None, [GLenum], requires='OpenGL 1.0')
glFrustum = _link_function('glFrustum', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glGenBuffers = _link_function('glGenBuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5')
glGenFramebuffers = _link_function('glGenFramebuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glGenFramebuffersEXT = _link_function('glGenFramebuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None')
glGenLists = _link_function('glGenLists', GLuint, [GLsizei], requires='OpenGL 1.0')
glGenProgramPipelines = _link_function('glGenProgramPipelines', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.1')
glGenQueries = _link_function('glGenQueries', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.5')
glGenRenderbuffers = _link_function('glGenRenderbuffers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glGenRenderbuffersEXT = _link_function('glGenRenderbuffersEXT', None, [GLsizei, POINTER(GLuint)], requires='None')
glGenSamplers = _link_function('glGenSamplers', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.3')
glGenTextures = _link_function('glGenTextures', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.1')
glGenTransformFeedbacks = _link_function('glGenTransformFeedbacks', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 4.0')
glGenVertexArrays = _link_function('glGenVertexArrays', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glGenerateMipmap = _link_function('glGenerateMipmap', None, [GLenum], requires='OpenGL 3.0')
glGenerateMipmapEXT = _link_function('glGenerateMipmapEXT', None, [GLenum], requires='None')
glGenerateTextureMipmap = _link_function('glGenerateTextureMipmap', None, [GLuint], requires='OpenGL 4.5')
glGetActiveAtomicCounterBufferiv = _link_function('glGetActiveAtomicCounterBufferiv', None, [GLuint, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.2')
glGetActiveAttrib = _link_function('glGetActiveAttrib', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], requires='OpenGL 2.0')
glGetActiveSubroutineName = _link_function('glGetActiveSubroutineName', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.0')
glGetActiveSubroutineUniformName = _link_function('glGetActiveSubroutineUniformName', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.0')
glGetActiveSubroutineUniformiv = _link_function('glGetActiveSubroutineUniformiv', None, [GLuint, GLenum, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.0')
glGetActiveUniform = _link_function('glGetActiveUniform', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], requires='OpenGL 2.0')
glGetActiveUniformBlockName = _link_function('glGetActiveUniformBlockName', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 3.1')
glGetActiveUniformBlockiv = _link_function('glGetActiveUniformBlockiv', None, [GLuint, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.1')
glGetActiveUniformName = _link_function('glGetActiveUniformName', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 3.1')
glGetActiveUniformsiv = _link_function('glGetActiveUniformsiv', None, [GLuint, GLsizei, POINTER(GLuint), GLenum, POINTER(GLint)], requires='OpenGL 3.1')
glGetAttachedShaders = _link_function('glGetAttachedShaders', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLuint)], requires='OpenGL 2.0')
glGetAttribLocation = _link_function('glGetAttribLocation', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 2.0')
glGetBooleani_v = _link_function('glGetBooleani_v', None, [GLenum, GLuint, POINTER(GLboolean)], requires='OpenGL 3.0')
glGetBooleanv = _link_function('glGetBooleanv', None, [GLenum, POINTER(GLboolean)], requires='OpenGL 1.0')
glGetBufferParameteri64v = _link_function('glGetBufferParameteri64v', None, [GLenum, GLenum, POINTER(GLint64)], requires='OpenGL 3.2')
glGetBufferParameteriv = _link_function('glGetBufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.5')
glGetBufferPointerv = _link_function('glGetBufferPointerv', None, [GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.5')
glGetBufferSubData = _link_function('glGetBufferSubData', None, [GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 1.5')
glGetClipPlane = _link_function('glGetClipPlane', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.0')
glGetCompressedTexImage = _link_function('glGetCompressedTexImage', None, [GLenum, GLint, POINTER(GLvoid)], requires='OpenGL 1.3')
glGetCompressedTextureImage = _link_function('glGetCompressedTextureImage', None, [GLuint, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetCompressedTextureSubImage = _link_function('glGetCompressedTextureSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetDebugMessageLog = _link_function('glGetDebugMessageLog', GLuint, [GLuint, GLsizei, POINTER(GLenum), POINTER(GLenum), POINTER(GLuint), POINTER(GLenum), POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3')
glGetDoublei_v = _link_function('glGetDoublei_v', None, [GLenum, GLuint, POINTER(GLdouble)], requires='OpenGL 4.1')
glGetDoublev = _link_function('glGetDoublev', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.0')
glGetError = _link_function('glGetError', GLenum, [], requires='OpenGL 1.0')
glGetFloati_v = _link_function('glGetFloati_v', None, [GLenum, GLuint, POINTER(GLfloat)], requires='OpenGL 4.1')
glGetFloatv = _link_function('glGetFloatv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetFragDataIndex = _link_function('glGetFragDataIndex', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 3.3')
glGetFragDataLocation = _link_function('glGetFragDataLocation', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 3.0')
glGetFramebufferAttachmentParameteriv = _link_function('glGetFramebufferAttachmentParameteriv', None, [GLenum, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0')
glGetFramebufferAttachmentParameterivEXT = _link_function('glGetFramebufferAttachmentParameterivEXT', None, [GLenum, GLenum, GLenum, POINTER(GLint)], requires='None')
glGetFramebufferParameteriv = _link_function('glGetFramebufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.3')
glGetGraphicsResetStatus = _link_function('glGetGraphicsResetStatus', GLenum, [], requires='OpenGL 4.5')
glGetInteger64i_v = _link_function('glGetInteger64i_v', None, [GLenum, GLuint, POINTER(GLint64)], requires='OpenGL 3.2')
glGetInteger64v = _link_function('glGetInteger64v', None, [GLenum, POINTER(GLint64)], requires='OpenGL 3.2')
glGetIntegeri_v = _link_function('glGetIntegeri_v', None, [GLenum, GLuint, POINTER(GLint)], requires='OpenGL 3.1')
glGetIntegerv = _link_function('glGetIntegerv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetInternalformati64v = _link_function('glGetInternalformati64v', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLint64)], requires='OpenGL 4.3')
glGetInternalformativ = _link_function('glGetInternalformativ', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLint)], requires='OpenGL 4.2')
glGetLightfv = _link_function('glGetLightfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetLightiv = _link_function('glGetLightiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetMapdv = _link_function('glGetMapdv', None, [GLenum, GLenum, POINTER(GLdouble)], requires='OpenGL 1.0')
glGetMapfv = _link_function('glGetMapfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetMapiv = _link_function('glGetMapiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetMaterialfv = _link_function('glGetMaterialfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetMaterialiv = _link_function('glGetMaterialiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetMultisamplefv = _link_function('glGetMultisamplefv', None, [GLenum, GLuint, POINTER(GLfloat)], requires='OpenGL 3.2')
glGetNamedBufferParameteri64v = _link_function('glGetNamedBufferParameteri64v', None, [GLuint, GLenum, POINTER(GLint64)], requires='OpenGL 4.5')
glGetNamedBufferParameteriv = _link_function('glGetNamedBufferParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetNamedBufferPointerv = _link_function('glGetNamedBufferPointerv', None, [GLuint, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetNamedBufferSubData = _link_function('glGetNamedBufferSubData', None, [GLuint, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetNamedFramebufferAttachmentParameteriv = _link_function('glGetNamedFramebufferAttachmentParameteriv', None, [GLuint, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetNamedFramebufferParameteriv = _link_function('glGetNamedFramebufferParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetNamedRenderbufferParameteriv = _link_function('glGetNamedRenderbufferParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetObjectLabel = _link_function('glGetObjectLabel', None, [GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3')
glGetObjectPtrLabel = _link_function('glGetObjectPtrLabel', None, [POINTER(GLvoid), GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3')
glGetPixelMapfv = _link_function('glGetPixelMapfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetPixelMapuiv = _link_function('glGetPixelMapuiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 1.0')
glGetPixelMapusv = _link_function('glGetPixelMapusv', None, [GLenum, POINTER(GLushort)], requires='OpenGL 1.0')
glGetPointerv = _link_function('glGetPointerv', None, [GLenum, POINTER(GLvoid)], requires='OpenGL 4.3')
glGetPolygonStipple = _link_function('glGetPolygonStipple', None, [POINTER(GLubyte)], requires='OpenGL 1.0')
glGetProgramBinary = _link_function('glGetProgramBinary', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLenum), POINTER(GLvoid)], requires='OpenGL 4.1')
glGetProgramInfoLog = _link_function('glGetProgramInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 2.0')
glGetProgramInterfaceiv = _link_function('glGetProgramInterfaceiv', None, [GLuint, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.3')
glGetProgramPipelineInfoLog = _link_function('glGetProgramPipelineInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.1')
glGetProgramPipelineiv = _link_function('glGetProgramPipelineiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.1')
glGetProgramResourceIndex = _link_function('glGetProgramResourceIndex', GLuint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.3')
glGetProgramResourceLocation = _link_function('glGetProgramResourceLocation', GLint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.3')
glGetProgramResourceLocationIndex = _link_function('glGetProgramResourceLocationIndex', GLint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.3')
glGetProgramResourceName = _link_function('glGetProgramResourceName', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 4.3')
glGetProgramResourceiv = _link_function('glGetProgramResourceiv', None, [GLuint, GLenum, GLuint, GLsizei, POINTER(GLenum), GLsizei, POINTER(GLsizei), POINTER(GLint)], requires='OpenGL 4.3')
glGetProgramStageiv = _link_function('glGetProgramStageiv', None, [GLuint, GLenum, GLenum, POINTER(GLint)], requires='OpenGL 4.0')
glGetProgramiv = _link_function('glGetProgramiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 2.0')
glGetQueryBufferObjecti64v = _link_function('glGetQueryBufferObjecti64v', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5')
glGetQueryBufferObjectiv = _link_function('glGetQueryBufferObjectiv', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5')
glGetQueryBufferObjectui64v = _link_function('glGetQueryBufferObjectui64v', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5')
glGetQueryBufferObjectuiv = _link_function('glGetQueryBufferObjectuiv', None, [GLuint, GLuint, GLenum, GLintptr], requires='OpenGL 4.5')
glGetQueryIndexediv = _link_function('glGetQueryIndexediv', None, [GLenum, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.0')
glGetQueryObjecti64v = _link_function('glGetQueryObjecti64v', None, [GLuint, GLenum, POINTER(GLint64)], requires='OpenGL 3.3')
glGetQueryObjectiv = _link_function('glGetQueryObjectiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 1.5')
glGetQueryObjectui64v = _link_function('glGetQueryObjectui64v', None, [GLuint, GLenum, POINTER(GLuint64)], requires='OpenGL 3.3')
glGetQueryObjectuiv = _link_function('glGetQueryObjectuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 1.5')
glGetQueryiv = _link_function('glGetQueryiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.5')
glGetRenderbufferParameteriv = _link_function('glGetRenderbufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0')
glGetRenderbufferParameterivEXT = _link_function('glGetRenderbufferParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], requires='None')
glGetSamplerParameterIiv = _link_function('glGetSamplerParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3')
glGetSamplerParameterIuiv = _link_function('glGetSamplerParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glGetSamplerParameterfv = _link_function('glGetSamplerParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 3.3')
glGetSamplerParameteriv = _link_function('glGetSamplerParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3')
glGetShaderInfoLog = _link_function('glGetShaderInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 2.0')
glGetShaderPrecisionFormat = _link_function('glGetShaderPrecisionFormat', None, [GLenum, GLenum, POINTER(GLint), POINTER(GLint)], requires='OpenGL 4.1')
glGetShaderSource = _link_function('glGetShaderSource', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], requires='OpenGL 2.0')
glGetShaderiv = _link_function('glGetShaderiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 2.0')
glGetString = _link_function('glGetString', POINTER(GLubyte), [GLenum], requires='OpenGL 1.0')
glGetStringi = _link_function('glGetStringi', POINTER(GLubyte), [GLenum, GLuint], requires='OpenGL 3.0')
glGetSubroutineIndex = _link_function('glGetSubroutineIndex', GLuint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.0')
glGetSubroutineUniformLocation = _link_function('glGetSubroutineUniformLocation', GLint, [GLuint, GLenum, POINTER(GLchar)], requires='OpenGL 4.0')
glGetTexEnvfv = _link_function('glGetTexEnvfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetTexEnviv = _link_function('glGetTexEnviv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetTexGendv = _link_function('glGetTexGendv', None, [GLenum, GLenum, POINTER(GLdouble)], requires='OpenGL 1.0')
glGetTexGenfv = _link_function('glGetTexGenfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetTexGeniv = _link_function('glGetTexGeniv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetTexImage = _link_function('glGetTexImage', None, [GLenum, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0')
glGetTexLevelParameterfv = _link_function('glGetTexLevelParameterfv', None, [GLenum, GLint, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetTexLevelParameteriv = _link_function('glGetTexLevelParameteriv', None, [GLenum, GLint, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetTexParameterIiv = _link_function('glGetTexParameterIiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0')
glGetTexParameterIuiv = _link_function('glGetTexParameterIuiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.0')
glGetTexParameterfv = _link_function('glGetTexParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glGetTexParameteriv = _link_function('glGetTexParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glGetTextureImage = _link_function('glGetTextureImage', None, [GLuint, GLint, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetTextureLevelParameterfv = _link_function('glGetTextureLevelParameterfv', None, [GLuint, GLint, GLenum, POINTER(GLfloat)], requires='OpenGL 4.5')
glGetTextureLevelParameteriv = _link_function('glGetTextureLevelParameteriv', None, [GLuint, GLint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetTextureParameterIiv = _link_function('glGetTextureParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetTextureParameterIuiv = _link_function('glGetTextureParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 4.5')
glGetTextureParameterfv = _link_function('glGetTextureParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 4.5')
glGetTextureParameteriv = _link_function('glGetTextureParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetTextureSubImage = _link_function('glGetTextureSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetTransformFeedbackVarying = _link_function('glGetTransformFeedbackVarying', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLsizei), POINTER(GLenum), POINTER(GLchar)], requires='OpenGL 3.0')
glGetTransformFeedbacki64_v = _link_function('glGetTransformFeedbacki64_v', None, [GLuint, GLenum, GLuint, POINTER(GLint64)], requires='OpenGL 4.5')
glGetTransformFeedbacki_v = _link_function('glGetTransformFeedbacki_v', None, [GLuint, GLenum, GLuint, POINTER(GLint)], requires='OpenGL 4.5')
glGetTransformFeedbackiv = _link_function('glGetTransformFeedbackiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetUniformBlockIndex = _link_function('glGetUniformBlockIndex', GLuint, [GLuint, POINTER(GLchar)], requires='OpenGL 3.1')
glGetUniformIndices = _link_function('glGetUniformIndices', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLuint)], requires='OpenGL 3.1')
glGetUniformLocation = _link_function('glGetUniformLocation', GLint, [GLuint, POINTER(GLchar)], requires='OpenGL 2.0')
glGetUniformSubroutineuiv = _link_function('glGetUniformSubroutineuiv', None, [GLenum, GLint, POINTER(GLuint)], requires='OpenGL 4.0')
glGetUniformdv = _link_function('glGetUniformdv', None, [GLuint, GLint, POINTER(GLdouble)], requires='OpenGL 4.0')
glGetUniformfv = _link_function('glGetUniformfv', None, [GLuint, GLint, POINTER(GLfloat)], requires='OpenGL 2.0')
glGetUniformiv = _link_function('glGetUniformiv', None, [GLuint, GLint, POINTER(GLint)], requires='OpenGL 2.0')
glGetUniformuiv = _link_function('glGetUniformuiv', None, [GLuint, GLint, POINTER(GLuint)], requires='OpenGL 3.0')
glGetVertexArrayIndexed64iv = _link_function('glGetVertexArrayIndexed64iv', None, [GLuint, GLuint, GLenum, POINTER(GLint64)], requires='OpenGL 4.5')
glGetVertexArrayIndexediv = _link_function('glGetVertexArrayIndexediv', None, [GLuint, GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetVertexArrayiv = _link_function('glGetVertexArrayiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glGetVertexAttribIiv = _link_function('glGetVertexAttribIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.0')
glGetVertexAttribIuiv = _link_function('glGetVertexAttribIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 3.0')
glGetVertexAttribLdv = _link_function('glGetVertexAttribLdv', None, [GLuint, GLenum, POINTER(GLdouble)], requires='OpenGL 4.1')
glGetVertexAttribPointerv = _link_function('glGetVertexAttribPointerv', None, [GLuint, GLenum, POINTER(GLvoid)], requires='OpenGL 2.0')
glGetVertexAttribdv = _link_function('glGetVertexAttribdv', None, [GLuint, GLenum, POINTER(GLdouble)], requires='OpenGL 2.0')
glGetVertexAttribfv = _link_function('glGetVertexAttribfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 2.0')
glGetVertexAttribiv = _link_function('glGetVertexAttribiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 2.0')
glGetnColorTable = _link_function('glGetnColorTable', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnCompressedTexImage = _link_function('glGetnCompressedTexImage', None, [GLenum, GLint, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnConvolutionFilter = _link_function('glGetnConvolutionFilter', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnHistogram = _link_function('glGetnHistogram', None, [GLenum, GLboolean, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnMapdv = _link_function('glGetnMapdv', None, [GLenum, GLenum, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.5')
glGetnMapfv = _link_function('glGetnMapfv', None, [GLenum, GLenum, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.5')
glGetnMapiv = _link_function('glGetnMapiv', None, [GLenum, GLenum, GLsizei, POINTER(GLint)], requires='OpenGL 4.5')
glGetnMinmax = _link_function('glGetnMinmax', None, [GLenum, GLboolean, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnPixelMapfv = _link_function('glGetnPixelMapfv', None, [GLenum, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.5')
glGetnPixelMapuiv = _link_function('glGetnPixelMapuiv', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glGetnPixelMapusv = _link_function('glGetnPixelMapusv', None, [GLenum, GLsizei, POINTER(GLushort)], requires='OpenGL 4.5')
glGetnPolygonStipple = _link_function('glGetnPolygonStipple', None, [GLsizei, POINTER(GLubyte)], requires='OpenGL 4.5')
glGetnSeparableFilter = _link_function('glGetnSeparableFilter', None, [GLenum, GLenum, GLenum, GLsizei, POINTER(GLvoid), GLsizei, POINTER(GLvoid), POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnTexImage = _link_function('glGetnTexImage', None, [GLenum, GLint, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glGetnUniformdv = _link_function('glGetnUniformdv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.5')
glGetnUniformfv = _link_function('glGetnUniformfv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.5')
glGetnUniformiv = _link_function('glGetnUniformiv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.5')
glGetnUniformuiv = _link_function('glGetnUniformuiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.5')
glHint = _link_function('glHint', None, [GLenum, GLenum], requires='OpenGL 1.0')
glIndexMask = _link_function('glIndexMask', None, [GLuint], requires='OpenGL 1.0')
glIndexPointer = _link_function('glIndexPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glIndexd = _link_function('glIndexd', None, [GLdouble], requires='OpenGL 1.0')
glIndexdv = _link_function('glIndexdv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glIndexf = _link_function('glIndexf', None, [GLfloat], requires='OpenGL 1.0')
glIndexfv = _link_function('glIndexfv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glIndexi = _link_function('glIndexi', None, [GLint], requires='OpenGL 1.0')
glIndexiv = _link_function('glIndexiv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glIndexs = _link_function('glIndexs', None, [GLshort], requires='OpenGL 1.0')
glIndexsv = _link_function('glIndexsv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glIndexub = _link_function('glIndexub', None, [GLubyte], requires='OpenGL 1.1')
glIndexubv = _link_function('glIndexubv', None, [POINTER(GLubyte)], requires='OpenGL 1.1')
glInitNames = _link_function('glInitNames', None, [], requires='OpenGL 1.0')
glInterleavedArrays = _link_function('glInterleavedArrays', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glInvalidateBufferData = _link_function('glInvalidateBufferData', None, [GLuint], requires='OpenGL 4.3')
glInvalidateBufferSubData = _link_function('glInvalidateBufferSubData', None, [GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.3')
glInvalidateFramebuffer = _link_function('glInvalidateFramebuffer', None, [GLenum, GLsizei, POINTER(GLenum)], requires='OpenGL 4.3')
glInvalidateNamedFramebufferData = _link_function('glInvalidateNamedFramebufferData', None, [GLuint, GLsizei, POINTER(GLenum)], requires='OpenGL 4.5')
glInvalidateNamedFramebufferSubData = _link_function('glInvalidateNamedFramebufferSubData', None, [GLuint, GLsizei, POINTER(GLenum), GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.5')
glInvalidateSubFramebuffer = _link_function('glInvalidateSubFramebuffer', None, [GLenum, GLsizei, POINTER(GLenum), GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.3')
glInvalidateTexImage = _link_function('glInvalidateTexImage', None, [GLuint, GLint], requires='OpenGL 4.3')
glInvalidateTexSubImage = _link_function('glInvalidateTexSubImage', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.3')
glIsBuffer = _link_function('glIsBuffer', GLboolean, [GLuint], requires='OpenGL 1.5')
glIsEnabled = _link_function('glIsEnabled', GLboolean, [GLenum], requires='OpenGL 1.0')
glIsEnabledi = _link_function('glIsEnabledi', GLboolean, [GLenum, GLuint], requires='OpenGL 3.0')
glIsFramebuffer = _link_function('glIsFramebuffer', GLboolean, [GLuint], requires='OpenGL 3.0')
glIsFramebufferEXT = _link_function('glIsFramebufferEXT', GLboolean, [GLuint], requires='None')
glIsList = _link_function('glIsList', GLboolean, [GLuint], requires='OpenGL 1.0')
glIsProgram = _link_function('glIsProgram', GLboolean, [GLuint], requires='OpenGL 2.0')
glIsProgramPipeline = _link_function('glIsProgramPipeline', GLboolean, [GLuint], requires='OpenGL 4.1')
glIsQuery = _link_function('glIsQuery', GLboolean, [GLuint], requires='OpenGL 1.5')
glIsRenderbuffer = _link_function('glIsRenderbuffer', GLboolean, [GLuint], requires='OpenGL 3.0')
glIsRenderbufferEXT = _link_function('glIsRenderbufferEXT', GLboolean, [GLuint], requires='None')
glIsSampler = _link_function('glIsSampler', GLboolean, [GLuint], requires='OpenGL 3.3')
glIsShader = _link_function('glIsShader', GLboolean, [GLuint], requires='OpenGL 2.0')
glIsTexture = _link_function('glIsTexture', GLboolean, [GLuint], requires='OpenGL 1.1')
glIsTransformFeedback = _link_function('glIsTransformFeedback', GLboolean, [GLuint], requires='OpenGL 4.0')
glIsVertexArray = _link_function('glIsVertexArray', GLboolean, [GLuint], requires='OpenGL 3.0')
glLightModelf = _link_function('glLightModelf', None, [GLenum, GLfloat], requires='OpenGL 1.0')
glLightModelfv = _link_function('glLightModelfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glLightModeli = _link_function('glLightModeli', None, [GLenum, GLint], requires='OpenGL 1.0')
glLightModeliv = _link_function('glLightModeliv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glLightf = _link_function('glLightf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0')
glLightfv = _link_function('glLightfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glLighti = _link_function('glLighti', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0')
glLightiv = _link_function('glLightiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glLineStipple = _link_function('glLineStipple', None, [GLint, GLushort], requires='OpenGL 1.0')
glLineWidth = _link_function('glLineWidth', None, [GLfloat], requires='OpenGL 1.0')
glLinkProgram = _link_function('glLinkProgram', None, [GLuint], requires='OpenGL 2.0')
glListBase = _link_function('glListBase', None, [GLuint], requires='OpenGL 1.0')
glLoadIdentity = _link_function('glLoadIdentity', None, [], requires='OpenGL 1.0')
glLoadMatrixd = _link_function('glLoadMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glLoadMatrixf = _link_function('glLoadMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glLoadName = _link_function('glLoadName', None, [GLuint], requires='OpenGL 1.0')
glLoadTransposeMatrixd = _link_function('glLoadTransposeMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.3')
glLoadTransposeMatrixf = _link_function('glLoadTransposeMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.3')
glLogicOp = _link_function('glLogicOp', None, [GLenum], requires='OpenGL 1.0')
glMap1d = _link_function('glMap1d', None, [GLenum, GLdouble, GLdouble, GLint, GLint, POINTER(GLdouble)], requires='OpenGL 1.0')
glMap1f = _link_function('glMap1f', None, [GLenum, GLfloat, GLfloat, GLint, GLint, POINTER(GLfloat)], requires='OpenGL 1.0')
glMap2d = _link_function('glMap2d', None, [GLenum, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, POINTER(GLdouble)], requires='OpenGL 1.0')
glMap2f = _link_function('glMap2f', None, [GLenum, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, POINTER(GLfloat)], requires='OpenGL 1.0')
glMapBuffer = _link_function('glMapBuffer', POINTER(None), [GLenum, GLenum], requires='OpenGL 1.5')
glMapBufferRange = _link_function('glMapBufferRange', POINTER(None), [GLenum, GLintptr, GLsizeiptr, GLbitfield], requires='OpenGL 3.0')
glMapGrid1d = _link_function('glMapGrid1d', None, [GLint, GLdouble, GLdouble], requires='OpenGL 1.0')
glMapGrid1f = _link_function('glMapGrid1f', None, [GLint, GLfloat, GLfloat], requires='OpenGL 1.0')
glMapGrid2d = _link_function('glMapGrid2d', None, [GLint, GLdouble, GLdouble, GLint, GLdouble, GLdouble], requires='OpenGL 1.0')
glMapGrid2f = _link_function('glMapGrid2f', None, [GLint, GLfloat, GLfloat, GLint, GLfloat, GLfloat], requires='OpenGL 1.0')
glMapNamedBuffer = _link_function('glMapNamedBuffer', POINTER(None), [GLuint, GLenum], requires='OpenGL 4.5')
glMapNamedBufferRange = _link_function('glMapNamedBufferRange', POINTER(None), [GLuint, GLintptr, GLsizeiptr, GLbitfield], requires='OpenGL 4.5')
glMaterialf = _link_function('glMaterialf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0')
glMaterialfv = _link_function('glMaterialfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glMateriali = _link_function('glMateriali', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0')
glMaterialiv = _link_function('glMaterialiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glMatrixMode = _link_function('glMatrixMode', None, [GLenum], requires='OpenGL 1.0')
glMemoryBarrier = _link_function('glMemoryBarrier', None, [GLbitfield], requires='OpenGL 4.2')
glMemoryBarrierByRegion = _link_function('glMemoryBarrierByRegion', None, [GLbitfield], requires='OpenGL 4.5')
glMinSampleShading = _link_function('glMinSampleShading', None, [GLfloat], requires='OpenGL 4.0')
glMultMatrixd = _link_function('glMultMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glMultMatrixf = _link_function('glMultMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glMultTransposeMatrixd = _link_function('glMultTransposeMatrixd', None, [POINTER(GLdouble)], requires='OpenGL 1.3')
glMultTransposeMatrixf = _link_function('glMultTransposeMatrixf', None, [POINTER(GLfloat)], requires='OpenGL 1.3')
glMultiDrawArrays = _link_function('glMultiDrawArrays', None, [GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei], requires='OpenGL 1.4')
glMultiDrawArraysIndirect = _link_function('glMultiDrawArraysIndirect', None, [GLenum, POINTER(GLvoid), GLsizei, GLsizei], requires='OpenGL 4.3')
glMultiDrawArraysIndirectCount = _link_function('glMultiDrawArraysIndirectCount', None, [GLenum, POINTER(GLvoid), GLintptr, GLsizei, GLsizei], requires='OpenGL 4.6')
glMultiDrawElements = _link_function('glMultiDrawElements', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 1.4')
glMultiDrawElementsBaseVertex = _link_function('glMultiDrawElementsBaseVertex', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(GLvoid), GLsizei, POINTER(GLint)], requires='OpenGL 3.2')
glMultiDrawElementsIndirect = _link_function('glMultiDrawElementsIndirect', None, [GLenum, GLenum, POINTER(GLvoid), GLsizei, GLsizei], requires='OpenGL 4.3')
glMultiDrawElementsIndirectCount = _link_function('glMultiDrawElementsIndirectCount', None, [GLenum, GLenum, POINTER(GLvoid), GLintptr, GLsizei, GLsizei], requires='OpenGL 4.6')
glMultiTexCoord1d = _link_function('glMultiTexCoord1d', None, [GLenum, GLdouble], requires='OpenGL 1.3')
glMultiTexCoord1dv = _link_function('glMultiTexCoord1dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3')
glMultiTexCoord1f = _link_function('glMultiTexCoord1f', None, [GLenum, GLfloat], requires='OpenGL 1.3')
glMultiTexCoord1fv = _link_function('glMultiTexCoord1fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3')
glMultiTexCoord1i = _link_function('glMultiTexCoord1i', None, [GLenum, GLint], requires='OpenGL 1.3')
glMultiTexCoord1iv = _link_function('glMultiTexCoord1iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3')
glMultiTexCoord1s = _link_function('glMultiTexCoord1s', None, [GLenum, GLshort], requires='OpenGL 1.3')
glMultiTexCoord1sv = _link_function('glMultiTexCoord1sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3')
glMultiTexCoord2d = _link_function('glMultiTexCoord2d', None, [GLenum, GLdouble, GLdouble], requires='OpenGL 1.3')
glMultiTexCoord2dv = _link_function('glMultiTexCoord2dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3')
glMultiTexCoord2f = _link_function('glMultiTexCoord2f', None, [GLenum, GLfloat, GLfloat], requires='OpenGL 1.3')
glMultiTexCoord2fv = _link_function('glMultiTexCoord2fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3')
glMultiTexCoord2i = _link_function('glMultiTexCoord2i', None, [GLenum, GLint, GLint], requires='OpenGL 1.3')
glMultiTexCoord2iv = _link_function('glMultiTexCoord2iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3')
glMultiTexCoord2s = _link_function('glMultiTexCoord2s', None, [GLenum, GLshort, GLshort], requires='OpenGL 1.3')
glMultiTexCoord2sv = _link_function('glMultiTexCoord2sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3')
glMultiTexCoord3d = _link_function('glMultiTexCoord3d', None, [GLenum, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.3')
glMultiTexCoord3dv = _link_function('glMultiTexCoord3dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3')
glMultiTexCoord3f = _link_function('glMultiTexCoord3f', None, [GLenum, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.3')
glMultiTexCoord3fv = _link_function('glMultiTexCoord3fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3')
glMultiTexCoord3i = _link_function('glMultiTexCoord3i', None, [GLenum, GLint, GLint, GLint], requires='OpenGL 1.3')
glMultiTexCoord3iv = _link_function('glMultiTexCoord3iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3')
glMultiTexCoord3s = _link_function('glMultiTexCoord3s', None, [GLenum, GLshort, GLshort, GLshort], requires='OpenGL 1.3')
glMultiTexCoord3sv = _link_function('glMultiTexCoord3sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3')
glMultiTexCoord4d = _link_function('glMultiTexCoord4d', None, [GLenum, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.3')
glMultiTexCoord4dv = _link_function('glMultiTexCoord4dv', None, [GLenum, POINTER(GLdouble)], requires='OpenGL 1.3')
glMultiTexCoord4f = _link_function('glMultiTexCoord4f', None, [GLenum, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.3')
glMultiTexCoord4fv = _link_function('glMultiTexCoord4fv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.3')
glMultiTexCoord4i = _link_function('glMultiTexCoord4i', None, [GLenum, GLint, GLint, GLint, GLint], requires='OpenGL 1.3')
glMultiTexCoord4iv = _link_function('glMultiTexCoord4iv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.3')
glMultiTexCoord4s = _link_function('glMultiTexCoord4s', None, [GLenum, GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.3')
glMultiTexCoord4sv = _link_function('glMultiTexCoord4sv', None, [GLenum, POINTER(GLshort)], requires='OpenGL 1.3')
glMultiTexCoordP1ui = _link_function('glMultiTexCoordP1ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3')
glMultiTexCoordP1uiv = _link_function('glMultiTexCoordP1uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glMultiTexCoordP2ui = _link_function('glMultiTexCoordP2ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3')
glMultiTexCoordP2uiv = _link_function('glMultiTexCoordP2uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glMultiTexCoordP3ui = _link_function('glMultiTexCoordP3ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3')
glMultiTexCoordP3uiv = _link_function('glMultiTexCoordP3uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glMultiTexCoordP4ui = _link_function('glMultiTexCoordP4ui', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.3')
glMultiTexCoordP4uiv = _link_function('glMultiTexCoordP4uiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glNamedBufferData = _link_function('glNamedBufferData', None, [GLuint, GLsizeiptr, POINTER(GLvoid), GLenum], requires='OpenGL 4.5')
glNamedBufferStorage = _link_function('glNamedBufferStorage', None, [GLuint, GLsizeiptr, POINTER(GLvoid), GLbitfield], requires='OpenGL 4.5')
glNamedBufferSubData = _link_function('glNamedBufferSubData', None, [GLuint, GLintptr, GLsizeiptr, POINTER(GLvoid)], requires='OpenGL 4.5')
glNamedFramebufferDrawBuffer = _link_function('glNamedFramebufferDrawBuffer', None, [GLuint, GLenum], requires='OpenGL 4.5')
glNamedFramebufferDrawBuffers = _link_function('glNamedFramebufferDrawBuffers', None, [GLuint, GLsizei, POINTER(GLenum)], requires='OpenGL 4.5')
glNamedFramebufferParameteri = _link_function('glNamedFramebufferParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 4.5')
glNamedFramebufferReadBuffer = _link_function('glNamedFramebufferReadBuffer', None, [GLuint, GLenum], requires='OpenGL 4.5')
glNamedFramebufferRenderbuffer = _link_function('glNamedFramebufferRenderbuffer', None, [GLuint, GLenum, GLenum, GLuint], requires='OpenGL 4.5')
glNamedFramebufferTexture = _link_function('glNamedFramebufferTexture', None, [GLuint, GLenum, GLuint, GLint], requires='OpenGL 4.5')
glNamedFramebufferTextureLayer = _link_function('glNamedFramebufferTextureLayer', None, [GLuint, GLenum, GLuint, GLint, GLint], requires='OpenGL 4.5')
glNamedRenderbufferStorage = _link_function('glNamedRenderbufferStorage', None, [GLuint, GLenum, GLsizei, GLsizei], requires='OpenGL 4.5')
glNamedRenderbufferStorageMultisample = _link_function('glNamedRenderbufferStorageMultisample', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 4.5')
glNewList = _link_function('glNewList', None, [GLuint, GLenum], requires='OpenGL 1.0')
glNormal3b = _link_function('glNormal3b', None, [GLbyte, GLbyte, GLbyte], requires='OpenGL 1.0')
glNormal3bv = _link_function('glNormal3bv', None, [POINTER(GLbyte)], requires='OpenGL 1.0')
glNormal3d = _link_function('glNormal3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glNormal3dv = _link_function('glNormal3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glNormal3f = _link_function('glNormal3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glNormal3fv = _link_function('glNormal3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glNormal3i = _link_function('glNormal3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0')
glNormal3iv = _link_function('glNormal3iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glNormal3s = _link_function('glNormal3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glNormal3sv = _link_function('glNormal3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glNormalP3ui = _link_function('glNormalP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glNormalP3uiv = _link_function('glNormalP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glNormalPointer = _link_function('glNormalPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glObjectLabel = _link_function('glObjectLabel', None, [GLenum, GLuint, GLsizei, POINTER(GLchar)], requires='OpenGL 4.3')
glObjectPtrLabel = _link_function('glObjectPtrLabel', None, [POINTER(GLvoid), GLsizei, POINTER(GLchar)], requires='OpenGL 4.3')
glOrtho = _link_function('glOrtho', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glPassThrough = _link_function('glPassThrough', None, [GLfloat], requires='OpenGL 1.0')
glPatchParameterfv = _link_function('glPatchParameterfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 4.0')
glPatchParameteri = _link_function('glPatchParameteri', None, [GLenum, GLint], requires='OpenGL 4.0')
glPauseTransformFeedback = _link_function('glPauseTransformFeedback', None, [], requires='OpenGL 4.0')
glPixelMapfv = _link_function('glPixelMapfv', None, [GLenum, GLsizei, POINTER(GLfloat)], requires='OpenGL 1.0')
glPixelMapuiv = _link_function('glPixelMapuiv', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 1.0')
glPixelMapusv = _link_function('glPixelMapusv', None, [GLenum, GLsizei, POINTER(GLushort)], requires='OpenGL 1.0')
glPixelStoref = _link_function('glPixelStoref', None, [GLenum, GLfloat], requires='OpenGL 1.0')
glPixelStorei = _link_function('glPixelStorei', None, [GLenum, GLint], requires='OpenGL 1.0')
glPixelTransferf = _link_function('glPixelTransferf', None, [GLenum, GLfloat], requires='OpenGL 1.0')
glPixelTransferi = _link_function('glPixelTransferi', None, [GLenum, GLint], requires='OpenGL 1.0')
glPixelZoom = _link_function('glPixelZoom', None, [GLfloat, GLfloat], requires='OpenGL 1.0')
glPointParameterf = _link_function('glPointParameterf', None, [GLenum, GLfloat], requires='OpenGL 1.4')
glPointParameterfv = _link_function('glPointParameterfv', None, [GLenum, POINTER(GLfloat)], requires='OpenGL 1.4')
glPointParameteri = _link_function('glPointParameteri', None, [GLenum, GLint], requires='OpenGL 1.4')
glPointParameteriv = _link_function('glPointParameteriv', None, [GLenum, POINTER(GLint)], requires='OpenGL 1.4')
glPointSize = _link_function('glPointSize', None, [GLfloat], requires='OpenGL 1.0')
glPolygonMode = _link_function('glPolygonMode', None, [GLenum, GLenum], requires='OpenGL 1.0')
glPolygonOffset = _link_function('glPolygonOffset', None, [GLfloat, GLfloat], requires='OpenGL 1.1')
glPolygonOffsetClamp = _link_function('glPolygonOffsetClamp', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 4.6')
glPolygonStipple = _link_function('glPolygonStipple', None, [POINTER(GLubyte)], requires='OpenGL 1.0')
glPopAttrib = _link_function('glPopAttrib', None, [], requires='OpenGL 1.0')
glPopClientAttrib = _link_function('glPopClientAttrib', None, [], requires='OpenGL 1.1')
glPopDebugGroup = _link_function('glPopDebugGroup', None, [], requires='OpenGL 4.3')
glPopMatrix = _link_function('glPopMatrix', None, [], requires='OpenGL 1.0')
glPopName = _link_function('glPopName', None, [], requires='OpenGL 1.0')
glPrimitiveRestartIndex = _link_function('glPrimitiveRestartIndex', None, [GLuint], requires='OpenGL 3.1')
glPrioritizeTextures = _link_function('glPrioritizeTextures', None, [GLsizei, POINTER(GLuint), POINTER(GLfloat)], requires='OpenGL 1.1')
glProgramBinary = _link_function('glProgramBinary', None, [GLuint, GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 4.1')
glProgramParameteri = _link_function('glProgramParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 4.1')
glProgramUniform1d = _link_function('glProgramUniform1d', None, [GLuint, GLint, GLdouble], requires='OpenGL 4.1')
glProgramUniform1dv = _link_function('glProgramUniform1dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniform1f = _link_function('glProgramUniform1f', None, [GLuint, GLint, GLfloat], requires='OpenGL 4.1')
glProgramUniform1fv = _link_function('glProgramUniform1fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniform1i = _link_function('glProgramUniform1i', None, [GLuint, GLint, GLint], requires='OpenGL 4.1')
glProgramUniform1iv = _link_function('glProgramUniform1iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1')
glProgramUniform1ui = _link_function('glProgramUniform1ui', None, [GLuint, GLint, GLuint], requires='OpenGL 4.1')
glProgramUniform1uiv = _link_function('glProgramUniform1uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1')
glProgramUniform2d = _link_function('glProgramUniform2d', None, [GLuint, GLint, GLdouble, GLdouble], requires='OpenGL 4.1')
glProgramUniform2dv = _link_function('glProgramUniform2dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniform2f = _link_function('glProgramUniform2f', None, [GLuint, GLint, GLfloat, GLfloat], requires='OpenGL 4.1')
glProgramUniform2fv = _link_function('glProgramUniform2fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniform2i = _link_function('glProgramUniform2i', None, [GLuint, GLint, GLint, GLint], requires='OpenGL 4.1')
glProgramUniform2iv = _link_function('glProgramUniform2iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1')
glProgramUniform2ui = _link_function('glProgramUniform2ui', None, [GLuint, GLint, GLuint, GLuint], requires='OpenGL 4.1')
glProgramUniform2uiv = _link_function('glProgramUniform2uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1')
glProgramUniform3d = _link_function('glProgramUniform3d', None, [GLuint, GLint, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1')
glProgramUniform3dv = _link_function('glProgramUniform3dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniform3f = _link_function('glProgramUniform3f', None, [GLuint, GLint, GLfloat, GLfloat, GLfloat], requires='OpenGL 4.1')
glProgramUniform3fv = _link_function('glProgramUniform3fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniform3i = _link_function('glProgramUniform3i', None, [GLuint, GLint, GLint, GLint, GLint], requires='OpenGL 4.1')
glProgramUniform3iv = _link_function('glProgramUniform3iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1')
glProgramUniform3ui = _link_function('glProgramUniform3ui', None, [GLuint, GLint, GLuint, GLuint, GLuint], requires='OpenGL 4.1')
glProgramUniform3uiv = _link_function('glProgramUniform3uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1')
glProgramUniform4d = _link_function('glProgramUniform4d', None, [GLuint, GLint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1')
glProgramUniform4dv = _link_function('glProgramUniform4dv', None, [GLuint, GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniform4f = _link_function('glProgramUniform4f', None, [GLuint, GLint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 4.1')
glProgramUniform4fv = _link_function('glProgramUniform4fv', None, [GLuint, GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniform4i = _link_function('glProgramUniform4i', None, [GLuint, GLint, GLint, GLint, GLint, GLint], requires='OpenGL 4.1')
glProgramUniform4iv = _link_function('glProgramUniform4iv', None, [GLuint, GLint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1')
glProgramUniform4ui = _link_function('glProgramUniform4ui', None, [GLuint, GLint, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 4.1')
glProgramUniform4uiv = _link_function('glProgramUniform4uiv', None, [GLuint, GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 4.1')
glProgramUniformMatrix2dv = _link_function('glProgramUniformMatrix2dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix2fv = _link_function('glProgramUniformMatrix2fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix2x3dv = _link_function('glProgramUniformMatrix2x3dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix2x3fv = _link_function('glProgramUniformMatrix2x3fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix2x4dv = _link_function('glProgramUniformMatrix2x4dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix2x4fv = _link_function('glProgramUniformMatrix2x4fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix3dv = _link_function('glProgramUniformMatrix3dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix3fv = _link_function('glProgramUniformMatrix3fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix3x2dv = _link_function('glProgramUniformMatrix3x2dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix3x2fv = _link_function('glProgramUniformMatrix3x2fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix3x4dv = _link_function('glProgramUniformMatrix3x4dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix3x4fv = _link_function('glProgramUniformMatrix3x4fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix4dv = _link_function('glProgramUniformMatrix4dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix4fv = _link_function('glProgramUniformMatrix4fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix4x2dv = _link_function('glProgramUniformMatrix4x2dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix4x2fv = _link_function('glProgramUniformMatrix4x2fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProgramUniformMatrix4x3dv = _link_function('glProgramUniformMatrix4x3dv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.1')
glProgramUniformMatrix4x3fv = _link_function('glProgramUniformMatrix4x3fv', None, [GLuint, GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 4.1')
glProvokingVertex = _link_function('glProvokingVertex', None, [GLenum], requires='OpenGL 3.2')
glPushAttrib = _link_function('glPushAttrib', None, [GLbitfield], requires='OpenGL 1.0')
glPushClientAttrib = _link_function('glPushClientAttrib', None, [GLbitfield], requires='OpenGL 1.1')
glPushDebugGroup = _link_function('glPushDebugGroup', None, [GLenum, GLuint, GLsizei, POINTER(GLchar)], requires='OpenGL 4.3')
glPushMatrix = _link_function('glPushMatrix', None, [], requires='OpenGL 1.0')
glPushName = _link_function('glPushName', None, [GLuint], requires='OpenGL 1.0')
glQueryCounter = _link_function('glQueryCounter', None, [GLuint, GLenum], requires='OpenGL 3.3')
glRasterPos2d = _link_function('glRasterPos2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0')
glRasterPos2dv = _link_function('glRasterPos2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glRasterPos2f = _link_function('glRasterPos2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0')
glRasterPos2fv = _link_function('glRasterPos2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glRasterPos2i = _link_function('glRasterPos2i', None, [GLint, GLint], requires='OpenGL 1.0')
glRasterPos2iv = _link_function('glRasterPos2iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glRasterPos2s = _link_function('glRasterPos2s', None, [GLshort, GLshort], requires='OpenGL 1.0')
glRasterPos2sv = _link_function('glRasterPos2sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glRasterPos3d = _link_function('glRasterPos3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glRasterPos3dv = _link_function('glRasterPos3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glRasterPos3f = _link_function('glRasterPos3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glRasterPos3fv = _link_function('glRasterPos3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glRasterPos3i = _link_function('glRasterPos3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0')
glRasterPos3iv = _link_function('glRasterPos3iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glRasterPos3s = _link_function('glRasterPos3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glRasterPos3sv = _link_function('glRasterPos3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glRasterPos4d = _link_function('glRasterPos4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glRasterPos4dv = _link_function('glRasterPos4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glRasterPos4f = _link_function('glRasterPos4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glRasterPos4fv = _link_function('glRasterPos4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glRasterPos4i = _link_function('glRasterPos4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0')
glRasterPos4iv = _link_function('glRasterPos4iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glRasterPos4s = _link_function('glRasterPos4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glRasterPos4sv = _link_function('glRasterPos4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glReadBuffer = _link_function('glReadBuffer', None, [GLenum], requires='OpenGL 1.0')
glReadPixels = _link_function('glReadPixels', None, [GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0')
glReadnPixels = _link_function('glReadnPixels', None, [GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.5')
glRectd = _link_function('glRectd', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glRectdv = _link_function('glRectdv', None, [POINTER(GLdouble), POINTER(GLdouble)], requires='OpenGL 1.0')
glRectf = _link_function('glRectf', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glRectfv = _link_function('glRectfv', None, [POINTER(GLfloat), POINTER(GLfloat)], requires='OpenGL 1.0')
glRecti = _link_function('glRecti', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0')
glRectiv = _link_function('glRectiv', None, [POINTER(GLint), POINTER(GLint)], requires='OpenGL 1.0')
glRects = _link_function('glRects', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glRectsv = _link_function('glRectsv', None, [POINTER(GLshort), POINTER(GLshort)], requires='OpenGL 1.0')
glReleaseShaderCompiler = _link_function('glReleaseShaderCompiler', None, [], requires='OpenGL 4.1')
glRenderMode = _link_function('glRenderMode', GLint, [GLenum], requires='OpenGL 1.0')
glRenderbufferStorage = _link_function('glRenderbufferStorage', None, [GLenum, GLenum, GLsizei, GLsizei], requires='OpenGL 3.0')
glRenderbufferStorageEXT = _link_function('glRenderbufferStorageEXT', None, [GLenum, GLenum, GLsizei, GLsizei], requires='None')
glRenderbufferStorageMultisample = _link_function('glRenderbufferStorageMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 3.0')
glResumeTransformFeedback = _link_function('glResumeTransformFeedback', None, [], requires='OpenGL 4.0')
glRotated = _link_function('glRotated', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glRotatef = _link_function('glRotatef', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glSampleCoverage = _link_function('glSampleCoverage', None, [GLfloat, GLboolean], requires='OpenGL 1.3')
glSampleCoverageARB = _link_function('glSampleCoverageARB', None, [GLfloat, GLboolean], requires='None')
glSampleMaski = _link_function('glSampleMaski', None, [GLuint, GLbitfield], requires='OpenGL 3.2')
glSamplerParameterIiv = _link_function('glSamplerParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3')
glSamplerParameterIuiv = _link_function('glSamplerParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glSamplerParameterf = _link_function('glSamplerParameterf', None, [GLuint, GLenum, GLfloat], requires='OpenGL 3.3')
glSamplerParameterfv = _link_function('glSamplerParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 3.3')
glSamplerParameteri = _link_function('glSamplerParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 3.3')
glSamplerParameteriv = _link_function('glSamplerParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 3.3')
glScaled = _link_function('glScaled', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glScalef = _link_function('glScalef', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glScissor = _link_function('glScissor', None, [GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.0')
glScissorArrayv = _link_function('glScissorArrayv', None, [GLuint, GLsizei, POINTER(GLint)], requires='OpenGL 4.1')
glScissorIndexed = _link_function('glScissorIndexed', None, [GLuint, GLint, GLint, GLsizei, GLsizei], requires='OpenGL 4.1')
glScissorIndexedv = _link_function('glScissorIndexedv', None, [GLuint, POINTER(GLint)], requires='OpenGL 4.1')
glSecondaryColor3b = _link_function('glSecondaryColor3b', None, [GLbyte, GLbyte, GLbyte], requires='OpenGL 1.4')
glSecondaryColor3bv = _link_function('glSecondaryColor3bv', None, [POINTER(GLbyte)], requires='OpenGL 1.4')
glSecondaryColor3d = _link_function('glSecondaryColor3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.4')
glSecondaryColor3dv = _link_function('glSecondaryColor3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.4')
glSecondaryColor3f = _link_function('glSecondaryColor3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.4')
glSecondaryColor3fv = _link_function('glSecondaryColor3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.4')
glSecondaryColor3i = _link_function('glSecondaryColor3i', None, [GLint, GLint, GLint], requires='OpenGL 1.4')
glSecondaryColor3iv = _link_function('glSecondaryColor3iv', None, [POINTER(GLint)], requires='OpenGL 1.4')
glSecondaryColor3s = _link_function('glSecondaryColor3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.4')
glSecondaryColor3sv = _link_function('glSecondaryColor3sv', None, [POINTER(GLshort)], requires='OpenGL 1.4')
glSecondaryColor3ub = _link_function('glSecondaryColor3ub', None, [GLubyte, GLubyte, GLubyte], requires='OpenGL 1.4')
glSecondaryColor3ubv = _link_function('glSecondaryColor3ubv', None, [POINTER(GLubyte)], requires='OpenGL 1.4')
glSecondaryColor3ui = _link_function('glSecondaryColor3ui', None, [GLuint, GLuint, GLuint], requires='OpenGL 1.4')
glSecondaryColor3uiv = _link_function('glSecondaryColor3uiv', None, [POINTER(GLuint)], requires='OpenGL 1.4')
glSecondaryColor3us = _link_function('glSecondaryColor3us', None, [GLushort, GLushort, GLushort], requires='OpenGL 1.4')
glSecondaryColor3usv = _link_function('glSecondaryColor3usv', None, [POINTER(GLushort)], requires='OpenGL 1.4')
glSecondaryColorP3ui = _link_function('glSecondaryColorP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glSecondaryColorP3uiv = _link_function('glSecondaryColorP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glSecondaryColorPointer = _link_function('glSecondaryColorPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.4')
glSelectBuffer = _link_function('glSelectBuffer', None, [GLsizei, POINTER(GLuint)], requires='OpenGL 1.0')
glShadeModel = _link_function('glShadeModel', None, [GLenum], requires='OpenGL 1.0')
glShaderBinary = _link_function('glShaderBinary', None, [GLsizei, POINTER(GLuint), GLenum, POINTER(GLvoid), GLsizei], requires='OpenGL 4.1')
glShaderSource = _link_function('glShaderSource', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLint)], requires='OpenGL 2.0')
glShaderStorageBlockBinding = _link_function('glShaderStorageBlockBinding', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.3')
glSpecializeShader = _link_function('glSpecializeShader', None, [GLuint, POINTER(GLchar), GLuint, POINTER(GLuint), POINTER(GLuint)], requires='OpenGL 4.6')
glStencilFunc = _link_function('glStencilFunc', None, [GLenum, GLint, GLuint], requires='OpenGL 1.0')
glStencilFuncSeparate = _link_function('glStencilFuncSeparate', None, [GLenum, GLenum, GLint, GLuint], requires='OpenGL 2.0')
glStencilMask = _link_function('glStencilMask', None, [GLuint], requires='OpenGL 1.0')
glStencilMaskSeparate = _link_function('glStencilMaskSeparate', None, [GLenum, GLuint], requires='OpenGL 2.0')
glStencilOp = _link_function('glStencilOp', None, [GLenum, GLenum, GLenum], requires='OpenGL 1.0')
glStencilOpSeparate = _link_function('glStencilOpSeparate', None, [GLenum, GLenum, GLenum, GLenum], requires='OpenGL 2.0')
glTexBuffer = _link_function('glTexBuffer', None, [GLenum, GLenum, GLuint], requires='OpenGL 3.1')
glTexBufferRange = _link_function('glTexBufferRange', None, [GLenum, GLenum, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.3')
glTexCoord1d = _link_function('glTexCoord1d', None, [GLdouble], requires='OpenGL 1.0')
glTexCoord1dv = _link_function('glTexCoord1dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glTexCoord1f = _link_function('glTexCoord1f', None, [GLfloat], requires='OpenGL 1.0')
glTexCoord1fv = _link_function('glTexCoord1fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glTexCoord1i = _link_function('glTexCoord1i', None, [GLint], requires='OpenGL 1.0')
glTexCoord1iv = _link_function('glTexCoord1iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glTexCoord1s = _link_function('glTexCoord1s', None, [GLshort], requires='OpenGL 1.0')
glTexCoord1sv = _link_function('glTexCoord1sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glTexCoord2d = _link_function('glTexCoord2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0')
glTexCoord2dv = _link_function('glTexCoord2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glTexCoord2f = _link_function('glTexCoord2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0')
glTexCoord2fv = _link_function('glTexCoord2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glTexCoord2i = _link_function('glTexCoord2i', None, [GLint, GLint], requires='OpenGL 1.0')
glTexCoord2iv = _link_function('glTexCoord2iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glTexCoord2s = _link_function('glTexCoord2s', None, [GLshort, GLshort], requires='OpenGL 1.0')
glTexCoord2sv = _link_function('glTexCoord2sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glTexCoord3d = _link_function('glTexCoord3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glTexCoord3dv = _link_function('glTexCoord3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glTexCoord3f = _link_function('glTexCoord3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glTexCoord3fv = _link_function('glTexCoord3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glTexCoord3i = _link_function('glTexCoord3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0')
glTexCoord3iv = _link_function('glTexCoord3iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glTexCoord3s = _link_function('glTexCoord3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glTexCoord3sv = _link_function('glTexCoord3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glTexCoord4d = _link_function('glTexCoord4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glTexCoord4dv = _link_function('glTexCoord4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glTexCoord4f = _link_function('glTexCoord4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glTexCoord4fv = _link_function('glTexCoord4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glTexCoord4i = _link_function('glTexCoord4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0')
glTexCoord4iv = _link_function('glTexCoord4iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glTexCoord4s = _link_function('glTexCoord4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glTexCoord4sv = _link_function('glTexCoord4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glTexCoordP1ui = _link_function('glTexCoordP1ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glTexCoordP1uiv = _link_function('glTexCoordP1uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glTexCoordP2ui = _link_function('glTexCoordP2ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glTexCoordP2uiv = _link_function('glTexCoordP2uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glTexCoordP3ui = _link_function('glTexCoordP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glTexCoordP3uiv = _link_function('glTexCoordP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glTexCoordP4ui = _link_function('glTexCoordP4ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glTexCoordP4uiv = _link_function('glTexCoordP4uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glTexCoordPointer = _link_function('glTexCoordPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glTexEnvf = _link_function('glTexEnvf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0')
glTexEnvfv = _link_function('glTexEnvfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glTexEnvi = _link_function('glTexEnvi', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0')
glTexEnviv = _link_function('glTexEnviv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glTexGend = _link_function('glTexGend', None, [GLenum, GLenum, GLdouble], requires='OpenGL 1.0')
glTexGendv = _link_function('glTexGendv', None, [GLenum, GLenum, POINTER(GLdouble)], requires='OpenGL 1.0')
glTexGenf = _link_function('glTexGenf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0')
glTexGenfv = _link_function('glTexGenfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glTexGeni = _link_function('glTexGeni', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0')
glTexGeniv = _link_function('glTexGeniv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glTexImage1D = _link_function('glTexImage1D', None, [GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0')
glTexImage2D = _link_function('glTexImage2D', None, [GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.0')
glTexImage2DMultisample = _link_function('glTexImage2DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLboolean], requires='OpenGL 3.2')
glTexImage3D = _link_function('glTexImage3D', None, [GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.2')
glTexImage3DMultisample = _link_function('glTexImage3DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLsizei, GLboolean], requires='OpenGL 3.2')
glTexParameterIiv = _link_function('glTexParameterIiv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 3.0')
glTexParameterIuiv = _link_function('glTexParameterIuiv', None, [GLenum, GLenum, POINTER(GLuint)], requires='OpenGL 3.0')
glTexParameterf = _link_function('glTexParameterf', None, [GLenum, GLenum, GLfloat], requires='OpenGL 1.0')
glTexParameterfv = _link_function('glTexParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], requires='OpenGL 1.0')
glTexParameteri = _link_function('glTexParameteri', None, [GLenum, GLenum, GLint], requires='OpenGL 1.0')
glTexParameteriv = _link_function('glTexParameteriv', None, [GLenum, GLenum, POINTER(GLint)], requires='OpenGL 1.0')
glTexStorage1D = _link_function('glTexStorage1D', None, [GLenum, GLsizei, GLenum, GLsizei], requires='OpenGL 4.2')
glTexStorage2D = _link_function('glTexStorage2D', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 4.2')
glTexStorage2DMultisample = _link_function('glTexStorage2DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.3')
glTexStorage3D = _link_function('glTexStorage3D', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.2')
glTexStorage3DMultisample = _link_function('glTexStorage3DMultisample', None, [GLenum, GLsizei, GLenum, GLsizei, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.3')
glTexSubImage1D = _link_function('glTexSubImage1D', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.1')
glTexSubImage2D = _link_function('glTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.1')
glTexSubImage3D = _link_function('glTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 1.2')
glTextureBarrier = _link_function('glTextureBarrier', None, [], requires='OpenGL 4.5')
glTextureBuffer = _link_function('glTextureBuffer', None, [GLuint, GLenum, GLuint], requires='OpenGL 4.5')
glTextureBufferRange = _link_function('glTextureBufferRange', None, [GLuint, GLenum, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.5')
glTextureParameterIiv = _link_function('glTextureParameterIiv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glTextureParameterIuiv = _link_function('glTextureParameterIuiv', None, [GLuint, GLenum, POINTER(GLuint)], requires='OpenGL 4.5')
glTextureParameterf = _link_function('glTextureParameterf', None, [GLuint, GLenum, GLfloat], requires='OpenGL 4.5')
glTextureParameterfv = _link_function('glTextureParameterfv', None, [GLuint, GLenum, POINTER(GLfloat)], requires='OpenGL 4.5')
glTextureParameteri = _link_function('glTextureParameteri', None, [GLuint, GLenum, GLint], requires='OpenGL 4.5')
glTextureParameteriv = _link_function('glTextureParameteriv', None, [GLuint, GLenum, POINTER(GLint)], requires='OpenGL 4.5')
glTextureStorage1D = _link_function('glTextureStorage1D', None, [GLuint, GLsizei, GLenum, GLsizei], requires='OpenGL 4.5')
glTextureStorage2D = _link_function('glTextureStorage2D', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei], requires='OpenGL 4.5')
glTextureStorage2DMultisample = _link_function('glTextureStorage2DMultisample', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.5')
glTextureStorage3D = _link_function('glTextureStorage3D', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei, GLsizei], requires='OpenGL 4.5')
glTextureStorage3DMultisample = _link_function('glTextureStorage3DMultisample', None, [GLuint, GLsizei, GLenum, GLsizei, GLsizei, GLsizei, GLboolean], requires='OpenGL 4.5')
glTextureSubImage1D = _link_function('glTextureSubImage1D', None, [GLuint, GLint, GLint, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5')
glTextureSubImage2D = _link_function('glTextureSubImage2D', None, [GLuint, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5')
glTextureSubImage3D = _link_function('glTextureSubImage3D', None, [GLuint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], requires='OpenGL 4.5')
glTextureView = _link_function('glTextureView', None, [GLuint, GLenum, GLuint, GLenum, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 4.3')
glTransformFeedbackBufferBase = _link_function('glTransformFeedbackBufferBase', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.5')
glTransformFeedbackBufferRange = _link_function('glTransformFeedbackBufferRange', None, [GLuint, GLuint, GLuint, GLintptr, GLsizeiptr], requires='OpenGL 4.5')
glTransformFeedbackVaryings = _link_function('glTransformFeedbackVaryings', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), GLenum], requires='OpenGL 3.0')
glTranslated = _link_function('glTranslated', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glTranslatef = _link_function('glTranslatef', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glUniform1d = _link_function('glUniform1d', None, [GLint, GLdouble], requires='OpenGL 4.0')
glUniform1dv = _link_function('glUniform1dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniform1f = _link_function('glUniform1f', None, [GLint, GLfloat], requires='OpenGL 2.0')
glUniform1fv = _link_function('glUniform1fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniform1i = _link_function('glUniform1i', None, [GLint, GLint], requires='OpenGL 2.0')
glUniform1iv = _link_function('glUniform1iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0')
glUniform1ui = _link_function('glUniform1ui', None, [GLint, GLuint], requires='OpenGL 3.0')
glUniform1uiv = _link_function('glUniform1uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glUniform2d = _link_function('glUniform2d', None, [GLint, GLdouble, GLdouble], requires='OpenGL 4.0')
glUniform2dv = _link_function('glUniform2dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniform2f = _link_function('glUniform2f', None, [GLint, GLfloat, GLfloat], requires='OpenGL 2.0')
glUniform2fv = _link_function('glUniform2fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniform2i = _link_function('glUniform2i', None, [GLint, GLint, GLint], requires='OpenGL 2.0')
glUniform2iv = _link_function('glUniform2iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0')
glUniform2ui = _link_function('glUniform2ui', None, [GLint, GLuint, GLuint], requires='OpenGL 3.0')
glUniform2uiv = _link_function('glUniform2uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glUniform3d = _link_function('glUniform3d', None, [GLint, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.0')
glUniform3dv = _link_function('glUniform3dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniform3f = _link_function('glUniform3f', None, [GLint, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0')
glUniform3fv = _link_function('glUniform3fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniform3i = _link_function('glUniform3i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 2.0')
glUniform3iv = _link_function('glUniform3iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0')
glUniform3ui = _link_function('glUniform3ui', None, [GLint, GLuint, GLuint, GLuint], requires='OpenGL 3.0')
glUniform3uiv = _link_function('glUniform3uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glUniform4d = _link_function('glUniform4d', None, [GLint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.0')
glUniform4dv = _link_function('glUniform4dv', None, [GLint, GLsizei, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniform4f = _link_function('glUniform4f', None, [GLint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0')
glUniform4fv = _link_function('glUniform4fv', None, [GLint, GLsizei, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniform4i = _link_function('glUniform4i', None, [GLint, GLint, GLint, GLint, GLint], requires='OpenGL 2.0')
glUniform4iv = _link_function('glUniform4iv', None, [GLint, GLsizei, POINTER(GLint)], requires='OpenGL 2.0')
glUniform4ui = _link_function('glUniform4ui', None, [GLint, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 3.0')
glUniform4uiv = _link_function('glUniform4uiv', None, [GLint, GLsizei, POINTER(GLuint)], requires='OpenGL 3.0')
glUniformBlockBinding = _link_function('glUniformBlockBinding', None, [GLuint, GLuint, GLuint], requires='OpenGL 3.1')
glUniformMatrix2dv = _link_function('glUniformMatrix2dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix2fv = _link_function('glUniformMatrix2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniformMatrix2x3dv = _link_function('glUniformMatrix2x3dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix2x3fv = _link_function('glUniformMatrix2x3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1')
glUniformMatrix2x4dv = _link_function('glUniformMatrix2x4dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix2x4fv = _link_function('glUniformMatrix2x4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1')
glUniformMatrix3dv = _link_function('glUniformMatrix3dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix3fv = _link_function('glUniformMatrix3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniformMatrix3x2dv = _link_function('glUniformMatrix3x2dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix3x2fv = _link_function('glUniformMatrix3x2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1')
glUniformMatrix3x4dv = _link_function('glUniformMatrix3x4dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix3x4fv = _link_function('glUniformMatrix3x4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1')
glUniformMatrix4dv = _link_function('glUniformMatrix4dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix4fv = _link_function('glUniformMatrix4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.0')
glUniformMatrix4x2dv = _link_function('glUniformMatrix4x2dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix4x2fv = _link_function('glUniformMatrix4x2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1')
glUniformMatrix4x3dv = _link_function('glUniformMatrix4x3dv', None, [GLint, GLsizei, GLboolean, POINTER(GLdouble)], requires='OpenGL 4.0')
glUniformMatrix4x3fv = _link_function('glUniformMatrix4x3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], requires='OpenGL 2.1')
glUniformSubroutinesuiv = _link_function('glUniformSubroutinesuiv', None, [GLenum, GLsizei, POINTER(GLuint)], requires='OpenGL 4.0')
glUnmapBuffer = _link_function('glUnmapBuffer', GLboolean, [GLenum], requires='OpenGL 1.5')
glUnmapNamedBuffer = _link_function('glUnmapNamedBuffer', GLboolean, [GLuint], requires='OpenGL 4.5')
glUseProgram = _link_function('glUseProgram', None, [GLuint], requires='OpenGL 2.0')
glUseProgramStages = _link_function('glUseProgramStages', None, [GLuint, GLbitfield, GLuint], requires='OpenGL 4.1')
glValidateProgram = _link_function('glValidateProgram', None, [GLuint], requires='OpenGL 2.0')
glValidateProgramPipeline = _link_function('glValidateProgramPipeline', None, [GLuint], requires='OpenGL 4.1')
glVertex2d = _link_function('glVertex2d', None, [GLdouble, GLdouble], requires='OpenGL 1.0')
glVertex2dv = _link_function('glVertex2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glVertex2f = _link_function('glVertex2f', None, [GLfloat, GLfloat], requires='OpenGL 1.0')
glVertex2fv = _link_function('glVertex2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glVertex2i = _link_function('glVertex2i', None, [GLint, GLint], requires='OpenGL 1.0')
glVertex2iv = _link_function('glVertex2iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glVertex2s = _link_function('glVertex2s', None, [GLshort, GLshort], requires='OpenGL 1.0')
glVertex2sv = _link_function('glVertex2sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glVertex3d = _link_function('glVertex3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glVertex3dv = _link_function('glVertex3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glVertex3f = _link_function('glVertex3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glVertex3fv = _link_function('glVertex3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glVertex3i = _link_function('glVertex3i', None, [GLint, GLint, GLint], requires='OpenGL 1.0')
glVertex3iv = _link_function('glVertex3iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glVertex3s = _link_function('glVertex3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glVertex3sv = _link_function('glVertex3sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glVertex4d = _link_function('glVertex4d', None, [GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 1.0')
glVertex4dv = _link_function('glVertex4dv', None, [POINTER(GLdouble)], requires='OpenGL 1.0')
glVertex4f = _link_function('glVertex4f', None, [GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 1.0')
glVertex4fv = _link_function('glVertex4fv', None, [POINTER(GLfloat)], requires='OpenGL 1.0')
glVertex4i = _link_function('glVertex4i', None, [GLint, GLint, GLint, GLint], requires='OpenGL 1.0')
glVertex4iv = _link_function('glVertex4iv', None, [POINTER(GLint)], requires='OpenGL 1.0')
glVertex4s = _link_function('glVertex4s', None, [GLshort, GLshort, GLshort, GLshort], requires='OpenGL 1.0')
glVertex4sv = _link_function('glVertex4sv', None, [POINTER(GLshort)], requires='OpenGL 1.0')
glVertexArrayAttribBinding = _link_function('glVertexArrayAttribBinding', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.5')
glVertexArrayAttribFormat = _link_function('glVertexArrayAttribFormat', None, [GLuint, GLuint, GLint, GLenum, GLboolean, GLuint], requires='OpenGL 4.5')
glVertexArrayAttribIFormat = _link_function('glVertexArrayAttribIFormat', None, [GLuint, GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.5')
glVertexArrayAttribLFormat = _link_function('glVertexArrayAttribLFormat', None, [GLuint, GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.5')
glVertexArrayBindingDivisor = _link_function('glVertexArrayBindingDivisor', None, [GLuint, GLuint, GLuint], requires='OpenGL 4.5')
glVertexArrayElementBuffer = _link_function('glVertexArrayElementBuffer', None, [GLuint, GLuint], requires='OpenGL 4.5')
glVertexArrayVertexBuffer = _link_function('glVertexArrayVertexBuffer', None, [GLuint, GLuint, GLuint, GLintptr, GLsizei], requires='OpenGL 4.5')
glVertexArrayVertexBuffers = _link_function('glVertexArrayVertexBuffers', None, [GLuint, GLuint, GLsizei, POINTER(GLuint), POINTER(GLintptr), POINTER(GLsizei)], requires='OpenGL 4.5')
glVertexAttrib1d = _link_function('glVertexAttrib1d', None, [GLuint, GLdouble], requires='OpenGL 2.0')
glVertexAttrib1dv = _link_function('glVertexAttrib1dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0')
glVertexAttrib1f = _link_function('glVertexAttrib1f', None, [GLuint, GLfloat], requires='OpenGL 2.0')
glVertexAttrib1fv = _link_function('glVertexAttrib1fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0')
glVertexAttrib1s = _link_function('glVertexAttrib1s', None, [GLuint, GLshort], requires='OpenGL 2.0')
glVertexAttrib1sv = _link_function('glVertexAttrib1sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0')
glVertexAttrib2d = _link_function('glVertexAttrib2d', None, [GLuint, GLdouble, GLdouble], requires='OpenGL 2.0')
glVertexAttrib2dv = _link_function('glVertexAttrib2dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0')
glVertexAttrib2f = _link_function('glVertexAttrib2f', None, [GLuint, GLfloat, GLfloat], requires='OpenGL 2.0')
glVertexAttrib2fv = _link_function('glVertexAttrib2fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0')
glVertexAttrib2s = _link_function('glVertexAttrib2s', None, [GLuint, GLshort, GLshort], requires='OpenGL 2.0')
glVertexAttrib2sv = _link_function('glVertexAttrib2sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0')
glVertexAttrib3d = _link_function('glVertexAttrib3d', None, [GLuint, GLdouble, GLdouble, GLdouble], requires='OpenGL 2.0')
glVertexAttrib3dv = _link_function('glVertexAttrib3dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0')
glVertexAttrib3f = _link_function('glVertexAttrib3f', None, [GLuint, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0')
glVertexAttrib3fv = _link_function('glVertexAttrib3fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0')
glVertexAttrib3s = _link_function('glVertexAttrib3s', None, [GLuint, GLshort, GLshort, GLshort], requires='OpenGL 2.0')
glVertexAttrib3sv = _link_function('glVertexAttrib3sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0')
glVertexAttrib4Nbv = _link_function('glVertexAttrib4Nbv', None, [GLuint, POINTER(GLbyte)], requires='OpenGL 2.0')
glVertexAttrib4Niv = _link_function('glVertexAttrib4Niv', None, [GLuint, POINTER(GLint)], requires='OpenGL 2.0')
glVertexAttrib4Nsv = _link_function('glVertexAttrib4Nsv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0')
glVertexAttrib4Nub = _link_function('glVertexAttrib4Nub', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte], requires='OpenGL 2.0')
glVertexAttrib4Nubv = _link_function('glVertexAttrib4Nubv', None, [GLuint, POINTER(GLubyte)], requires='OpenGL 2.0')
glVertexAttrib4Nuiv = _link_function('glVertexAttrib4Nuiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 2.0')
glVertexAttrib4Nusv = _link_function('glVertexAttrib4Nusv', None, [GLuint, POINTER(GLushort)], requires='OpenGL 2.0')
glVertexAttrib4bv = _link_function('glVertexAttrib4bv', None, [GLuint, POINTER(GLbyte)], requires='OpenGL 2.0')
glVertexAttrib4d = _link_function('glVertexAttrib4d', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 2.0')
glVertexAttrib4dv = _link_function('glVertexAttrib4dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 2.0')
glVertexAttrib4f = _link_function('glVertexAttrib4f', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 2.0')
glVertexAttrib4fv = _link_function('glVertexAttrib4fv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 2.0')
glVertexAttrib4iv = _link_function('glVertexAttrib4iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 2.0')
glVertexAttrib4s = _link_function('glVertexAttrib4s', None, [GLuint, GLshort, GLshort, GLshort, GLshort], requires='OpenGL 2.0')
glVertexAttrib4sv = _link_function('glVertexAttrib4sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 2.0')
glVertexAttrib4ubv = _link_function('glVertexAttrib4ubv', None, [GLuint, POINTER(GLubyte)], requires='OpenGL 2.0')
glVertexAttrib4uiv = _link_function('glVertexAttrib4uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 2.0')
glVertexAttrib4usv = _link_function('glVertexAttrib4usv', None, [GLuint, POINTER(GLushort)], requires='OpenGL 2.0')
glVertexAttribBinding = _link_function('glVertexAttribBinding', None, [GLuint, GLuint], requires='OpenGL 4.3')
glVertexAttribDivisor = _link_function('glVertexAttribDivisor', None, [GLuint, GLuint], requires='OpenGL 3.3')
glVertexAttribFormat = _link_function('glVertexAttribFormat', None, [GLuint, GLint, GLenum, GLboolean, GLuint], requires='OpenGL 4.3')
glVertexAttribI1i = _link_function('glVertexAttribI1i', None, [GLuint, GLint], requires='OpenGL 3.0')
glVertexAttribI1iv = _link_function('glVertexAttribI1iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0')
glVertexAttribI1ui = _link_function('glVertexAttribI1ui', None, [GLuint, GLuint], requires='OpenGL 3.0')
glVertexAttribI1uiv = _link_function('glVertexAttribI1uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0')
glVertexAttribI2i = _link_function('glVertexAttribI2i', None, [GLuint, GLint, GLint], requires='OpenGL 3.0')
glVertexAttribI2iv = _link_function('glVertexAttribI2iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0')
glVertexAttribI2ui = _link_function('glVertexAttribI2ui', None, [GLuint, GLuint, GLuint], requires='OpenGL 3.0')
glVertexAttribI2uiv = _link_function('glVertexAttribI2uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0')
glVertexAttribI3i = _link_function('glVertexAttribI3i', None, [GLuint, GLint, GLint, GLint], requires='OpenGL 3.0')
glVertexAttribI3iv = _link_function('glVertexAttribI3iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0')
glVertexAttribI3ui = _link_function('glVertexAttribI3ui', None, [GLuint, GLuint, GLuint, GLuint], requires='OpenGL 3.0')
glVertexAttribI3uiv = _link_function('glVertexAttribI3uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0')
glVertexAttribI4bv = _link_function('glVertexAttribI4bv', None, [GLuint, POINTER(GLbyte)], requires='OpenGL 3.0')
glVertexAttribI4i = _link_function('glVertexAttribI4i', None, [GLuint, GLint, GLint, GLint, GLint], requires='OpenGL 3.0')
glVertexAttribI4iv = _link_function('glVertexAttribI4iv', None, [GLuint, POINTER(GLint)], requires='OpenGL 3.0')
glVertexAttribI4sv = _link_function('glVertexAttribI4sv', None, [GLuint, POINTER(GLshort)], requires='OpenGL 3.0')
glVertexAttribI4ubv = _link_function('glVertexAttribI4ubv', None, [GLuint, POINTER(GLubyte)], requires='OpenGL 3.0')
glVertexAttribI4ui = _link_function('glVertexAttribI4ui', None, [GLuint, GLuint, GLuint, GLuint, GLuint], requires='OpenGL 3.0')
glVertexAttribI4uiv = _link_function('glVertexAttribI4uiv', None, [GLuint, POINTER(GLuint)], requires='OpenGL 3.0')
glVertexAttribI4usv = _link_function('glVertexAttribI4usv', None, [GLuint, POINTER(GLushort)], requires='OpenGL 3.0')
glVertexAttribIFormat = _link_function('glVertexAttribIFormat', None, [GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.3')
glVertexAttribIPointer = _link_function('glVertexAttribIPointer', None, [GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 3.0')
glVertexAttribL1d = _link_function('glVertexAttribL1d', None, [GLuint, GLdouble], requires='OpenGL 4.1')
glVertexAttribL1dv = _link_function('glVertexAttribL1dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1')
glVertexAttribL2d = _link_function('glVertexAttribL2d', None, [GLuint, GLdouble, GLdouble], requires='OpenGL 4.1')
glVertexAttribL2dv = _link_function('glVertexAttribL2dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1')
glVertexAttribL3d = _link_function('glVertexAttribL3d', None, [GLuint, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1')
glVertexAttribL3dv = _link_function('glVertexAttribL3dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1')
glVertexAttribL4d = _link_function('glVertexAttribL4d', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], requires='OpenGL 4.1')
glVertexAttribL4dv = _link_function('glVertexAttribL4dv', None, [GLuint, POINTER(GLdouble)], requires='OpenGL 4.1')
glVertexAttribLFormat = _link_function('glVertexAttribLFormat', None, [GLuint, GLint, GLenum, GLuint], requires='OpenGL 4.3')
glVertexAttribLPointer = _link_function('glVertexAttribLPointer', None, [GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 4.1')
glVertexAttribP1ui = _link_function('glVertexAttribP1ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3')
glVertexAttribP1uiv = _link_function('glVertexAttribP1uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexAttribP2ui = _link_function('glVertexAttribP2ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3')
glVertexAttribP2uiv = _link_function('glVertexAttribP2uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexAttribP3ui = _link_function('glVertexAttribP3ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3')
glVertexAttribP3uiv = _link_function('glVertexAttribP3uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexAttribP4ui = _link_function('glVertexAttribP4ui', None, [GLuint, GLenum, GLboolean, GLuint], requires='OpenGL 3.3')
glVertexAttribP4uiv = _link_function('glVertexAttribP4uiv', None, [GLuint, GLenum, GLboolean, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexAttribPointer = _link_function('glVertexAttribPointer', None, [GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)], requires='OpenGL 2.0')
glVertexBindingDivisor = _link_function('glVertexBindingDivisor', None, [GLuint, GLuint], requires='OpenGL 4.3')
glVertexP2ui = _link_function('glVertexP2ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glVertexP2uiv = _link_function('glVertexP2uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexP3ui = _link_function('glVertexP3ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glVertexP3uiv = _link_function('glVertexP3uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexP4ui = _link_function('glVertexP4ui', None, [GLenum, GLuint], requires='OpenGL 3.3')
glVertexP4uiv = _link_function('glVertexP4uiv', None, [GLenum, POINTER(GLuint)], requires='OpenGL 3.3')
glVertexPointer = _link_function('glVertexPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], requires='OpenGL 1.1')
glViewport = _link_function('glViewport', None, [GLint, GLint, GLsizei, GLsizei], requires='OpenGL 1.0')
glViewportArrayv = _link_function('glViewportArrayv', None, [GLuint, GLsizei, POINTER(GLfloat)], requires='OpenGL 4.1')
glViewportIndexedf = _link_function('glViewportIndexedf', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], requires='OpenGL 4.1')
glViewportIndexedfv = _link_function('glViewportIndexedfv', None, [GLuint, POINTER(GLfloat)], requires='OpenGL 4.1')
glWindowPos2d = _link_function('glWindowPos2d', None, [GLdouble, GLdouble], requires='OpenGL 1.4')
glWindowPos2dv = _link_function('glWindowPos2dv', None, [POINTER(GLdouble)], requires='OpenGL 1.4')
glWindowPos2f = _link_function('glWindowPos2f', None, [GLfloat, GLfloat], requires='OpenGL 1.4')
glWindowPos2fv = _link_function('glWindowPos2fv', None, [POINTER(GLfloat)], requires='OpenGL 1.4')
glWindowPos2i = _link_function('glWindowPos2i', None, [GLint, GLint], requires='OpenGL 1.4')
glWindowPos2iv = _link_function('glWindowPos2iv', None, [POINTER(GLint)], requires='OpenGL 1.4')
glWindowPos2s = _link_function('glWindowPos2s', None, [GLshort, GLshort], requires='OpenGL 1.4')
glWindowPos2sv = _link_function('glWindowPos2sv', None, [POINTER(GLshort)], requires='OpenGL 1.4')
glWindowPos3d = _link_function('glWindowPos3d', None, [GLdouble, GLdouble, GLdouble], requires='OpenGL 1.4')
glWindowPos3dv = _link_function('glWindowPos3dv', None, [POINTER(GLdouble)], requires='OpenGL 1.4')
glWindowPos3f = _link_function('glWindowPos3f', None, [GLfloat, GLfloat, GLfloat], requires='OpenGL 1.4')
glWindowPos3fv = _link_function('glWindowPos3fv', None, [POINTER(GLfloat)], requires='OpenGL 1.4')
glWindowPos3i = _link_function('glWindowPos3i', None, [GLint, GLint, GLint], requires='OpenGL 1.4')
glWindowPos3iv = _link_function('glWindowPos3iv', None, [POINTER(GLint)], requires='OpenGL 1.4')
glWindowPos3s = _link_function('glWindowPos3s', None, [GLshort, GLshort, GLshort], requires='OpenGL 1.4')
glWindowPos3sv = _link_function('glWindowPos3sv', None, [POINTER(GLshort)], requires='OpenGL 1.4')
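# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the generated bindings).
# The wrappers above are plain ctypes entry points resolved at call time, so
# a current OpenGL context must exist before any of them may be invoked; the
# `pyglet.window.Window` shown below is an assumption about the surrounding
# application, not something this module creates for you.
#
#   import pyglet
#   from pyglet.gl import glViewport, glClearColor, glEnable, GL_DEPTH_TEST
#
#   window = pyglet.window.Window(640, 480)   # creates and binds a GL context
#   glViewport(0, 0, 640, 480)                # GLint, GLint, GLsizei, GLsizei
#   glClearColor(0.1, 0.1, 0.1, 1.0)          # four GLfloat components
#   glEnable(GL_DEPTH_TEST)                   # GLenum constant exported below
#
# Argument types follow the signatures declared above; passing Python ints
# and floats works because ctypes converts them to the declared GL types.
# ---------------------------------------------------------------------------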
__all__ = [
'GLenum',
'GLboolean',
'GLbitfield',
'GLvoid',
'GLbyte',
'GLubyte',
'GLshort',
'GLushort',
'GLint',
'GLuint',
'GLclampx',
'GLsizei',
'GLfloat',
'GLclampf',
'GLdouble',
'GLclampd',
'GLchar',
'GLintptr',
'GLsizeiptr',
'GLint64',
'GLuint64',
'GL_DEPTH_BUFFER_BIT',
'GL_STENCIL_BUFFER_BIT',
'GL_COLOR_BUFFER_BIT',
'GL_FALSE',
'GL_TRUE',
'GL_POINTS',
'GL_LINES',
'GL_LINE_LOOP',
'GL_LINE_STRIP',
'GL_TRIANGLES',
'GL_TRIANGLE_STRIP',
'GL_TRIANGLE_FAN',
'GL_QUADS',
'GL_NEVER',
'GL_LESS',
'GL_EQUAL',
'GL_LEQUAL',
'GL_GREATER',
'GL_NOTEQUAL',
'GL_GEQUAL',
'GL_ALWAYS',
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA_SATURATE',
'GL_NONE',
'GL_FRONT_LEFT',
'GL_FRONT_RIGHT',
'GL_BACK_LEFT',
'GL_BACK_RIGHT',
'GL_FRONT',
'GL_BACK',
'GL_LEFT',
'GL_RIGHT',
'GL_FRONT_AND_BACK',
'GL_NO_ERROR',
'GL_INVALID_ENUM',
'GL_INVALID_VALUE',
'GL_INVALID_OPERATION',
'GL_OUT_OF_MEMORY',
'GL_CW',
'GL_CCW',
'GL_POINT_SIZE',
'GL_POINT_SIZE_RANGE',
'GL_POINT_SIZE_GRANULARITY',
'GL_LINE_SMOOTH',
'GL_LINE_WIDTH',
'GL_LINE_WIDTH_RANGE',
'GL_LINE_WIDTH_GRANULARITY',
'GL_POLYGON_MODE',
'GL_POLYGON_SMOOTH',
'GL_CULL_FACE',
'GL_CULL_FACE_MODE',
'GL_FRONT_FACE',
'GL_DEPTH_RANGE',
'GL_DEPTH_TEST',
'GL_DEPTH_WRITEMASK',
'GL_DEPTH_CLEAR_VALUE',
'GL_DEPTH_FUNC',
'GL_STENCIL_TEST',
'GL_STENCIL_CLEAR_VALUE',
'GL_STENCIL_FUNC',
'GL_STENCIL_VALUE_MASK',
'GL_STENCIL_FAIL',
'GL_STENCIL_PASS_DEPTH_FAIL',
'GL_STENCIL_PASS_DEPTH_PASS',
'GL_STENCIL_REF',
'GL_STENCIL_WRITEMASK',
'GL_VIEWPORT',
'GL_DITHER',
'GL_BLEND_DST',
'GL_BLEND_SRC',
'GL_BLEND',
'GL_LOGIC_OP_MODE',
'GL_DRAW_BUFFER',
'GL_READ_BUFFER',
'GL_SCISSOR_BOX',
'GL_SCISSOR_TEST',
'GL_COLOR_CLEAR_VALUE',
'GL_COLOR_WRITEMASK',
'GL_DOUBLEBUFFER',
'GL_STEREO',
'GL_LINE_SMOOTH_HINT',
'GL_POLYGON_SMOOTH_HINT',
'GL_UNPACK_SWAP_BYTES',
'GL_UNPACK_LSB_FIRST',
'GL_UNPACK_ROW_LENGTH',
'GL_UNPACK_SKIP_ROWS',
'GL_UNPACK_SKIP_PIXELS',
'GL_UNPACK_ALIGNMENT',
'GL_PACK_SWAP_BYTES',
'GL_PACK_LSB_FIRST',
'GL_PACK_ROW_LENGTH',
'GL_PACK_SKIP_ROWS',
'GL_PACK_SKIP_PIXELS',
'GL_PACK_ALIGNMENT',
'GL_MAX_TEXTURE_SIZE',
'GL_MAX_VIEWPORT_DIMS',
'GL_SUBPIXEL_BITS',
'GL_TEXTURE_1D',
'GL_TEXTURE_2D',
'GL_TEXTURE_WIDTH',
'GL_TEXTURE_HEIGHT',
'GL_TEXTURE_BORDER_COLOR',
'GL_DONT_CARE',
'GL_FASTEST',
'GL_NICEST',
'GL_BYTE',
'GL_UNSIGNED_BYTE',
'GL_SHORT',
'GL_UNSIGNED_SHORT',
'GL_INT',
'GL_UNSIGNED_INT',
'GL_FLOAT',
'GL_STACK_OVERFLOW',
'GL_STACK_UNDERFLOW',
'GL_CLEAR',
'GL_AND',
'GL_AND_REVERSE',
'GL_COPY',
'GL_AND_INVERTED',
'GL_NOOP',
'GL_XOR',
'GL_OR',
'GL_NOR',
'GL_EQUIV',
'GL_INVERT',
'GL_OR_REVERSE',
'GL_COPY_INVERTED',
'GL_OR_INVERTED',
'GL_NAND',
'GL_SET',
'GL_TEXTURE',
'GL_COLOR',
'GL_DEPTH',
'GL_STENCIL',
'GL_STENCIL_INDEX',
'GL_DEPTH_COMPONENT',
'GL_RED',
'GL_GREEN',
'GL_BLUE',
'GL_ALPHA',
'GL_RGB',
'GL_RGBA',
'GL_POINT',
'GL_LINE',
'GL_FILL',
'GL_KEEP',
'GL_REPLACE',
'GL_INCR',
'GL_DECR',
'GL_VENDOR',
'GL_RENDERER',
'GL_VERSION',
'GL_EXTENSIONS',
'GL_NEAREST',
'GL_LINEAR',
'GL_NEAREST_MIPMAP_NEAREST',
'GL_LINEAR_MIPMAP_NEAREST',
'GL_NEAREST_MIPMAP_LINEAR',
'GL_LINEAR_MIPMAP_LINEAR',
'GL_TEXTURE_MAG_FILTER',
'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_WRAP_S',
'GL_TEXTURE_WRAP_T',
'GL_REPEAT',
'GL_CURRENT_BIT',
'GL_POINT_BIT',
'GL_LINE_BIT',
'GL_POLYGON_BIT',
'GL_POLYGON_STIPPLE_BIT',
'GL_PIXEL_MODE_BIT',
'GL_LIGHTING_BIT',
'GL_FOG_BIT',
'GL_ACCUM_BUFFER_BIT',
'GL_VIEWPORT_BIT',
'GL_TRANSFORM_BIT',
'GL_ENABLE_BIT',
'GL_HINT_BIT',
'GL_EVAL_BIT',
'GL_LIST_BIT',
'GL_TEXTURE_BIT',
'GL_SCISSOR_BIT',
'GL_ALL_ATTRIB_BITS',
'GL_QUAD_STRIP',
'GL_POLYGON',
'GL_ACCUM',
'GL_LOAD',
'GL_RETURN',
'GL_MULT',
'GL_ADD',
'GL_AUX0',
'GL_AUX1',
'GL_AUX2',
'GL_AUX3',
'GL_2D',
'GL_3D',
'GL_3D_COLOR',
'GL_3D_COLOR_TEXTURE',
'GL_4D_COLOR_TEXTURE',
'GL_PASS_THROUGH_TOKEN',
'GL_POINT_TOKEN',
'GL_LINE_TOKEN',
'GL_POLYGON_TOKEN',
'GL_BITMAP_TOKEN',
'GL_DRAW_PIXEL_TOKEN',
'GL_COPY_PIXEL_TOKEN',
'GL_LINE_RESET_TOKEN',
'GL_EXP',
'GL_EXP2',
'GL_COEFF',
'GL_ORDER',
'GL_DOMAIN',
'GL_PIXEL_MAP_I_TO_I',
'GL_PIXEL_MAP_S_TO_S',
'GL_PIXEL_MAP_I_TO_R',
'GL_PIXEL_MAP_I_TO_G',
'GL_PIXEL_MAP_I_TO_B',
'GL_PIXEL_MAP_I_TO_A',
'GL_PIXEL_MAP_R_TO_R',
'GL_PIXEL_MAP_G_TO_G',
'GL_PIXEL_MAP_B_TO_B',
'GL_PIXEL_MAP_A_TO_A',
'GL_CURRENT_COLOR',
'GL_CURRENT_INDEX',
'GL_CURRENT_NORMAL',
'GL_CURRENT_TEXTURE_COORDS',
'GL_CURRENT_RASTER_COLOR',
'GL_CURRENT_RASTER_INDEX',
'GL_CURRENT_RASTER_TEXTURE_COORDS',
'GL_CURRENT_RASTER_POSITION',
'GL_CURRENT_RASTER_POSITION_VALID',
'GL_CURRENT_RASTER_DISTANCE',
'GL_POINT_SMOOTH',
'GL_LINE_STIPPLE',
'GL_LINE_STIPPLE_PATTERN',
'GL_LINE_STIPPLE_REPEAT',
'GL_LIST_MODE',
'GL_MAX_LIST_NESTING',
'GL_LIST_BASE',
'GL_LIST_INDEX',
'GL_POLYGON_STIPPLE',
'GL_EDGE_FLAG',
'GL_LIGHTING',
'GL_LIGHT_MODEL_LOCAL_VIEWER',
'GL_LIGHT_MODEL_TWO_SIDE',
'GL_LIGHT_MODEL_AMBIENT',
'GL_SHADE_MODEL',
'GL_COLOR_MATERIAL_FACE',
'GL_COLOR_MATERIAL_PARAMETER',
'GL_COLOR_MATERIAL',
'GL_FOG',
'GL_FOG_INDEX',
'GL_FOG_DENSITY',
'GL_FOG_START',
'GL_FOG_END',
'GL_FOG_MODE',
'GL_FOG_COLOR',
'GL_ACCUM_CLEAR_VALUE',
'GL_MATRIX_MODE',
'GL_NORMALIZE',
'GL_MODELVIEW_STACK_DEPTH',
'GL_PROJECTION_STACK_DEPTH',
'GL_TEXTURE_STACK_DEPTH',
'GL_MODELVIEW_MATRIX',
'GL_PROJECTION_MATRIX',
'GL_TEXTURE_MATRIX',
'GL_ATTRIB_STACK_DEPTH',
'GL_ALPHA_TEST',
'GL_ALPHA_TEST_FUNC',
'GL_ALPHA_TEST_REF',
'GL_LOGIC_OP',
'GL_AUX_BUFFERS',
'GL_INDEX_CLEAR_VALUE',
'GL_INDEX_WRITEMASK',
'GL_INDEX_MODE',
'GL_RGBA_MODE',
'GL_RENDER_MODE',
'GL_PERSPECTIVE_CORRECTION_HINT',
'GL_POINT_SMOOTH_HINT',
'GL_FOG_HINT',
'GL_TEXTURE_GEN_S',
'GL_TEXTURE_GEN_T',
'GL_TEXTURE_GEN_R',
'GL_TEXTURE_GEN_Q',
'GL_PIXEL_MAP_I_TO_I_SIZE',
'GL_PIXEL_MAP_S_TO_S_SIZE',
'GL_PIXEL_MAP_I_TO_R_SIZE',
'GL_PIXEL_MAP_I_TO_G_SIZE',
'GL_PIXEL_MAP_I_TO_B_SIZE',
'GL_PIXEL_MAP_I_TO_A_SIZE',
'GL_PIXEL_MAP_R_TO_R_SIZE',
'GL_PIXEL_MAP_G_TO_G_SIZE',
'GL_PIXEL_MAP_B_TO_B_SIZE',
'GL_PIXEL_MAP_A_TO_A_SIZE',
'GL_MAP_COLOR',
'GL_MAP_STENCIL',
'GL_INDEX_SHIFT',
'GL_INDEX_OFFSET',
'GL_RED_SCALE',
'GL_RED_BIAS',
'GL_ZOOM_X',
'GL_ZOOM_Y',
'GL_GREEN_SCALE',
'GL_GREEN_BIAS',
'GL_BLUE_SCALE',
'GL_BLUE_BIAS',
'GL_ALPHA_SCALE',
'GL_ALPHA_BIAS',
'GL_DEPTH_SCALE',
'GL_DEPTH_BIAS',
'GL_MAX_EVAL_ORDER',
'GL_MAX_LIGHTS',
'GL_MAX_CLIP_PLANES',
'GL_MAX_PIXEL_MAP_TABLE',
'GL_MAX_ATTRIB_STACK_DEPTH',
'GL_MAX_MODELVIEW_STACK_DEPTH',
'GL_MAX_NAME_STACK_DEPTH',
'GL_MAX_PROJECTION_STACK_DEPTH',
'GL_MAX_TEXTURE_STACK_DEPTH',
'GL_INDEX_BITS',
'GL_RED_BITS',
'GL_GREEN_BITS',
'GL_BLUE_BITS',
'GL_ALPHA_BITS',
'GL_DEPTH_BITS',
'GL_STENCIL_BITS',
'GL_ACCUM_RED_BITS',
'GL_ACCUM_GREEN_BITS',
'GL_ACCUM_BLUE_BITS',
'GL_ACCUM_ALPHA_BITS',
'GL_NAME_STACK_DEPTH',
'GL_AUTO_NORMAL',
'GL_MAP1_COLOR_4',
'GL_MAP1_INDEX',
'GL_MAP1_NORMAL',
'GL_MAP1_TEXTURE_COORD_1',
'GL_MAP1_TEXTURE_COORD_2',
'GL_MAP1_TEXTURE_COORD_3',
'GL_MAP1_TEXTURE_COORD_4',
'GL_MAP1_VERTEX_3',
'GL_MAP1_VERTEX_4',
'GL_MAP2_COLOR_4',
'GL_MAP2_INDEX',
'GL_MAP2_NORMAL',
'GL_MAP2_TEXTURE_COORD_1',
'GL_MAP2_TEXTURE_COORD_2',
'GL_MAP2_TEXTURE_COORD_3',
'GL_MAP2_TEXTURE_COORD_4',
'GL_MAP2_VERTEX_3',
'GL_MAP2_VERTEX_4',
'GL_MAP1_GRID_DOMAIN',
'GL_MAP1_GRID_SEGMENTS',
'GL_MAP2_GRID_DOMAIN',
'GL_MAP2_GRID_SEGMENTS',
'GL_TEXTURE_COMPONENTS',
'GL_TEXTURE_BORDER',
'GL_AMBIENT',
'GL_DIFFUSE',
'GL_SPECULAR',
'GL_POSITION',
'GL_SPOT_DIRECTION',
'GL_SPOT_EXPONENT',
'GL_SPOT_CUTOFF',
'GL_CONSTANT_ATTENUATION',
'GL_LINEAR_ATTENUATION',
'GL_QUADRATIC_ATTENUATION',
'GL_COMPILE',
'GL_COMPILE_AND_EXECUTE',
'GL_2_BYTES',
'GL_3_BYTES',
'GL_4_BYTES',
'GL_EMISSION',
'GL_SHININESS',
'GL_AMBIENT_AND_DIFFUSE',
'GL_COLOR_INDEXES',
'GL_MODELVIEW',
'GL_PROJECTION',
'GL_COLOR_INDEX',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_BITMAP',
'GL_RENDER',
'GL_FEEDBACK',
'GL_SELECT',
'GL_FLAT',
'GL_SMOOTH',
'GL_S',
'GL_T',
'GL_R',
'GL_Q',
'GL_MODULATE',
'GL_DECAL',
'GL_TEXTURE_ENV_MODE',
'GL_TEXTURE_ENV_COLOR',
'GL_TEXTURE_ENV',
'GL_EYE_LINEAR',
'GL_OBJECT_LINEAR',
'GL_SPHERE_MAP',
'GL_TEXTURE_GEN_MODE',
'GL_OBJECT_PLANE',
'GL_EYE_PLANE',
'GL_CLAMP',
'GL_CLIP_PLANE0',
'GL_CLIP_PLANE1',
'GL_CLIP_PLANE2',
'GL_CLIP_PLANE3',
'GL_CLIP_PLANE4',
'GL_CLIP_PLANE5',
'GL_LIGHT0',
'GL_LIGHT1',
'GL_LIGHT2',
'GL_LIGHT3',
'GL_LIGHT4',
'GL_LIGHT5',
'GL_LIGHT6',
'GL_LIGHT7',
'GL_COLOR_LOGIC_OP',
'GL_POLYGON_OFFSET_UNITS',
'GL_POLYGON_OFFSET_POINT',
'GL_POLYGON_OFFSET_LINE',
'GL_POLYGON_OFFSET_FILL',
'GL_POLYGON_OFFSET_FACTOR',
'GL_TEXTURE_BINDING_1D',
'GL_TEXTURE_BINDING_2D',
'GL_TEXTURE_INTERNAL_FORMAT',
'GL_TEXTURE_RED_SIZE',
'GL_TEXTURE_GREEN_SIZE',
'GL_TEXTURE_BLUE_SIZE',
'GL_TEXTURE_ALPHA_SIZE',
'GL_DOUBLE',
'GL_PROXY_TEXTURE_1D',
'GL_PROXY_TEXTURE_2D',
'GL_R3_G3_B2',
'GL_RGB4',
'GL_RGB5',
'GL_RGB8',
'GL_RGB10',
'GL_RGB12',
'GL_RGB16',
'GL_RGBA2',
'GL_RGBA4',
'GL_RGB5_A1',
'GL_RGBA8',
'GL_RGB10_A2',
'GL_RGBA12',
'GL_RGBA16',
'GL_CLIENT_PIXEL_STORE_BIT',
'GL_CLIENT_VERTEX_ARRAY_BIT',
'GL_CLIENT_ALL_ATTRIB_BITS',
'GL_VERTEX_ARRAY_POINTER',
'GL_NORMAL_ARRAY_POINTER',
'GL_COLOR_ARRAY_POINTER',
'GL_INDEX_ARRAY_POINTER',
'GL_TEXTURE_COORD_ARRAY_POINTER',
'GL_EDGE_FLAG_ARRAY_POINTER',
'GL_FEEDBACK_BUFFER_POINTER',
'GL_SELECTION_BUFFER_POINTER',
'GL_CLIENT_ATTRIB_STACK_DEPTH',
'GL_INDEX_LOGIC_OP',
'GL_MAX_CLIENT_ATTRIB_STACK_DEPTH',
'GL_FEEDBACK_BUFFER_SIZE',
'GL_FEEDBACK_BUFFER_TYPE',
'GL_SELECTION_BUFFER_SIZE',
'GL_VERTEX_ARRAY',
'GL_NORMAL_ARRAY',
'GL_COLOR_ARRAY',
'GL_INDEX_ARRAY',
'GL_TEXTURE_COORD_ARRAY',
'GL_EDGE_FLAG_ARRAY',
'GL_VERTEX_ARRAY_SIZE',
'GL_VERTEX_ARRAY_TYPE',
'GL_VERTEX_ARRAY_STRIDE',
'GL_NORMAL_ARRAY_TYPE',
'GL_NORMAL_ARRAY_STRIDE',
'GL_COLOR_ARRAY_SIZE',
'GL_COLOR_ARRAY_TYPE',
'GL_COLOR_ARRAY_STRIDE',
'GL_INDEX_ARRAY_TYPE',
'GL_INDEX_ARRAY_STRIDE',
'GL_TEXTURE_COORD_ARRAY_SIZE',
'GL_TEXTURE_COORD_ARRAY_TYPE',
'GL_TEXTURE_COORD_ARRAY_STRIDE',
'GL_EDGE_FLAG_ARRAY_STRIDE',
'GL_TEXTURE_LUMINANCE_SIZE',
'GL_TEXTURE_INTENSITY_SIZE',
'GL_TEXTURE_PRIORITY',
'GL_TEXTURE_RESIDENT',
'GL_ALPHA4',
'GL_ALPHA8',
'GL_ALPHA12',
'GL_ALPHA16',
'GL_LUMINANCE4',
'GL_LUMINANCE8',
'GL_LUMINANCE12',
'GL_LUMINANCE16',
'GL_LUMINANCE4_ALPHA4',
'GL_LUMINANCE6_ALPHA2',
'GL_LUMINANCE8_ALPHA8',
'GL_LUMINANCE12_ALPHA4',
'GL_LUMINANCE12_ALPHA12',
'GL_LUMINANCE16_ALPHA16',
'GL_INTENSITY',
'GL_INTENSITY4',
'GL_INTENSITY8',
'GL_INTENSITY12',
'GL_INTENSITY16',
'GL_V2F',
'GL_V3F',
'GL_C4UB_V2F',
'GL_C4UB_V3F',
'GL_C3F_V3F',
'GL_N3F_V3F',
'GL_C4F_N3F_V3F',
'GL_T2F_V3F',
'GL_T4F_V4F',
'GL_T2F_C4UB_V3F',
'GL_T2F_C3F_V3F',
'GL_T2F_N3F_V3F',
'GL_T2F_C4F_N3F_V3F',
'GL_T4F_C4F_N3F_V4F',
'GL_UNSIGNED_BYTE_3_3_2',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
'GL_UNSIGNED_INT_8_8_8_8',
'GL_UNSIGNED_INT_10_10_10_2',
'GL_TEXTURE_BINDING_3D',
'GL_PACK_SKIP_IMAGES',
'GL_PACK_IMAGE_HEIGHT',
'GL_UNPACK_SKIP_IMAGES',
'GL_UNPACK_IMAGE_HEIGHT',
'GL_TEXTURE_3D',
'GL_PROXY_TEXTURE_3D',
'GL_TEXTURE_DEPTH',
'GL_TEXTURE_WRAP_R',
'GL_MAX_3D_TEXTURE_SIZE',
'GL_UNSIGNED_BYTE_2_3_3_REV',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_5_6_5_REV',
'GL_UNSIGNED_SHORT_4_4_4_4_REV',
'GL_UNSIGNED_SHORT_1_5_5_5_REV',
'GL_UNSIGNED_INT_8_8_8_8_REV',
'GL_UNSIGNED_INT_2_10_10_10_REV',
'GL_BGR',
'GL_BGRA',
'GL_MAX_ELEMENTS_VERTICES',
'GL_MAX_ELEMENTS_INDICES',
'GL_CLAMP_TO_EDGE',
'GL_TEXTURE_MIN_LOD',
'GL_TEXTURE_MAX_LOD',
'GL_TEXTURE_BASE_LEVEL',
'GL_TEXTURE_MAX_LEVEL',
'GL_SMOOTH_POINT_SIZE_RANGE',
'GL_SMOOTH_POINT_SIZE_GRANULARITY',
'GL_SMOOTH_LINE_WIDTH_RANGE',
'GL_SMOOTH_LINE_WIDTH_GRANULARITY',
'GL_ALIASED_LINE_WIDTH_RANGE',
'GL_RESCALE_NORMAL',
'GL_LIGHT_MODEL_COLOR_CONTROL',
'GL_SINGLE_COLOR',
'GL_SEPARATE_SPECULAR_COLOR',
'GL_ALIASED_POINT_SIZE_RANGE',
'GL_TEXTURE0',
'GL_TEXTURE1',
'GL_TEXTURE2',
'GL_TEXTURE3',
'GL_TEXTURE4',
'GL_TEXTURE5',
'GL_TEXTURE6',
'GL_TEXTURE7',
'GL_TEXTURE8',
'GL_TEXTURE9',
'GL_TEXTURE10',
'GL_TEXTURE11',
'GL_TEXTURE12',
'GL_TEXTURE13',
'GL_TEXTURE14',
'GL_TEXTURE15',
'GL_TEXTURE16',
'GL_TEXTURE17',
'GL_TEXTURE18',
'GL_TEXTURE19',
'GL_TEXTURE20',
'GL_TEXTURE21',
'GL_TEXTURE22',
'GL_TEXTURE23',
'GL_TEXTURE24',
'GL_TEXTURE25',
'GL_TEXTURE26',
'GL_TEXTURE27',
'GL_TEXTURE28',
'GL_TEXTURE29',
'GL_TEXTURE30',
'GL_TEXTURE31',
'GL_ACTIVE_TEXTURE',
'GL_MULTISAMPLE',
'GL_SAMPLE_ALPHA_TO_COVERAGE',
'GL_SAMPLE_ALPHA_TO_ONE',
'GL_SAMPLE_COVERAGE',
'GL_SAMPLE_BUFFERS',
'GL_SAMPLES',
'GL_SAMPLE_COVERAGE_VALUE',
'GL_SAMPLE_COVERAGE_INVERT',
'GL_TEXTURE_CUBE_MAP',
'GL_TEXTURE_BINDING_CUBE_MAP',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
'GL_PROXY_TEXTURE_CUBE_MAP',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
'GL_COMPRESSED_RGB',
'GL_COMPRESSED_RGBA',
'GL_TEXTURE_COMPRESSION_HINT',
'GL_TEXTURE_COMPRESSED_IMAGE_SIZE',
'GL_TEXTURE_COMPRESSED',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
'GL_COMPRESSED_TEXTURE_FORMATS',
'GL_CLAMP_TO_BORDER',
'GL_CLIENT_ACTIVE_TEXTURE',
'GL_MAX_TEXTURE_UNITS',
'GL_TRANSPOSE_MODELVIEW_MATRIX',
'GL_TRANSPOSE_PROJECTION_MATRIX',
'GL_TRANSPOSE_TEXTURE_MATRIX',
'GL_TRANSPOSE_COLOR_MATRIX',
'GL_MULTISAMPLE_BIT',
'GL_NORMAL_MAP',
'GL_REFLECTION_MAP',
'GL_COMPRESSED_ALPHA',
'GL_COMPRESSED_LUMINANCE',
'GL_COMPRESSED_LUMINANCE_ALPHA',
'GL_COMPRESSED_INTENSITY',
'GL_COMBINE',
'GL_COMBINE_RGB',
'GL_COMBINE_ALPHA',
'GL_SOURCE0_RGB',
'GL_SOURCE1_RGB',
'GL_SOURCE2_RGB',
'GL_SOURCE0_ALPHA',
'GL_SOURCE1_ALPHA',
'GL_SOURCE2_ALPHA',
'GL_OPERAND0_RGB',
'GL_OPERAND1_RGB',
'GL_OPERAND2_RGB',
'GL_OPERAND0_ALPHA',
'GL_OPERAND1_ALPHA',
'GL_OPERAND2_ALPHA',
'GL_RGB_SCALE',
'GL_ADD_SIGNED',
'GL_INTERPOLATE',
'GL_SUBTRACT',
'GL_CONSTANT',
'GL_PRIMARY_COLOR',
'GL_PREVIOUS',
'GL_DOT3_RGB',
'GL_DOT3_RGBA',
'GL_BLEND_DST_RGB',
'GL_BLEND_SRC_RGB',
'GL_BLEND_DST_ALPHA',
'GL_BLEND_SRC_ALPHA',
'GL_POINT_FADE_THRESHOLD_SIZE',
'GL_DEPTH_COMPONENT16',
'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT32',
'GL_MIRRORED_REPEAT',
'GL_MAX_TEXTURE_LOD_BIAS',
'GL_TEXTURE_LOD_BIAS',
'GL_INCR_WRAP',
'GL_DECR_WRAP',
'GL_TEXTURE_DEPTH_SIZE',
'GL_TEXTURE_COMPARE_MODE',
'GL_TEXTURE_COMPARE_FUNC',
'GL_POINT_SIZE_MIN',
'GL_POINT_SIZE_MAX',
'GL_POINT_DISTANCE_ATTENUATION',
'GL_GENERATE_MIPMAP',
'GL_GENERATE_MIPMAP_HINT',
'GL_FOG_COORDINATE_SOURCE',
'GL_FOG_COORDINATE',
'GL_FRAGMENT_DEPTH',
'GL_CURRENT_FOG_COORDINATE',
'GL_FOG_COORDINATE_ARRAY_TYPE',
'GL_FOG_COORDINATE_ARRAY_STRIDE',
'GL_FOG_COORDINATE_ARRAY_POINTER',
'GL_FOG_COORDINATE_ARRAY',
'GL_COLOR_SUM',
'GL_CURRENT_SECONDARY_COLOR',
'GL_SECONDARY_COLOR_ARRAY_SIZE',
'GL_SECONDARY_COLOR_ARRAY_TYPE',
'GL_SECONDARY_COLOR_ARRAY_STRIDE',
'GL_SECONDARY_COLOR_ARRAY_POINTER',
'GL_SECONDARY_COLOR_ARRAY',
'GL_TEXTURE_FILTER_CONTROL',
'GL_DEPTH_TEXTURE_MODE',
'GL_COMPARE_R_TO_TEXTURE',
'GL_BLEND_COLOR',
'GL_BLEND_EQUATION',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
'GL_FUNC_ADD',
'GL_FUNC_REVERSE_SUBTRACT',
'GL_FUNC_SUBTRACT',
'GL_MIN',
'GL_MAX',
'GL_BUFFER_SIZE',
'GL_BUFFER_USAGE',
'GL_QUERY_COUNTER_BITS',
'GL_CURRENT_QUERY',
'GL_QUERY_RESULT',
'GL_QUERY_RESULT_AVAILABLE',
'GL_ARRAY_BUFFER',
'GL_ELEMENT_ARRAY_BUFFER',
'GL_ARRAY_BUFFER_BINDING',
'GL_ELEMENT_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING',
'GL_READ_ONLY',
'GL_WRITE_ONLY',
'GL_READ_WRITE',
'GL_BUFFER_ACCESS',
'GL_BUFFER_MAPPED',
'GL_BUFFER_MAP_POINTER',
'GL_STREAM_DRAW',
'GL_STREAM_READ',
'GL_STREAM_COPY',
'GL_STATIC_DRAW',
'GL_STATIC_READ',
'GL_STATIC_COPY',
'GL_DYNAMIC_DRAW',
'GL_DYNAMIC_READ',
'GL_DYNAMIC_COPY',
'GL_SAMPLES_PASSED',
'GL_SRC1_ALPHA',
'GL_VERTEX_ARRAY_BUFFER_BINDING',
'GL_NORMAL_ARRAY_BUFFER_BINDING',
'GL_COLOR_ARRAY_BUFFER_BINDING',
'GL_INDEX_ARRAY_BUFFER_BINDING',
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING',
'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING',
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING',
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING',
'GL_WEIGHT_ARRAY_BUFFER_BINDING',
'GL_FOG_COORD_SRC',
'GL_FOG_COORD',
'GL_CURRENT_FOG_COORD',
'GL_FOG_COORD_ARRAY_TYPE',
'GL_FOG_COORD_ARRAY_STRIDE',
'GL_FOG_COORD_ARRAY_POINTER',
'GL_FOG_COORD_ARRAY',
'GL_FOG_COORD_ARRAY_BUFFER_BINDING',
'GL_SRC0_RGB',
'GL_SRC1_RGB',
'GL_SRC2_RGB',
'GL_SRC0_ALPHA',
'GL_SRC2_ALPHA',
'GL_BLEND_EQUATION_RGB',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
'GL_VERTEX_ATTRIB_ARRAY_SIZE',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
'GL_VERTEX_ATTRIB_ARRAY_TYPE',
'GL_CURRENT_VERTEX_ATTRIB',
'GL_VERTEX_PROGRAM_POINT_SIZE',
'GL_VERTEX_ATTRIB_ARRAY_POINTER',
'GL_STENCIL_BACK_FUNC',
'GL_STENCIL_BACK_FAIL',
'GL_STENCIL_BACK_PASS_DEPTH_FAIL',
'GL_STENCIL_BACK_PASS_DEPTH_PASS',
'GL_MAX_DRAW_BUFFERS',
'GL_DRAW_BUFFER0',
'GL_DRAW_BUFFER1',
'GL_DRAW_BUFFER2',
'GL_DRAW_BUFFER3',
'GL_DRAW_BUFFER4',
'GL_DRAW_BUFFER5',
'GL_DRAW_BUFFER6',
'GL_DRAW_BUFFER7',
'GL_DRAW_BUFFER8',
'GL_DRAW_BUFFER9',
'GL_DRAW_BUFFER10',
'GL_DRAW_BUFFER11',
'GL_DRAW_BUFFER12',
'GL_DRAW_BUFFER13',
'GL_DRAW_BUFFER14',
'GL_DRAW_BUFFER15',
'GL_BLEND_EQUATION_ALPHA',
'GL_MAX_VERTEX_ATTRIBS',
'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
'GL_MAX_TEXTURE_IMAGE_UNITS',
'GL_FRAGMENT_SHADER',
'GL_VERTEX_SHADER',
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS',
'GL_MAX_VERTEX_UNIFORM_COMPONENTS',
'GL_MAX_VARYING_FLOATS',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
'GL_SHADER_TYPE',
'GL_FLOAT_VEC2',
'GL_FLOAT_VEC3',
'GL_FLOAT_VEC4',
'GL_INT_VEC2',
'GL_INT_VEC3',
'GL_INT_VEC4',
'GL_BOOL',
'GL_BOOL_VEC2',
'GL_BOOL_VEC3',
'GL_BOOL_VEC4',
'GL_FLOAT_MAT2',
'GL_FLOAT_MAT3',
'GL_FLOAT_MAT4',
'GL_SAMPLER_1D',
'GL_SAMPLER_2D',
'GL_SAMPLER_3D',
'GL_SAMPLER_CUBE',
'GL_SAMPLER_1D_SHADOW',
'GL_SAMPLER_2D_SHADOW',
'GL_DELETE_STATUS',
'GL_COMPILE_STATUS',
'GL_LINK_STATUS',
'GL_VALIDATE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_ATTACHED_SHADERS',
'GL_ACTIVE_UNIFORMS',
'GL_ACTIVE_UNIFORM_MAX_LENGTH',
'GL_SHADER_SOURCE_LENGTH',
'GL_ACTIVE_ATTRIBUTES',
'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT',
'GL_SHADING_LANGUAGE_VERSION',
'GL_CURRENT_PROGRAM',
'GL_POINT_SPRITE_COORD_ORIGIN',
'GL_LOWER_LEFT',
'GL_UPPER_LEFT',
'GL_STENCIL_BACK_REF',
'GL_STENCIL_BACK_VALUE_MASK',
'GL_STENCIL_BACK_WRITEMASK',
'GL_VERTEX_PROGRAM_TWO_SIDE',
'GL_POINT_SPRITE',
'GL_COORD_REPLACE',
'GL_MAX_TEXTURE_COORDS',
'GL_PIXEL_PACK_BUFFER',
'GL_PIXEL_UNPACK_BUFFER',
'GL_PIXEL_PACK_BUFFER_BINDING',
'GL_PIXEL_UNPACK_BUFFER_BINDING',
'GL_FLOAT_MAT2x3',
'GL_FLOAT_MAT2x4',
'GL_FLOAT_MAT3x2',
'GL_FLOAT_MAT3x4',
'GL_FLOAT_MAT4x2',
'GL_FLOAT_MAT4x3',
'GL_SRGB',
'GL_SRGB8',
'GL_SRGB_ALPHA',
'GL_SRGB8_ALPHA8',
'GL_COMPRESSED_SRGB',
'GL_COMPRESSED_SRGB_ALPHA',
'GL_CURRENT_RASTER_SECONDARY_COLOR',
'GL_SLUMINANCE_ALPHA',
'GL_SLUMINANCE8_ALPHA8',
'GL_SLUMINANCE',
'GL_SLUMINANCE8',
'GL_COMPRESSED_SLUMINANCE',
'GL_COMPRESSED_SLUMINANCE_ALPHA',
'GL_COMPARE_REF_TO_TEXTURE',
'GL_CLIP_DISTANCE0',
'GL_CLIP_DISTANCE1',
'GL_CLIP_DISTANCE2',
'GL_CLIP_DISTANCE3',
'GL_CLIP_DISTANCE4',
'GL_CLIP_DISTANCE5',
'GL_CLIP_DISTANCE6',
'GL_CLIP_DISTANCE7',
'GL_MAX_CLIP_DISTANCES',
'GL_MAJOR_VERSION',
'GL_MINOR_VERSION',
'GL_NUM_EXTENSIONS',
'GL_CONTEXT_FLAGS',
'GL_COMPRESSED_RED',
'GL_COMPRESSED_RG',
'GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT',
'GL_RGBA32F',
'GL_RGB32F',
'GL_RGBA16F',
'GL_RGB16F',
'GL_VERTEX_ATTRIB_ARRAY_INTEGER',
'GL_MAX_ARRAY_TEXTURE_LAYERS',
'GL_MIN_PROGRAM_TEXEL_OFFSET',
'GL_MAX_PROGRAM_TEXEL_OFFSET',
'GL_CLAMP_READ_COLOR',
'GL_FIXED_ONLY',
'GL_MAX_VARYING_COMPONENTS',
'GL_TEXTURE_1D_ARRAY',
'GL_PROXY_TEXTURE_1D_ARRAY',
'GL_TEXTURE_2D_ARRAY',
'GL_PROXY_TEXTURE_2D_ARRAY',
'GL_TEXTURE_BINDING_1D_ARRAY',
'GL_TEXTURE_BINDING_2D_ARRAY',
'GL_R11F_G11F_B10F',
'GL_UNSIGNED_INT_10F_11F_11F_REV',
'GL_RGB9_E5',
'GL_UNSIGNED_INT_5_9_9_9_REV',
'GL_TEXTURE_SHARED_SIZE',
'GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH',
'GL_TRANSFORM_FEEDBACK_BUFFER_MODE',
'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS',
'GL_TRANSFORM_FEEDBACK_VARYINGS',
'GL_TRANSFORM_FEEDBACK_BUFFER_START',
'GL_TRANSFORM_FEEDBACK_BUFFER_SIZE',
'GL_PRIMITIVES_GENERATED',
'GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN',
'GL_RASTERIZER_DISCARD',
'GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS',
'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS',
'GL_INTERLEAVED_ATTRIBS',
'GL_SEPARATE_ATTRIBS',
'GL_TRANSFORM_FEEDBACK_BUFFER',
'GL_TRANSFORM_FEEDBACK_BUFFER_BINDING',
'GL_RGBA32UI',
'GL_RGB32UI',
'GL_RGBA16UI',
'GL_RGB16UI',
'GL_RGBA8UI',
'GL_RGB8UI',
'GL_RGBA32I',
'GL_RGB32I',
'GL_RGBA16I',
'GL_RGB16I',
'GL_RGBA8I',
'GL_RGB8I',
'GL_RED_INTEGER',
'GL_GREEN_INTEGER',
'GL_BLUE_INTEGER',
'GL_RGB_INTEGER',
'GL_RGBA_INTEGER',
'GL_BGR_INTEGER',
'GL_BGRA_INTEGER',
'GL_SAMPLER_1D_ARRAY',
'GL_SAMPLER_2D_ARRAY',
'GL_SAMPLER_1D_ARRAY_SHADOW',
'GL_SAMPLER_2D_ARRAY_SHADOW',
'GL_SAMPLER_CUBE_SHADOW',
'GL_UNSIGNED_INT_VEC2',
'GL_UNSIGNED_INT_VEC3',
'GL_UNSIGNED_INT_VEC4',
'GL_INT_SAMPLER_1D',
'GL_INT_SAMPLER_2D',
'GL_INT_SAMPLER_3D',
'GL_INT_SAMPLER_CUBE',
'GL_INT_SAMPLER_1D_ARRAY',
'GL_INT_SAMPLER_2D_ARRAY',
'GL_UNSIGNED_INT_SAMPLER_1D',
'GL_UNSIGNED_INT_SAMPLER_2D',
'GL_UNSIGNED_INT_SAMPLER_3D',
'GL_UNSIGNED_INT_SAMPLER_CUBE',
'GL_UNSIGNED_INT_SAMPLER_1D_ARRAY',
'GL_UNSIGNED_INT_SAMPLER_2D_ARRAY',
'GL_QUERY_WAIT',
'GL_QUERY_NO_WAIT',
'GL_QUERY_BY_REGION_WAIT',
'GL_QUERY_BY_REGION_NO_WAIT',
'GL_BUFFER_ACCESS_FLAGS',
'GL_BUFFER_MAP_LENGTH',
'GL_BUFFER_MAP_OFFSET',
'GL_DEPTH_COMPONENT32F',
'GL_DEPTH32F_STENCIL8',
'GL_FLOAT_32_UNSIGNED_INT_24_8_REV',
'GL_INVALID_FRAMEBUFFER_OPERATION',
'GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING',
'GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE',
'GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE',
'GL_FRAMEBUFFER_DEFAULT',
'GL_FRAMEBUFFER_UNDEFINED',
'GL_DEPTH_STENCIL_ATTACHMENT',
'GL_MAX_RENDERBUFFER_SIZE',
'GL_DEPTH_STENCIL',
'GL_UNSIGNED_INT_24_8',
'GL_DEPTH24_STENCIL8',
'GL_TEXTURE_STENCIL_SIZE',
'GL_TEXTURE_RED_TYPE',
'GL_TEXTURE_GREEN_TYPE',
'GL_TEXTURE_BLUE_TYPE',
'GL_TEXTURE_ALPHA_TYPE',
'GL_TEXTURE_DEPTH_TYPE',
'GL_UNSIGNED_NORMALIZED',
'GL_FRAMEBUFFER_BINDING',
'GL_DRAW_FRAMEBUFFER_BINDING',
'GL_RENDERBUFFER_BINDING',
'GL_READ_FRAMEBUFFER',
'GL_DRAW_FRAMEBUFFER',
'GL_READ_FRAMEBUFFER_BINDING',
'GL_RENDERBUFFER_SAMPLES',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER',
'GL_FRAMEBUFFER_COMPLETE',
'GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT',
'GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT',
'GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER',
'GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER',
'GL_FRAMEBUFFER_UNSUPPORTED',
'GL_MAX_COLOR_ATTACHMENTS',
'GL_COLOR_ATTACHMENT0',
'GL_COLOR_ATTACHMENT1',
'GL_COLOR_ATTACHMENT2',
'GL_COLOR_ATTACHMENT3',
'GL_COLOR_ATTACHMENT4',
'GL_COLOR_ATTACHMENT5',
'GL_COLOR_ATTACHMENT6',
'GL_COLOR_ATTACHMENT7',
'GL_COLOR_ATTACHMENT8',
'GL_COLOR_ATTACHMENT9',
'GL_COLOR_ATTACHMENT10',
'GL_COLOR_ATTACHMENT11',
'GL_COLOR_ATTACHMENT12',
'GL_COLOR_ATTACHMENT13',
'GL_COLOR_ATTACHMENT14',
'GL_COLOR_ATTACHMENT15',
'GL_COLOR_ATTACHMENT16',
'GL_COLOR_ATTACHMENT17',
'GL_COLOR_ATTACHMENT18',
'GL_COLOR_ATTACHMENT19',
'GL_COLOR_ATTACHMENT20',
'GL_COLOR_ATTACHMENT21',
'GL_COLOR_ATTACHMENT22',
'GL_COLOR_ATTACHMENT23',
'GL_COLOR_ATTACHMENT24',
'GL_COLOR_ATTACHMENT25',
'GL_COLOR_ATTACHMENT26',
'GL_COLOR_ATTACHMENT27',
'GL_COLOR_ATTACHMENT28',
'GL_COLOR_ATTACHMENT29',
'GL_COLOR_ATTACHMENT30',
'GL_COLOR_ATTACHMENT31',
'GL_DEPTH_ATTACHMENT',
'GL_STENCIL_ATTACHMENT',
'GL_FRAMEBUFFER',
'GL_RENDERBUFFER',
'GL_RENDERBUFFER_WIDTH',
'GL_RENDERBUFFER_HEIGHT',
'GL_RENDERBUFFER_INTERNAL_FORMAT',
'GL_STENCIL_INDEX1',
'GL_STENCIL_INDEX4',
'GL_STENCIL_INDEX8',
'GL_STENCIL_INDEX16',
'GL_RENDERBUFFER_RED_SIZE',
'GL_RENDERBUFFER_GREEN_SIZE',
'GL_RENDERBUFFER_BLUE_SIZE',
'GL_RENDERBUFFER_ALPHA_SIZE',
'GL_RENDERBUFFER_DEPTH_SIZE',
'GL_RENDERBUFFER_STENCIL_SIZE',
'GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE',
'GL_MAX_SAMPLES',
'GL_INDEX',
'GL_TEXTURE_LUMINANCE_TYPE',
'GL_TEXTURE_INTENSITY_TYPE',
'GL_FRAMEBUFFER_SRGB',
'GL_HALF_FLOAT',
'GL_MAP_READ_BIT',
'GL_MAP_WRITE_BIT',
'GL_MAP_INVALIDATE_RANGE_BIT',
'GL_MAP_INVALIDATE_BUFFER_BIT',
'GL_MAP_FLUSH_EXPLICIT_BIT',
'GL_MAP_UNSYNCHRONIZED_BIT',
'GL_COMPRESSED_RED_RGTC1',
'GL_COMPRESSED_SIGNED_RED_RGTC1',
'GL_COMPRESSED_RG_RGTC2',
'GL_COMPRESSED_SIGNED_RG_RGTC2',
'GL_RG',
'GL_RG_INTEGER',
'GL_R8',
'GL_R16',
'GL_RG8',
'GL_RG16',
'GL_R16F',
'GL_R32F',
'GL_RG16F',
'GL_RG32F',
'GL_R8I',
'GL_R8UI',
'GL_R16I',
'GL_R16UI',
'GL_R32I',
'GL_R32UI',
'GL_RG8I',
'GL_RG8UI',
'GL_RG16I',
'GL_RG16UI',
'GL_RG32I',
'GL_RG32UI',
'GL_VERTEX_ARRAY_BINDING',
'GL_CLAMP_VERTEX_COLOR',
'GL_CLAMP_FRAGMENT_COLOR',
'GL_ALPHA_INTEGER',
'GL_SAMPLER_2D_RECT',
'GL_SAMPLER_2D_RECT_SHADOW',
'GL_SAMPLER_BUFFER',
'GL_INT_SAMPLER_2D_RECT',
'GL_INT_SAMPLER_BUFFER',
'GL_UNSIGNED_INT_SAMPLER_2D_RECT',
'GL_UNSIGNED_INT_SAMPLER_BUFFER',
'GL_TEXTURE_BUFFER',
'GL_MAX_TEXTURE_BUFFER_SIZE',
'GL_TEXTURE_BINDING_BUFFER',
'GL_TEXTURE_BUFFER_DATA_STORE_BINDING',
'GL_TEXTURE_RECTANGLE',
'GL_TEXTURE_BINDING_RECTANGLE',
'GL_PROXY_TEXTURE_RECTANGLE',
'GL_MAX_RECTANGLE_TEXTURE_SIZE',
'GL_R8_SNORM',
'GL_RG8_SNORM',
'GL_RGB8_SNORM',
'GL_RGBA8_SNORM',
'GL_R16_SNORM',
'GL_RG16_SNORM',
'GL_RGB16_SNORM',
'GL_RGBA16_SNORM',
'GL_SIGNED_NORMALIZED',
'GL_PRIMITIVE_RESTART',
'GL_PRIMITIVE_RESTART_INDEX',
'GL_COPY_READ_BUFFER',
'GL_COPY_WRITE_BUFFER',
'GL_UNIFORM_BUFFER',
'GL_UNIFORM_BUFFER_BINDING',
'GL_UNIFORM_BUFFER_START',
'GL_UNIFORM_BUFFER_SIZE',
'GL_MAX_VERTEX_UNIFORM_BLOCKS',
'GL_MAX_GEOMETRY_UNIFORM_BLOCKS',
'GL_MAX_FRAGMENT_UNIFORM_BLOCKS',
'GL_MAX_COMBINED_UNIFORM_BLOCKS',
'GL_MAX_UNIFORM_BUFFER_BINDINGS',
'GL_MAX_UNIFORM_BLOCK_SIZE',
'GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS',
'GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS',
'GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS',
'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT',
'GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH',
'GL_ACTIVE_UNIFORM_BLOCKS',
'GL_UNIFORM_TYPE',
'GL_UNIFORM_SIZE',
'GL_UNIFORM_NAME_LENGTH',
'GL_UNIFORM_BLOCK_INDEX',
'GL_UNIFORM_OFFSET',
'GL_UNIFORM_ARRAY_STRIDE',
'GL_UNIFORM_MATRIX_STRIDE',
'GL_UNIFORM_IS_ROW_MAJOR',
'GL_UNIFORM_BLOCK_BINDING',
'GL_UNIFORM_BLOCK_DATA_SIZE',
'GL_UNIFORM_BLOCK_NAME_LENGTH',
'GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS',
'GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES',
'GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER',
'GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER',
'GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER',
'GL_INVALID_INDEX',
'GL_CONTEXT_CORE_PROFILE_BIT',
'GL_CONTEXT_COMPATIBILITY_PROFILE_BIT',
'GL_LINES_ADJACENCY',
'GL_LINE_STRIP_ADJACENCY',
'GL_TRIANGLES_ADJACENCY',
'GL_TRIANGLE_STRIP_ADJACENCY',
'GL_PROGRAM_POINT_SIZE',
'GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS',
'GL_FRAMEBUFFER_ATTACHMENT_LAYERED',
'GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS',
'GL_GEOMETRY_SHADER',
'GL_GEOMETRY_VERTICES_OUT',
'GL_GEOMETRY_INPUT_TYPE',
'GL_GEOMETRY_OUTPUT_TYPE',
'GL_MAX_GEOMETRY_UNIFORM_COMPONENTS',
'GL_MAX_GEOMETRY_OUTPUT_VERTICES',
'GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS',
'GL_MAX_VERTEX_OUTPUT_COMPONENTS',
'GL_MAX_GEOMETRY_INPUT_COMPONENTS',
'GL_MAX_GEOMETRY_OUTPUT_COMPONENTS',
'GL_MAX_FRAGMENT_INPUT_COMPONENTS',
'GL_CONTEXT_PROFILE_MASK',
'GL_DEPTH_CLAMP',
'GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION',
'GL_FIRST_VERTEX_CONVENTION',
'GL_LAST_VERTEX_CONVENTION',
'GL_PROVOKING_VERTEX',
'GL_TEXTURE_CUBE_MAP_SEAMLESS',
'GL_MAX_SERVER_WAIT_TIMEOUT',
'GL_OBJECT_TYPE',
'GL_SYNC_CONDITION',
'GL_SYNC_STATUS',
'GL_SYNC_FLAGS',
'GL_SYNC_FENCE',
'GL_SYNC_GPU_COMMANDS_COMPLETE',
'GL_UNSIGNALED',
'GL_SIGNALED',
'GL_ALREADY_SIGNALED',
'GL_TIMEOUT_EXPIRED',
'GL_CONDITION_SATISFIED',
'GL_WAIT_FAILED',
'GL_TIMEOUT_IGNORED',
'GL_SYNC_FLUSH_COMMANDS_BIT',
'GL_SAMPLE_POSITION',
'GL_SAMPLE_MASK',
'GL_SAMPLE_MASK_VALUE',
'GL_MAX_SAMPLE_MASK_WORDS',
'GL_TEXTURE_2D_MULTISAMPLE',
'GL_PROXY_TEXTURE_2D_MULTISAMPLE',
'GL_TEXTURE_2D_MULTISAMPLE_ARRAY',
'GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY',
'GL_TEXTURE_BINDING_2D_MULTISAMPLE',
'GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY',
'GL_TEXTURE_SAMPLES',
'GL_TEXTURE_FIXED_SAMPLE_LOCATIONS',
'GL_SAMPLER_2D_MULTISAMPLE',
'GL_INT_SAMPLER_2D_MULTISAMPLE',
'GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE',
'GL_SAMPLER_2D_MULTISAMPLE_ARRAY',
'GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY',
'GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY',
'GL_MAX_COLOR_TEXTURE_SAMPLES',
'GL_MAX_DEPTH_TEXTURE_SAMPLES',
'GL_MAX_INTEGER_SAMPLES',
'GL_VERTEX_ATTRIB_ARRAY_DIVISOR',
'GL_SRC1_COLOR',
'GL_ONE_MINUS_SRC1_COLOR',
'GL_ONE_MINUS_SRC1_ALPHA',
'GL_MAX_DUAL_SOURCE_DRAW_BUFFERS',
'GL_ANY_SAMPLES_PASSED',
'GL_SAMPLER_BINDING',
'GL_RGB10_A2UI',
'GL_TEXTURE_SWIZZLE_R',
'GL_TEXTURE_SWIZZLE_G',
'GL_TEXTURE_SWIZZLE_B',
'GL_TEXTURE_SWIZZLE_A',
'GL_TEXTURE_SWIZZLE_RGBA',
'GL_TIME_ELAPSED',
'GL_TIMESTAMP',
'GL_INT_2_10_10_10_REV',
'GL_SAMPLE_SHADING',
'GL_MIN_SAMPLE_SHADING_VALUE',
'GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET',
'GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET',
'GL_TEXTURE_CUBE_MAP_ARRAY',
'GL_TEXTURE_BINDING_CUBE_MAP_ARRAY',
'GL_PROXY_TEXTURE_CUBE_MAP_ARRAY',
'GL_SAMPLER_CUBE_MAP_ARRAY',
'GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW',
'GL_INT_SAMPLER_CUBE_MAP_ARRAY',
'GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY',
'GL_DRAW_INDIRECT_BUFFER',
'GL_DRAW_INDIRECT_BUFFER_BINDING',
'GL_GEOMETRY_SHADER_INVOCATIONS',
'GL_MAX_GEOMETRY_SHADER_INVOCATIONS',
'GL_MIN_FRAGMENT_INTERPOLATION_OFFSET',
'GL_MAX_FRAGMENT_INTERPOLATION_OFFSET',
'GL_FRAGMENT_INTERPOLATION_OFFSET_BITS',
'GL_MAX_VERTEX_STREAMS',
'GL_DOUBLE_VEC2',
'GL_DOUBLE_VEC3',
'GL_DOUBLE_VEC4',
'GL_DOUBLE_MAT2',
'GL_DOUBLE_MAT3',
'GL_DOUBLE_MAT4',
'GL_DOUBLE_MAT2x3',
'GL_DOUBLE_MAT2x4',
'GL_DOUBLE_MAT3x2',
'GL_DOUBLE_MAT3x4',
'GL_DOUBLE_MAT4x2',
'GL_DOUBLE_MAT4x3',
'GL_ACTIVE_SUBROUTINES',
'GL_ACTIVE_SUBROUTINE_UNIFORMS',
'GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS',
'GL_ACTIVE_SUBROUTINE_MAX_LENGTH',
'GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH',
'GL_MAX_SUBROUTINES',
'GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS',
'GL_NUM_COMPATIBLE_SUBROUTINES',
'GL_COMPATIBLE_SUBROUTINES',
'GL_PATCHES',
'GL_PATCH_VERTICES',
'GL_PATCH_DEFAULT_INNER_LEVEL',
'GL_PATCH_DEFAULT_OUTER_LEVEL',
'GL_TESS_CONTROL_OUTPUT_VERTICES',
'GL_TESS_GEN_MODE',
'GL_TESS_GEN_SPACING',
'GL_TESS_GEN_VERTEX_ORDER',
'GL_TESS_GEN_POINT_MODE',
'GL_ISOLINES',
'GL_FRACTIONAL_ODD',
'GL_FRACTIONAL_EVEN',
'GL_MAX_PATCH_VERTICES',
'GL_MAX_TESS_GEN_LEVEL',
'GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS',
'GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS',
'GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS',
'GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS',
'GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS',
'GL_MAX_TESS_PATCH_COMPONENTS',
'GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS',
'GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS',
'GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS',
'GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS',
'GL_MAX_TESS_CONTROL_INPUT_COMPONENTS',
'GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS',
'GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS',
'GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS',
'GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER',
'GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER',
'GL_TESS_EVALUATION_SHADER',
'GL_TESS_CONTROL_SHADER',
'GL_TRANSFORM_FEEDBACK',
'GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED',
'GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE',
'GL_TRANSFORM_FEEDBACK_BINDING',
'GL_MAX_TRANSFORM_FEEDBACK_BUFFERS',
'GL_FIXED',
'GL_IMPLEMENTATION_COLOR_READ_TYPE',
'GL_IMPLEMENTATION_COLOR_READ_FORMAT',
'GL_LOW_FLOAT',
'GL_MEDIUM_FLOAT',
'GL_HIGH_FLOAT',
'GL_LOW_INT',
'GL_MEDIUM_INT',
'GL_HIGH_INT',
'GL_SHADER_COMPILER',
'GL_SHADER_BINARY_FORMATS',
'GL_NUM_SHADER_BINARY_FORMATS',
'GL_MAX_VERTEX_UNIFORM_VECTORS',
'GL_MAX_VARYING_VECTORS',
'GL_MAX_FRAGMENT_UNIFORM_VECTORS',
'GL_RGB565',
'GL_PROGRAM_BINARY_RETRIEVABLE_HINT',
'GL_PROGRAM_BINARY_LENGTH',
'GL_NUM_PROGRAM_BINARY_FORMATS',
'GL_PROGRAM_BINARY_FORMATS',
'GL_VERTEX_SHADER_BIT',
'GL_FRAGMENT_SHADER_BIT',
'GL_GEOMETRY_SHADER_BIT',
'GL_TESS_CONTROL_SHADER_BIT',
'GL_TESS_EVALUATION_SHADER_BIT',
'GL_ALL_SHADER_BITS',
'GL_PROGRAM_SEPARABLE',
'GL_ACTIVE_PROGRAM',
'GL_PROGRAM_PIPELINE_BINDING',
'GL_MAX_VIEWPORTS',
'GL_VIEWPORT_SUBPIXEL_BITS',
'GL_VIEWPORT_BOUNDS_RANGE',
'GL_LAYER_PROVOKING_VERTEX',
'GL_VIEWPORT_INDEX_PROVOKING_VERTEX',
'GL_UNDEFINED_VERTEX',
'GL_COPY_READ_BUFFER_BINDING',
'GL_COPY_WRITE_BUFFER_BINDING',
'GL_TRANSFORM_FEEDBACK_ACTIVE',
'GL_TRANSFORM_FEEDBACK_PAUSED',
'GL_UNPACK_COMPRESSED_BLOCK_WIDTH',
'GL_UNPACK_COMPRESSED_BLOCK_HEIGHT',
'GL_UNPACK_COMPRESSED_BLOCK_DEPTH',
'GL_UNPACK_COMPRESSED_BLOCK_SIZE',
'GL_PACK_COMPRESSED_BLOCK_WIDTH',
'GL_PACK_COMPRESSED_BLOCK_HEIGHT',
'GL_PACK_COMPRESSED_BLOCK_DEPTH',
'GL_PACK_COMPRESSED_BLOCK_SIZE',
'GL_NUM_SAMPLE_COUNTS',
'GL_MIN_MAP_BUFFER_ALIGNMENT',
'GL_ATOMIC_COUNTER_BUFFER',
'GL_ATOMIC_COUNTER_BUFFER_BINDING',
'GL_ATOMIC_COUNTER_BUFFER_START',
'GL_ATOMIC_COUNTER_BUFFER_SIZE',
'GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE',
'GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS',
'GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES',
'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER',
'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER',
'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER',
'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER',
'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER',
'GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_VERTEX_ATOMIC_COUNTERS',
'GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS',
'GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS',
'GL_MAX_GEOMETRY_ATOMIC_COUNTERS',
'GL_MAX_FRAGMENT_ATOMIC_COUNTERS',
'GL_MAX_COMBINED_ATOMIC_COUNTERS',
'GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE',
'GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS',
'GL_ACTIVE_ATOMIC_COUNTER_BUFFERS',
'GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX',
'GL_UNSIGNED_INT_ATOMIC_COUNTER',
'GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT',
'GL_ELEMENT_ARRAY_BARRIER_BIT',
'GL_UNIFORM_BARRIER_BIT',
'GL_TEXTURE_FETCH_BARRIER_BIT',
'GL_SHADER_IMAGE_ACCESS_BARRIER_BIT',
'GL_COMMAND_BARRIER_BIT',
'GL_PIXEL_BUFFER_BARRIER_BIT',
'GL_TEXTURE_UPDATE_BARRIER_BIT',
'GL_BUFFER_UPDATE_BARRIER_BIT',
'GL_FRAMEBUFFER_BARRIER_BIT',
'GL_TRANSFORM_FEEDBACK_BARRIER_BIT',
'GL_ATOMIC_COUNTER_BARRIER_BIT',
'GL_ALL_BARRIER_BITS',
'GL_MAX_IMAGE_UNITS',
'GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS',
'GL_IMAGE_BINDING_NAME',
'GL_IMAGE_BINDING_LEVEL',
'GL_IMAGE_BINDING_LAYERED',
'GL_IMAGE_BINDING_LAYER',
'GL_IMAGE_BINDING_ACCESS',
'GL_IMAGE_1D',
'GL_IMAGE_2D',
'GL_IMAGE_3D',
'GL_IMAGE_2D_RECT',
'GL_IMAGE_CUBE',
'GL_IMAGE_BUFFER',
'GL_IMAGE_1D_ARRAY',
'GL_IMAGE_2D_ARRAY',
'GL_IMAGE_CUBE_MAP_ARRAY',
'GL_IMAGE_2D_MULTISAMPLE',
'GL_IMAGE_2D_MULTISAMPLE_ARRAY',
'GL_INT_IMAGE_1D',
'GL_INT_IMAGE_2D',
'GL_INT_IMAGE_3D',
'GL_INT_IMAGE_2D_RECT',
'GL_INT_IMAGE_CUBE',
'GL_INT_IMAGE_BUFFER',
'GL_INT_IMAGE_1D_ARRAY',
'GL_INT_IMAGE_2D_ARRAY',
'GL_INT_IMAGE_CUBE_MAP_ARRAY',
'GL_INT_IMAGE_2D_MULTISAMPLE',
'GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY',
'GL_UNSIGNED_INT_IMAGE_1D',
'GL_UNSIGNED_INT_IMAGE_2D',
'GL_UNSIGNED_INT_IMAGE_3D',
'GL_UNSIGNED_INT_IMAGE_2D_RECT',
'GL_UNSIGNED_INT_IMAGE_CUBE',
'GL_UNSIGNED_INT_IMAGE_BUFFER',
'GL_UNSIGNED_INT_IMAGE_1D_ARRAY',
'GL_UNSIGNED_INT_IMAGE_2D_ARRAY',
'GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY',
'GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE',
'GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY',
'GL_MAX_IMAGE_SAMPLES',
'GL_IMAGE_BINDING_FORMAT',
'GL_IMAGE_FORMAT_COMPATIBILITY_TYPE',
'GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE',
'GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS',
'GL_MAX_VERTEX_IMAGE_UNIFORMS',
'GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS',
'GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS',
'GL_MAX_GEOMETRY_IMAGE_UNIFORMS',
'GL_MAX_FRAGMENT_IMAGE_UNIFORMS',
'GL_MAX_COMBINED_IMAGE_UNIFORMS',
'GL_COMPRESSED_RGBA_BPTC_UNORM',
'GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM',
'GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT',
'GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT',
'GL_TEXTURE_IMMUTABLE_FORMAT',
'GL_NUM_SHADING_LANGUAGE_VERSIONS',
'GL_VERTEX_ATTRIB_ARRAY_LONG',
'GL_COMPRESSED_RGB8_ETC2',
'GL_COMPRESSED_SRGB8_ETC2',
'GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2',
'GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2',
'GL_COMPRESSED_RGBA8_ETC2_EAC',
'GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC',
'GL_COMPRESSED_R11_EAC',
'GL_COMPRESSED_SIGNED_R11_EAC',
'GL_COMPRESSED_RG11_EAC',
'GL_COMPRESSED_SIGNED_RG11_EAC',
'GL_PRIMITIVE_RESTART_FIXED_INDEX',
'GL_ANY_SAMPLES_PASSED_CONSERVATIVE',
'GL_MAX_ELEMENT_INDEX',
'GL_COMPUTE_SHADER',
'GL_MAX_COMPUTE_UNIFORM_BLOCKS',
'GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS',
'GL_MAX_COMPUTE_IMAGE_UNIFORMS',
'GL_MAX_COMPUTE_SHARED_MEMORY_SIZE',
'GL_MAX_COMPUTE_UNIFORM_COMPONENTS',
'GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS',
'GL_MAX_COMPUTE_ATOMIC_COUNTERS',
'GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS',
'GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS',
'GL_MAX_COMPUTE_WORK_GROUP_COUNT',
'GL_MAX_COMPUTE_WORK_GROUP_SIZE',
'GL_COMPUTE_WORK_GROUP_SIZE',
'GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER',
'GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER',
'GL_DISPATCH_INDIRECT_BUFFER',
'GL_DISPATCH_INDIRECT_BUFFER_BINDING',
'GL_COMPUTE_SHADER_BIT',
'GL_DEBUG_OUTPUT_SYNCHRONOUS',
'GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH',
'GL_DEBUG_CALLBACK_FUNCTION',
'GL_DEBUG_CALLBACK_USER_PARAM',
'GL_DEBUG_SOURCE_API',
'GL_DEBUG_SOURCE_WINDOW_SYSTEM',
'GL_DEBUG_SOURCE_SHADER_COMPILER',
'GL_DEBUG_SOURCE_THIRD_PARTY',
'GL_DEBUG_SOURCE_APPLICATION',
'GL_DEBUG_SOURCE_OTHER',
'GL_DEBUG_TYPE_ERROR',
'GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR',
'GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR',
'GL_DEBUG_TYPE_PORTABILITY',
'GL_DEBUG_TYPE_PERFORMANCE',
'GL_DEBUG_TYPE_OTHER',
'GL_MAX_DEBUG_MESSAGE_LENGTH',
'GL_MAX_DEBUG_LOGGED_MESSAGES',
'GL_DEBUG_LOGGED_MESSAGES',
'GL_DEBUG_SEVERITY_HIGH',
'GL_DEBUG_SEVERITY_MEDIUM',
'GL_DEBUG_SEVERITY_LOW',
'GL_DEBUG_TYPE_MARKER',
'GL_DEBUG_TYPE_PUSH_GROUP',
'GL_DEBUG_TYPE_POP_GROUP',
'GL_DEBUG_SEVERITY_NOTIFICATION',
'GL_MAX_DEBUG_GROUP_STACK_DEPTH',
'GL_DEBUG_GROUP_STACK_DEPTH',
'GL_BUFFER',
'GL_SHADER',
'GL_PROGRAM',
'GL_QUERY',
'GL_PROGRAM_PIPELINE',
'GL_SAMPLER',
'GL_MAX_LABEL_LENGTH',
'GL_DEBUG_OUTPUT',
'GL_CONTEXT_FLAG_DEBUG_BIT',
'GL_MAX_UNIFORM_LOCATIONS',
'GL_FRAMEBUFFER_DEFAULT_WIDTH',
'GL_FRAMEBUFFER_DEFAULT_HEIGHT',
'GL_FRAMEBUFFER_DEFAULT_LAYERS',
'GL_FRAMEBUFFER_DEFAULT_SAMPLES',
'GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS',
'GL_MAX_FRAMEBUFFER_WIDTH',
'GL_MAX_FRAMEBUFFER_HEIGHT',
'GL_MAX_FRAMEBUFFER_LAYERS',
'GL_MAX_FRAMEBUFFER_SAMPLES',
'GL_INTERNALFORMAT_SUPPORTED',
'GL_INTERNALFORMAT_PREFERRED',
'GL_INTERNALFORMAT_RED_SIZE',
'GL_INTERNALFORMAT_GREEN_SIZE',
'GL_INTERNALFORMAT_BLUE_SIZE',
'GL_INTERNALFORMAT_ALPHA_SIZE',
'GL_INTERNALFORMAT_DEPTH_SIZE',
'GL_INTERNALFORMAT_STENCIL_SIZE',
'GL_INTERNALFORMAT_SHARED_SIZE',
'GL_INTERNALFORMAT_RED_TYPE',
'GL_INTERNALFORMAT_GREEN_TYPE',
'GL_INTERNALFORMAT_BLUE_TYPE',
'GL_INTERNALFORMAT_ALPHA_TYPE',
'GL_INTERNALFORMAT_DEPTH_TYPE',
'GL_INTERNALFORMAT_STENCIL_TYPE',
'GL_MAX_WIDTH',
'GL_MAX_HEIGHT',
'GL_MAX_DEPTH',
'GL_MAX_LAYERS',
'GL_MAX_COMBINED_DIMENSIONS',
'GL_COLOR_COMPONENTS',
'GL_DEPTH_COMPONENTS',
'GL_STENCIL_COMPONENTS',
'GL_COLOR_RENDERABLE',
'GL_DEPTH_RENDERABLE',
'GL_STENCIL_RENDERABLE',
'GL_FRAMEBUFFER_RENDERABLE',
'GL_FRAMEBUFFER_RENDERABLE_LAYERED',
'GL_FRAMEBUFFER_BLEND',
'GL_READ_PIXELS',
'GL_READ_PIXELS_FORMAT',
'GL_READ_PIXELS_TYPE',
'GL_TEXTURE_IMAGE_FORMAT',
'GL_TEXTURE_IMAGE_TYPE',
'GL_GET_TEXTURE_IMAGE_FORMAT',
'GL_GET_TEXTURE_IMAGE_TYPE',
'GL_MIPMAP',
'GL_MANUAL_GENERATE_MIPMAP',
'GL_AUTO_GENERATE_MIPMAP',
'GL_COLOR_ENCODING',
'GL_SRGB_READ',
'GL_SRGB_WRITE',
'GL_FILTER',
'GL_VERTEX_TEXTURE',
'GL_TESS_CONTROL_TEXTURE',
'GL_TESS_EVALUATION_TEXTURE',
'GL_GEOMETRY_TEXTURE',
'GL_FRAGMENT_TEXTURE',
'GL_COMPUTE_TEXTURE',
'GL_TEXTURE_SHADOW',
'GL_TEXTURE_GATHER',
'GL_TEXTURE_GATHER_SHADOW',
'GL_SHADER_IMAGE_LOAD',
'GL_SHADER_IMAGE_STORE',
'GL_SHADER_IMAGE_ATOMIC',
'GL_IMAGE_TEXEL_SIZE',
'GL_IMAGE_COMPATIBILITY_CLASS',
'GL_IMAGE_PIXEL_FORMAT',
'GL_IMAGE_PIXEL_TYPE',
'GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST',
'GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST',
'GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE',
'GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE',
'GL_TEXTURE_COMPRESSED_BLOCK_WIDTH',
'GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT',
'GL_TEXTURE_COMPRESSED_BLOCK_SIZE',
'GL_CLEAR_BUFFER',
'GL_TEXTURE_VIEW',
'GL_VIEW_COMPATIBILITY_CLASS',
'GL_FULL_SUPPORT',
'GL_CAVEAT_SUPPORT',
'GL_IMAGE_CLASS_4_X_32',
'GL_IMAGE_CLASS_2_X_32',
'GL_IMAGE_CLASS_1_X_32',
'GL_IMAGE_CLASS_4_X_16',
'GL_IMAGE_CLASS_2_X_16',
'GL_IMAGE_CLASS_1_X_16',
'GL_IMAGE_CLASS_4_X_8',
'GL_IMAGE_CLASS_2_X_8',
'GL_IMAGE_CLASS_1_X_8',
'GL_IMAGE_CLASS_11_11_10',
'GL_IMAGE_CLASS_10_10_10_2',
'GL_VIEW_CLASS_128_BITS',
'GL_VIEW_CLASS_96_BITS',
'GL_VIEW_CLASS_64_BITS',
'GL_VIEW_CLASS_48_BITS',
'GL_VIEW_CLASS_32_BITS',
'GL_VIEW_CLASS_24_BITS',
'GL_VIEW_CLASS_16_BITS',
'GL_VIEW_CLASS_8_BITS',
'GL_VIEW_CLASS_S3TC_DXT1_RGB',
'GL_VIEW_CLASS_S3TC_DXT1_RGBA',
'GL_VIEW_CLASS_S3TC_DXT3_RGBA',
'GL_VIEW_CLASS_S3TC_DXT5_RGBA',
'GL_VIEW_CLASS_RGTC1_RED',
'GL_VIEW_CLASS_RGTC2_RG',
'GL_VIEW_CLASS_BPTC_UNORM',
'GL_VIEW_CLASS_BPTC_FLOAT',
'GL_UNIFORM',
'GL_UNIFORM_BLOCK',
'GL_PROGRAM_INPUT',
'GL_PROGRAM_OUTPUT',
'GL_BUFFER_VARIABLE',
'GL_SHADER_STORAGE_BLOCK',
'GL_VERTEX_SUBROUTINE',
'GL_TESS_CONTROL_SUBROUTINE',
'GL_TESS_EVALUATION_SUBROUTINE',
'GL_GEOMETRY_SUBROUTINE',
'GL_FRAGMENT_SUBROUTINE',
'GL_COMPUTE_SUBROUTINE',
'GL_VERTEX_SUBROUTINE_UNIFORM',
'GL_TESS_CONTROL_SUBROUTINE_UNIFORM',
'GL_TESS_EVALUATION_SUBROUTINE_UNIFORM',
'GL_GEOMETRY_SUBROUTINE_UNIFORM',
'GL_FRAGMENT_SUBROUTINE_UNIFORM',
'GL_COMPUTE_SUBROUTINE_UNIFORM',
'GL_TRANSFORM_FEEDBACK_VARYING',
'GL_ACTIVE_RESOURCES',
'GL_MAX_NAME_LENGTH',
'GL_MAX_NUM_ACTIVE_VARIABLES',
'GL_MAX_NUM_COMPATIBLE_SUBROUTINES',
'GL_NAME_LENGTH',
'GL_TYPE',
'GL_ARRAY_SIZE',
'GL_OFFSET',
'GL_BLOCK_INDEX',
'GL_ARRAY_STRIDE',
'GL_MATRIX_STRIDE',
'GL_IS_ROW_MAJOR',
'GL_ATOMIC_COUNTER_BUFFER_INDEX',
'GL_BUFFER_BINDING',
'GL_BUFFER_DATA_SIZE',
'GL_NUM_ACTIVE_VARIABLES',
'GL_ACTIVE_VARIABLES',
'GL_REFERENCED_BY_VERTEX_SHADER',
'GL_REFERENCED_BY_TESS_CONTROL_SHADER',
'GL_REFERENCED_BY_TESS_EVALUATION_SHADER',
'GL_REFERENCED_BY_GEOMETRY_SHADER',
'GL_REFERENCED_BY_FRAGMENT_SHADER',
'GL_REFERENCED_BY_COMPUTE_SHADER',
'GL_TOP_LEVEL_ARRAY_SIZE',
'GL_TOP_LEVEL_ARRAY_STRIDE',
'GL_LOCATION',
'GL_LOCATION_INDEX',
'GL_IS_PER_PATCH',
'GL_SHADER_STORAGE_BUFFER',
'GL_SHADER_STORAGE_BUFFER_BINDING',
'GL_SHADER_STORAGE_BUFFER_START',
'GL_SHADER_STORAGE_BUFFER_SIZE',
'GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS',
'GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS',
'GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS',
'GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS',
'GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS',
'GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS',
'GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS',
'GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS',
'GL_MAX_SHADER_STORAGE_BLOCK_SIZE',
'GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT',
'GL_SHADER_STORAGE_BARRIER_BIT',
'GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES',
'GL_DEPTH_STENCIL_TEXTURE_MODE',
'GL_TEXTURE_BUFFER_OFFSET',
'GL_TEXTURE_BUFFER_SIZE',
'GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT',
'GL_TEXTURE_VIEW_MIN_LEVEL',
'GL_TEXTURE_VIEW_NUM_LEVELS',
'GL_TEXTURE_VIEW_MIN_LAYER',
'GL_TEXTURE_VIEW_NUM_LAYERS',
'GL_TEXTURE_IMMUTABLE_LEVELS',
'GL_VERTEX_ATTRIB_BINDING',
'GL_VERTEX_ATTRIB_RELATIVE_OFFSET',
'GL_VERTEX_BINDING_DIVISOR',
'GL_VERTEX_BINDING_OFFSET',
'GL_VERTEX_BINDING_STRIDE',
'GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET',
'GL_MAX_VERTEX_ATTRIB_BINDINGS',
'GL_VERTEX_BINDING_BUFFER',
'GL_DISPLAY_LIST',
'GL_MAX_VERTEX_ATTRIB_STRIDE',
'GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED',
'GL_TEXTURE_BUFFER_BINDING',
'GL_MAP_PERSISTENT_BIT',
'GL_MAP_COHERENT_BIT',
'GL_DYNAMIC_STORAGE_BIT',
'GL_CLIENT_STORAGE_BIT',
'GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT',
'GL_BUFFER_IMMUTABLE_STORAGE',
'GL_BUFFER_STORAGE_FLAGS',
'GL_CLEAR_TEXTURE',
'GL_LOCATION_COMPONENT',
'GL_TRANSFORM_FEEDBACK_BUFFER_INDEX',
'GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE',
'GL_QUERY_BUFFER',
'GL_QUERY_BUFFER_BARRIER_BIT',
'GL_QUERY_BUFFER_BINDING',
'GL_QUERY_RESULT_NO_WAIT',
'GL_MIRROR_CLAMP_TO_EDGE',
'GL_CONTEXT_LOST',
'GL_NEGATIVE_ONE_TO_ONE',
'GL_ZERO_TO_ONE',
'GL_CLIP_ORIGIN',
'GL_CLIP_DEPTH_MODE',
'GL_QUERY_WAIT_INVERTED',
'GL_QUERY_NO_WAIT_INVERTED',
'GL_QUERY_BY_REGION_WAIT_INVERTED',
'GL_QUERY_BY_REGION_NO_WAIT_INVERTED',
'GL_MAX_CULL_DISTANCES',
'GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES',
'GL_TEXTURE_TARGET',
'GL_QUERY_TARGET',
'GL_GUILTY_CONTEXT_RESET',
'GL_INNOCENT_CONTEXT_RESET',
'GL_UNKNOWN_CONTEXT_RESET',
'GL_RESET_NOTIFICATION_STRATEGY',
'GL_LOSE_CONTEXT_ON_RESET',
'GL_NO_RESET_NOTIFICATION',
'GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT',
'GL_COLOR_TABLE',
'GL_POST_CONVOLUTION_COLOR_TABLE',
'GL_POST_COLOR_MATRIX_COLOR_TABLE',
'GL_PROXY_COLOR_TABLE',
'GL_PROXY_POST_CONVOLUTION_COLOR_TABLE',
'GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE',
'GL_CONVOLUTION_1D',
'GL_CONVOLUTION_2D',
'GL_SEPARABLE_2D',
'GL_HISTOGRAM',
'GL_PROXY_HISTOGRAM',
'GL_MINMAX',
'GL_CONTEXT_RELEASE_BEHAVIOR',
'GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH',
'GL_SHADER_BINARY_FORMAT_SPIR_V',
'GL_SPIR_V_BINARY',
'GL_PARAMETER_BUFFER',
'GL_PARAMETER_BUFFER_BINDING',
'GL_CONTEXT_FLAG_NO_ERROR_BIT',
'GL_VERTICES_SUBMITTED',
'GL_PRIMITIVES_SUBMITTED',
'GL_VERTEX_SHADER_INVOCATIONS',
'GL_TESS_CONTROL_SHADER_PATCHES',
'GL_TESS_EVALUATION_SHADER_INVOCATIONS',
'GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED',
'GL_FRAGMENT_SHADER_INVOCATIONS',
'GL_COMPUTE_SHADER_INVOCATIONS',
'GL_CLIPPING_INPUT_PRIMITIVES',
'GL_CLIPPING_OUTPUT_PRIMITIVES',
'GL_POLYGON_OFFSET_CLAMP',
'GL_SPIR_V_EXTENSIONS',
'GL_NUM_SPIR_V_EXTENSIONS',
'GL_TEXTURE_MAX_ANISOTROPY',
'GL_MAX_TEXTURE_MAX_ANISOTROPY',
'GL_TRANSFORM_FEEDBACK_OVERFLOW',
'GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW',
'GL_MULTISAMPLE_ARB',
'GL_SAMPLE_ALPHA_TO_COVERAGE_ARB',
'GL_SAMPLE_ALPHA_TO_ONE_ARB',
'GL_SAMPLE_COVERAGE_ARB',
'GL_SAMPLE_BUFFERS_ARB',
'GL_SAMPLES_ARB',
'GL_SAMPLE_COVERAGE_VALUE_ARB',
'GL_SAMPLE_COVERAGE_INVERT_ARB',
'GL_MULTISAMPLE_BIT_ARB',
'GL_COMPRESSED_RGB_S3TC_DXT1_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT1_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT3_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT5_EXT',
'GL_INVALID_FRAMEBUFFER_OPERATION_EXT',
'GL_MAX_RENDERBUFFER_SIZE_EXT',
'GL_FRAMEBUFFER_BINDING_EXT',
'GL_RENDERBUFFER_BINDING_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT',
'GL_FRAMEBUFFER_COMPLETE_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT',
'GL_FRAMEBUFFER_UNSUPPORTED_EXT',
'GL_MAX_COLOR_ATTACHMENTS_EXT',
'GL_COLOR_ATTACHMENT0_EXT',
'GL_COLOR_ATTACHMENT1_EXT',
'GL_COLOR_ATTACHMENT2_EXT',
'GL_COLOR_ATTACHMENT3_EXT',
'GL_COLOR_ATTACHMENT4_EXT',
'GL_COLOR_ATTACHMENT5_EXT',
'GL_COLOR_ATTACHMENT6_EXT',
'GL_COLOR_ATTACHMENT7_EXT',
'GL_COLOR_ATTACHMENT8_EXT',
'GL_COLOR_ATTACHMENT9_EXT',
'GL_COLOR_ATTACHMENT10_EXT',
'GL_COLOR_ATTACHMENT11_EXT',
'GL_COLOR_ATTACHMENT12_EXT',
'GL_COLOR_ATTACHMENT13_EXT',
'GL_COLOR_ATTACHMENT14_EXT',
'GL_COLOR_ATTACHMENT15_EXT',
'GL_DEPTH_ATTACHMENT_EXT',
'GL_STENCIL_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_EXT',
'GL_RENDERBUFFER_EXT',
'GL_RENDERBUFFER_WIDTH_EXT',
'GL_RENDERBUFFER_HEIGHT_EXT',
'GL_RENDERBUFFER_INTERNAL_FORMAT_EXT',
'GL_STENCIL_INDEX1_EXT',
'GL_STENCIL_INDEX4_EXT',
'GL_STENCIL_INDEX8_EXT',
'GL_STENCIL_INDEX16_EXT',
'GL_RENDERBUFFER_RED_SIZE_EXT',
'GL_RENDERBUFFER_GREEN_SIZE_EXT',
'GL_RENDERBUFFER_BLUE_SIZE_EXT',
'GL_RENDERBUFFER_ALPHA_SIZE_EXT',
'GL_RENDERBUFFER_DEPTH_SIZE_EXT',
'GL_RENDERBUFFER_STENCIL_SIZE_EXT',
'glAccum',
'glActiveShaderProgram',
'glActiveTexture',
'glAlphaFunc',
'glAreTexturesResident',
'glArrayElement',
'glAttachShader',
'glBegin',
'glBeginConditionalRender',
'glBeginQuery',
'glBeginQueryIndexed',
'glBeginTransformFeedback',
'glBindAttribLocation',
'glBindBuffer',
'glBindBufferBase',
'glBindBufferRange',
'glBindBuffersBase',
'glBindBuffersRange',
'glBindFragDataLocation',
'glBindFragDataLocationIndexed',
'glBindFramebuffer',
'glBindFramebufferEXT',
'glBindImageTexture',
'glBindImageTextures',
'glBindProgramPipeline',
'glBindRenderbuffer',
'glBindRenderbufferEXT',
'glBindSampler',
'glBindSamplers',
'glBindTexture',
'glBindTextureUnit',
'glBindTextures',
'glBindTransformFeedback',
'glBindVertexArray',
'glBindVertexBuffer',
'glBindVertexBuffers',
'glBitmap',
'glBlendColor',
'glBlendEquation',
'glBlendEquationSeparate',
'glBlendEquationSeparatei',
'glBlendEquationi',
'glBlendFunc',
'glBlendFuncSeparate',
'glBlendFuncSeparatei',
'glBlendFunci',
'glBlitFramebuffer',
'glBlitNamedFramebuffer',
'glBufferData',
'glBufferStorage',
'glBufferSubData',
'glCallList',
'glCallLists',
'glCheckFramebufferStatus',
'glCheckFramebufferStatusEXT',
'glCheckNamedFramebufferStatus',
'glClampColor',
'glClear',
'glClearAccum',
'glClearBufferData',
'glClearBufferSubData',
'glClearBufferfi',
'glClearBufferfv',
'glClearBufferiv',
'glClearBufferuiv',
'glClearColor',
'glClearDepth',
'glClearDepthf',
'glClearIndex',
'glClearNamedBufferData',
'glClearNamedBufferSubData',
'glClearNamedFramebufferfi',
'glClearNamedFramebufferfv',
'glClearNamedFramebufferiv',
'glClearNamedFramebufferuiv',
'glClearStencil',
'glClearTexImage',
'glClearTexSubImage',
'glClientActiveTexture',
'glClipControl',
'glClipPlane',
'glColor3b',
'glColor3bv',
'glColor3d',
'glColor3dv',
'glColor3f',
'glColor3fv',
'glColor3i',
'glColor3iv',
'glColor3s',
'glColor3sv',
'glColor3ub',
'glColor3ubv',
'glColor3ui',
'glColor3uiv',
'glColor3us',
'glColor3usv',
'glColor4b',
'glColor4bv',
'glColor4d',
'glColor4dv',
'glColor4f',
'glColor4fv',
'glColor4i',
'glColor4iv',
'glColor4s',
'glColor4sv',
'glColor4ub',
'glColor4ubv',
'glColor4ui',
'glColor4uiv',
'glColor4us',
'glColor4usv',
'glColorMask',
'glColorMaski',
'glColorMaterial',
'glColorP3ui',
'glColorP3uiv',
'glColorP4ui',
'glColorP4uiv',
'glColorPointer',
'glCompileShader',
'glCompressedTexImage1D',
'glCompressedTexImage2D',
'glCompressedTexImage3D',
'glCompressedTexSubImage1D',
'glCompressedTexSubImage2D',
'glCompressedTexSubImage3D',
'glCompressedTextureSubImage1D',
'glCompressedTextureSubImage2D',
'glCompressedTextureSubImage3D',
'glCopyBufferSubData',
'glCopyImageSubData',
'glCopyNamedBufferSubData',
'glCopyPixels',
'glCopyTexImage1D',
'glCopyTexImage2D',
'glCopyTexSubImage1D',
'glCopyTexSubImage2D',
'glCopyTexSubImage3D',
'glCopyTextureSubImage1D',
'glCopyTextureSubImage2D',
'glCopyTextureSubImage3D',
'glCreateBuffers',
'glCreateFramebuffers',
'glCreateProgram',
'glCreateProgramPipelines',
'glCreateQueries',
'glCreateRenderbuffers',
'glCreateSamplers',
'glCreateShader',
'glCreateShaderProgramv',
'glCreateTextures',
'glCreateTransformFeedbacks',
'glCreateVertexArrays',
'glCullFace',
'glDebugMessageControl',
'glDebugMessageInsert',
'glDeleteBuffers',
'glDeleteFramebuffers',
'glDeleteFramebuffersEXT',
'glDeleteLists',
'glDeleteProgram',
'glDeleteProgramPipelines',
'glDeleteQueries',
'glDeleteRenderbuffers',
'glDeleteRenderbuffersEXT',
'glDeleteSamplers',
'glDeleteShader',
'glDeleteTextures',
'glDeleteTransformFeedbacks',
'glDeleteVertexArrays',
'glDepthFunc',
'glDepthMask',
'glDepthRange',
'glDepthRangeArrayv',
'glDepthRangeIndexed',
'glDepthRangef',
'glDetachShader',
'glDisable',
'glDisableClientState',
'glDisableVertexArrayAttrib',
'glDisableVertexAttribArray',
'glDisablei',
'glDispatchCompute',
'glDispatchComputeIndirect',
'glDrawArrays',
'glDrawArraysIndirect',
'glDrawArraysInstanced',
'glDrawArraysInstancedBaseInstance',
'glDrawBuffer',
'glDrawBuffers',
'glDrawElements',
'glDrawElementsBaseVertex',
'glDrawElementsIndirect',
'glDrawElementsInstanced',
'glDrawElementsInstancedBaseInstance',
'glDrawElementsInstancedBaseVertex',
'glDrawElementsInstancedBaseVertexBaseInstance',
'glDrawPixels',
'glDrawRangeElements',
'glDrawRangeElementsBaseVertex',
'glDrawTransformFeedback',
'glDrawTransformFeedbackInstanced',
'glDrawTransformFeedbackStream',
'glDrawTransformFeedbackStreamInstanced',
'glEdgeFlag',
'glEdgeFlagPointer',
'glEdgeFlagv',
'glEnable',
'glEnableClientState',
'glEnableVertexArrayAttrib',
'glEnableVertexAttribArray',
'glEnablei',
'glEnd',
'glEndConditionalRender',
'glEndList',
'glEndQuery',
'glEndQueryIndexed',
'glEndTransformFeedback',
'glEvalCoord1d',
'glEvalCoord1dv',
'glEvalCoord1f',
'glEvalCoord1fv',
'glEvalCoord2d',
'glEvalCoord2dv',
'glEvalCoord2f',
'glEvalCoord2fv',
'glEvalMesh1',
'glEvalMesh2',
'glEvalPoint1',
'glEvalPoint2',
'glFeedbackBuffer',
'glFinish',
'glFlush',
'glFlushMappedBufferRange',
'glFlushMappedNamedBufferRange',
'glFogCoordPointer',
'glFogCoordd',
'glFogCoorddv',
'glFogCoordf',
'glFogCoordfv',
'glFogf',
'glFogfv',
'glFogi',
'glFogiv',
'glFramebufferParameteri',
'glFramebufferRenderbuffer',
'glFramebufferRenderbufferEXT',
'glFramebufferTexture',
'glFramebufferTexture1D',
'glFramebufferTexture1DEXT',
'glFramebufferTexture2D',
'glFramebufferTexture2DEXT',
'glFramebufferTexture3D',
'glFramebufferTexture3DEXT',
'glFramebufferTextureLayer',
'glFrontFace',
'glFrustum',
'glGenBuffers',
'glGenFramebuffers',
'glGenFramebuffersEXT',
'glGenLists',
'glGenProgramPipelines',
'glGenQueries',
'glGenRenderbuffers',
'glGenRenderbuffersEXT',
'glGenSamplers',
'glGenTextures',
'glGenTransformFeedbacks',
'glGenVertexArrays',
'glGenerateMipmap',
'glGenerateMipmapEXT',
'glGenerateTextureMipmap',
'glGetActiveAtomicCounterBufferiv',
'glGetActiveAttrib',
'glGetActiveSubroutineName',
'glGetActiveSubroutineUniformName',
'glGetActiveSubroutineUniformiv',
'glGetActiveUniform',
'glGetActiveUniformBlockName',
'glGetActiveUniformBlockiv',
'glGetActiveUniformName',
'glGetActiveUniformsiv',
'glGetAttachedShaders',
'glGetAttribLocation',
'glGetBooleani_v',
'glGetBooleanv',
'glGetBufferParameteri64v',
'glGetBufferParameteriv',
'glGetBufferPointerv',
'glGetBufferSubData',
'glGetClipPlane',
'glGetCompressedTexImage',
'glGetCompressedTextureImage',
'glGetCompressedTextureSubImage',
'glGetDebugMessageLog',
'glGetDoublei_v',
'glGetDoublev',
'glGetError',
'glGetFloati_v',
'glGetFloatv',
'glGetFragDataIndex',
'glGetFragDataLocation',
'glGetFramebufferAttachmentParameteriv',
'glGetFramebufferAttachmentParameterivEXT',
'glGetFramebufferParameteriv',
'glGetGraphicsResetStatus',
'glGetInteger64i_v',
'glGetInteger64v',
'glGetIntegeri_v',
'glGetIntegerv',
'glGetInternalformati64v',
'glGetInternalformativ',
'glGetLightfv',
'glGetLightiv',
'glGetMapdv',
'glGetMapfv',
'glGetMapiv',
'glGetMaterialfv',
'glGetMaterialiv',
'glGetMultisamplefv',
'glGetNamedBufferParameteri64v',
'glGetNamedBufferParameteriv',
'glGetNamedBufferPointerv',
'glGetNamedBufferSubData',
'glGetNamedFramebufferAttachmentParameteriv',
'glGetNamedFramebufferParameteriv',
'glGetNamedRenderbufferParameteriv',
'glGetObjectLabel',
'glGetObjectPtrLabel',
'glGetPixelMapfv',
'glGetPixelMapuiv',
'glGetPixelMapusv',
'glGetPointerv',
'glGetPolygonStipple',
'glGetProgramBinary',
'glGetProgramInfoLog',
'glGetProgramInterfaceiv',
'glGetProgramPipelineInfoLog',
'glGetProgramPipelineiv',
'glGetProgramResourceIndex',
'glGetProgramResourceLocation',
'glGetProgramResourceLocationIndex',
'glGetProgramResourceName',
'glGetProgramResourceiv',
'glGetProgramStageiv',
'glGetProgramiv',
'glGetQueryBufferObjecti64v',
'glGetQueryBufferObjectiv',
'glGetQueryBufferObjectui64v',
'glGetQueryBufferObjectuiv',
'glGetQueryIndexediv',
'glGetQueryObjecti64v',
'glGetQueryObjectiv',
'glGetQueryObjectui64v',
'glGetQueryObjectuiv',
'glGetQueryiv',
'glGetRenderbufferParameteriv',
'glGetRenderbufferParameterivEXT',
'glGetSamplerParameterIiv',
'glGetSamplerParameterIuiv',
'glGetSamplerParameterfv',
'glGetSamplerParameteriv',
'glGetShaderInfoLog',
'glGetShaderPrecisionFormat',
'glGetShaderSource',
'glGetShaderiv',
'glGetString',
'glGetStringi',
'glGetSubroutineIndex',
'glGetSubroutineUniformLocation',
'glGetTexEnvfv',
'glGetTexEnviv',
'glGetTexGendv',
'glGetTexGenfv',
'glGetTexGeniv',
'glGetTexImage',
'glGetTexLevelParameterfv',
'glGetTexLevelParameteriv',
'glGetTexParameterIiv',
'glGetTexParameterIuiv',
'glGetTexParameterfv',
'glGetTexParameteriv',
'glGetTextureImage',
'glGetTextureLevelParameterfv',
'glGetTextureLevelParameteriv',
'glGetTextureParameterIiv',
'glGetTextureParameterIuiv',
'glGetTextureParameterfv',
'glGetTextureParameteriv',
'glGetTextureSubImage',
'glGetTransformFeedbackVarying',
'glGetTransformFeedbacki64_v',
'glGetTransformFeedbacki_v',
'glGetTransformFeedbackiv',
'glGetUniformBlockIndex',
'glGetUniformIndices',
'glGetUniformLocation',
'glGetUniformSubroutineuiv',
'glGetUniformdv',
'glGetUniformfv',
'glGetUniformiv',
'glGetUniformuiv',
'glGetVertexArrayIndexed64iv',
'glGetVertexArrayIndexediv',
'glGetVertexArrayiv',
'glGetVertexAttribIiv',
'glGetVertexAttribIuiv',
'glGetVertexAttribLdv',
'glGetVertexAttribPointerv',
'glGetVertexAttribdv',
'glGetVertexAttribfv',
'glGetVertexAttribiv',
'glGetnColorTable',
'glGetnCompressedTexImage',
'glGetnConvolutionFilter',
'glGetnHistogram',
'glGetnMapdv',
'glGetnMapfv',
'glGetnMapiv',
'glGetnMinmax',
'glGetnPixelMapfv',
'glGetnPixelMapuiv',
'glGetnPixelMapusv',
'glGetnPolygonStipple',
'glGetnSeparableFilter',
'glGetnTexImage',
'glGetnUniformdv',
'glGetnUniformfv',
'glGetnUniformiv',
'glGetnUniformuiv',
'glHint',
'glIndexMask',
'glIndexPointer',
'glIndexd',
'glIndexdv',
'glIndexf',
'glIndexfv',
'glIndexi',
'glIndexiv',
'glIndexs',
'glIndexsv',
'glIndexub',
'glIndexubv',
'glInitNames',
'glInterleavedArrays',
'glInvalidateBufferData',
'glInvalidateBufferSubData',
'glInvalidateFramebuffer',
'glInvalidateNamedFramebufferData',
'glInvalidateNamedFramebufferSubData',
'glInvalidateSubFramebuffer',
'glInvalidateTexImage',
'glInvalidateTexSubImage',
'glIsBuffer',
'glIsEnabled',
'glIsEnabledi',
'glIsFramebuffer',
'glIsFramebufferEXT',
'glIsList',
'glIsProgram',
'glIsProgramPipeline',
'glIsQuery',
'glIsRenderbuffer',
'glIsRenderbufferEXT',
'glIsSampler',
'glIsShader',
'glIsTexture',
'glIsTransformFeedback',
'glIsVertexArray',
'glLightModelf',
'glLightModelfv',
'glLightModeli',
'glLightModeliv',
'glLightf',
'glLightfv',
'glLighti',
'glLightiv',
'glLineStipple',
'glLineWidth',
'glLinkProgram',
'glListBase',
'glLoadIdentity',
'glLoadMatrixd',
'glLoadMatrixf',
'glLoadName',
'glLoadTransposeMatrixd',
'glLoadTransposeMatrixf',
'glLogicOp',
'glMap1d',
'glMap1f',
'glMap2d',
'glMap2f',
'glMapBuffer',
'glMapBufferRange',
'glMapGrid1d',
'glMapGrid1f',
'glMapGrid2d',
'glMapGrid2f',
'glMapNamedBuffer',
'glMapNamedBufferRange',
'glMaterialf',
'glMaterialfv',
'glMateriali',
'glMaterialiv',
'glMatrixMode',
'glMemoryBarrier',
'glMemoryBarrierByRegion',
'glMinSampleShading',
'glMultMatrixd',
'glMultMatrixf',
'glMultTransposeMatrixd',
'glMultTransposeMatrixf',
'glMultiDrawArrays',
'glMultiDrawArraysIndirect',
'glMultiDrawArraysIndirectCount',
'glMultiDrawElements',
'glMultiDrawElementsBaseVertex',
'glMultiDrawElementsIndirect',
'glMultiDrawElementsIndirectCount',
'glMultiTexCoord1d',
'glMultiTexCoord1dv',
'glMultiTexCoord1f',
'glMultiTexCoord1fv',
'glMultiTexCoord1i',
'glMultiTexCoord1iv',
'glMultiTexCoord1s',
'glMultiTexCoord1sv',
'glMultiTexCoord2d',
'glMultiTexCoord2dv',
'glMultiTexCoord2f',
'glMultiTexCoord2fv',
'glMultiTexCoord2i',
'glMultiTexCoord2iv',
'glMultiTexCoord2s',
'glMultiTexCoord2sv',
'glMultiTexCoord3d',
'glMultiTexCoord3dv',
'glMultiTexCoord3f',
'glMultiTexCoord3fv',
'glMultiTexCoord3i',
'glMultiTexCoord3iv',
'glMultiTexCoord3s',
'glMultiTexCoord3sv',
'glMultiTexCoord4d',
'glMultiTexCoord4dv',
'glMultiTexCoord4f',
'glMultiTexCoord4fv',
'glMultiTexCoord4i',
'glMultiTexCoord4iv',
'glMultiTexCoord4s',
'glMultiTexCoord4sv',
'glMultiTexCoordP1ui',
'glMultiTexCoordP1uiv',
'glMultiTexCoordP2ui',
'glMultiTexCoordP2uiv',
'glMultiTexCoordP3ui',
'glMultiTexCoordP3uiv',
'glMultiTexCoordP4ui',
'glMultiTexCoordP4uiv',
'glNamedBufferData',
'glNamedBufferStorage',
'glNamedBufferSubData',
'glNamedFramebufferDrawBuffer',
'glNamedFramebufferDrawBuffers',
'glNamedFramebufferParameteri',
'glNamedFramebufferReadBuffer',
'glNamedFramebufferRenderbuffer',
'glNamedFramebufferTexture',
'glNamedFramebufferTextureLayer',
'glNamedRenderbufferStorage',
'glNamedRenderbufferStorageMultisample',
'glNewList',
'glNormal3b',
'glNormal3bv',
'glNormal3d',
'glNormal3dv',
'glNormal3f',
'glNormal3fv',
'glNormal3i',
'glNormal3iv',
'glNormal3s',
'glNormal3sv',
'glNormalP3ui',
'glNormalP3uiv',
'glNormalPointer',
'glObjectLabel',
'glObjectPtrLabel',
'glOrtho',
'glPassThrough',
'glPatchParameterfv',
'glPatchParameteri',
'glPauseTransformFeedback',
'glPixelMapfv',
'glPixelMapuiv',
'glPixelMapusv',
'glPixelStoref',
'glPixelStorei',
'glPixelTransferf',
'glPixelTransferi',
'glPixelZoom',
'glPointParameterf',
'glPointParameterfv',
'glPointParameteri',
'glPointParameteriv',
'glPointSize',
'glPolygonMode',
'glPolygonOffset',
'glPolygonOffsetClamp',
'glPolygonStipple',
'glPopAttrib',
'glPopClientAttrib',
'glPopDebugGroup',
'glPopMatrix',
'glPopName',
'glPrimitiveRestartIndex',
'glPrioritizeTextures',
'glProgramBinary',
'glProgramParameteri',
'glProgramUniform1d',
'glProgramUniform1dv',
'glProgramUniform1f',
'glProgramUniform1fv',
'glProgramUniform1i',
'glProgramUniform1iv',
'glProgramUniform1ui',
'glProgramUniform1uiv',
'glProgramUniform2d',
'glProgramUniform2dv',
'glProgramUniform2f',
'glProgramUniform2fv',
'glProgramUniform2i',
'glProgramUniform2iv',
'glProgramUniform2ui',
'glProgramUniform2uiv',
'glProgramUniform3d',
'glProgramUniform3dv',
'glProgramUniform3f',
'glProgramUniform3fv',
'glProgramUniform3i',
'glProgramUniform3iv',
'glProgramUniform3ui',
'glProgramUniform3uiv',
'glProgramUniform4d',
'glProgramUniform4dv',
'glProgramUniform4f',
'glProgramUniform4fv',
'glProgramUniform4i',
'glProgramUniform4iv',
'glProgramUniform4ui',
'glProgramUniform4uiv',
'glProgramUniformMatrix2dv',
'glProgramUniformMatrix2fv',
'glProgramUniformMatrix2x3dv',
'glProgramUniformMatrix2x3fv',
'glProgramUniformMatrix2x4dv',
'glProgramUniformMatrix2x4fv',
'glProgramUniformMatrix3dv',
'glProgramUniformMatrix3fv',
'glProgramUniformMatrix3x2dv',
'glProgramUniformMatrix3x2fv',
'glProgramUniformMatrix3x4dv',
'glProgramUniformMatrix3x4fv',
'glProgramUniformMatrix4dv',
'glProgramUniformMatrix4fv',
'glProgramUniformMatrix4x2dv',
'glProgramUniformMatrix4x2fv',
'glProgramUniformMatrix4x3dv',
'glProgramUniformMatrix4x3fv',
'glProvokingVertex',
'glPushAttrib',
'glPushClientAttrib',
'glPushDebugGroup',
'glPushMatrix',
'glPushName',
'glQueryCounter',
'glRasterPos2d',
'glRasterPos2dv',
'glRasterPos2f',
'glRasterPos2fv',
'glRasterPos2i',
'glRasterPos2iv',
'glRasterPos2s',
'glRasterPos2sv',
'glRasterPos3d',
'glRasterPos3dv',
'glRasterPos3f',
'glRasterPos3fv',
'glRasterPos3i',
'glRasterPos3iv',
'glRasterPos3s',
'glRasterPos3sv',
'glRasterPos4d',
'glRasterPos4dv',
'glRasterPos4f',
'glRasterPos4fv',
'glRasterPos4i',
'glRasterPos4iv',
'glRasterPos4s',
'glRasterPos4sv',
'glReadBuffer',
'glReadPixels',
'glReadnPixels',
'glRectd',
'glRectdv',
'glRectf',
'glRectfv',
'glRecti',
'glRectiv',
'glRects',
'glRectsv',
'glReleaseShaderCompiler',
'glRenderMode',
'glRenderbufferStorage',
'glRenderbufferStorageEXT',
'glRenderbufferStorageMultisample',
'glResumeTransformFeedback',
'glRotated',
'glRotatef',
'glSampleCoverage',
'glSampleCoverageARB',
'glSampleMaski',
'glSamplerParameterIiv',
'glSamplerParameterIuiv',
'glSamplerParameterf',
'glSamplerParameterfv',
'glSamplerParameteri',
'glSamplerParameteriv',
'glScaled',
'glScalef',
'glScissor',
'glScissorArrayv',
'glScissorIndexed',
'glScissorIndexedv',
'glSecondaryColor3b',
'glSecondaryColor3bv',
'glSecondaryColor3d',
'glSecondaryColor3dv',
'glSecondaryColor3f',
'glSecondaryColor3fv',
'glSecondaryColor3i',
'glSecondaryColor3iv',
'glSecondaryColor3s',
'glSecondaryColor3sv',
'glSecondaryColor3ub',
'glSecondaryColor3ubv',
'glSecondaryColor3ui',
'glSecondaryColor3uiv',
'glSecondaryColor3us',
'glSecondaryColor3usv',
'glSecondaryColorP3ui',
'glSecondaryColorP3uiv',
'glSecondaryColorPointer',
'glSelectBuffer',
'glShadeModel',
'glShaderBinary',
'glShaderSource',
'glShaderStorageBlockBinding',
'glSpecializeShader',
'glStencilFunc',
'glStencilFuncSeparate',
'glStencilMask',
'glStencilMaskSeparate',
'glStencilOp',
'glStencilOpSeparate',
'glTexBuffer',
'glTexBufferRange',
'glTexCoord1d',
'glTexCoord1dv',
'glTexCoord1f',
'glTexCoord1fv',
'glTexCoord1i',
'glTexCoord1iv',
'glTexCoord1s',
'glTexCoord1sv',
'glTexCoord2d',
'glTexCoord2dv',
'glTexCoord2f',
'glTexCoord2fv',
'glTexCoord2i',
'glTexCoord2iv',
'glTexCoord2s',
'glTexCoord2sv',
'glTexCoord3d',
'glTexCoord3dv',
'glTexCoord3f',
'glTexCoord3fv',
'glTexCoord3i',
'glTexCoord3iv',
'glTexCoord3s',
'glTexCoord3sv',
'glTexCoord4d',
'glTexCoord4dv',
'glTexCoord4f',
'glTexCoord4fv',
'glTexCoord4i',
'glTexCoord4iv',
'glTexCoord4s',
'glTexCoord4sv',
'glTexCoordP1ui',
'glTexCoordP1uiv',
'glTexCoordP2ui',
'glTexCoordP2uiv',
'glTexCoordP3ui',
'glTexCoordP3uiv',
'glTexCoordP4ui',
'glTexCoordP4uiv',
'glTexCoordPointer',
'glTexEnvf',
'glTexEnvfv',
'glTexEnvi',
'glTexEnviv',
'glTexGend',
'glTexGendv',
'glTexGenf',
'glTexGenfv',
'glTexGeni',
'glTexGeniv',
'glTexImage1D',
'glTexImage2D',
'glTexImage2DMultisample',
'glTexImage3D',
'glTexImage3DMultisample',
'glTexParameterIiv',
'glTexParameterIuiv',
'glTexParameterf',
'glTexParameterfv',
'glTexParameteri',
'glTexParameteriv',
'glTexStorage1D',
'glTexStorage2D',
'glTexStorage2DMultisample',
'glTexStorage3D',
'glTexStorage3DMultisample',
'glTexSubImage1D',
'glTexSubImage2D',
'glTexSubImage3D',
'glTextureBarrier',
'glTextureBuffer',
'glTextureBufferRange',
'glTextureParameterIiv',
'glTextureParameterIuiv',
'glTextureParameterf',
'glTextureParameterfv',
'glTextureParameteri',
'glTextureParameteriv',
'glTextureStorage1D',
'glTextureStorage2D',
'glTextureStorage2DMultisample',
'glTextureStorage3D',
'glTextureStorage3DMultisample',
'glTextureSubImage1D',
'glTextureSubImage2D',
'glTextureSubImage3D',
'glTextureView',
'glTransformFeedbackBufferBase',
'glTransformFeedbackBufferRange',
'glTransformFeedbackVaryings',
'glTranslated',
'glTranslatef',
'glUniform1d',
'glUniform1dv',
'glUniform1f',
'glUniform1fv',
'glUniform1i',
'glUniform1iv',
'glUniform1ui',
'glUniform1uiv',
'glUniform2d',
'glUniform2dv',
'glUniform2f',
'glUniform2fv',
'glUniform2i',
'glUniform2iv',
'glUniform2ui',
'glUniform2uiv',
'glUniform3d',
'glUniform3dv',
'glUniform3f',
'glUniform3fv',
'glUniform3i',
'glUniform3iv',
'glUniform3ui',
'glUniform3uiv',
'glUniform4d',
'glUniform4dv',
'glUniform4f',
'glUniform4fv',
'glUniform4i',
'glUniform4iv',
'glUniform4ui',
'glUniform4uiv',
'glUniformBlockBinding',
'glUniformMatrix2dv',
'glUniformMatrix2fv',
'glUniformMatrix2x3dv',
'glUniformMatrix2x3fv',
'glUniformMatrix2x4dv',
'glUniformMatrix2x4fv',
'glUniformMatrix3dv',
'glUniformMatrix3fv',
'glUniformMatrix3x2dv',
'glUniformMatrix3x2fv',
'glUniformMatrix3x4dv',
'glUniformMatrix3x4fv',
'glUniformMatrix4dv',
'glUniformMatrix4fv',
'glUniformMatrix4x2dv',
'glUniformMatrix4x2fv',
'glUniformMatrix4x3dv',
'glUniformMatrix4x3fv',
'glUniformSubroutinesuiv',
'glUnmapBuffer',
'glUnmapNamedBuffer',
'glUseProgram',
'glUseProgramStages',
'glValidateProgram',
'glValidateProgramPipeline',
'glVertex2d',
'glVertex2dv',
'glVertex2f',
'glVertex2fv',
'glVertex2i',
'glVertex2iv',
'glVertex2s',
'glVertex2sv',
'glVertex3d',
'glVertex3dv',
'glVertex3f',
'glVertex3fv',
'glVertex3i',
'glVertex3iv',
'glVertex3s',
'glVertex3sv',
'glVertex4d',
'glVertex4dv',
'glVertex4f',
'glVertex4fv',
'glVertex4i',
'glVertex4iv',
'glVertex4s',
'glVertex4sv',
'glVertexArrayAttribBinding',
'glVertexArrayAttribFormat',
'glVertexArrayAttribIFormat',
'glVertexArrayAttribLFormat',
'glVertexArrayBindingDivisor',
'glVertexArrayElementBuffer',
'glVertexArrayVertexBuffer',
'glVertexArrayVertexBuffers',
'glVertexAttrib1d',
'glVertexAttrib1dv',
'glVertexAttrib1f',
'glVertexAttrib1fv',
'glVertexAttrib1s',
'glVertexAttrib1sv',
'glVertexAttrib2d',
'glVertexAttrib2dv',
'glVertexAttrib2f',
'glVertexAttrib2fv',
'glVertexAttrib2s',
'glVertexAttrib2sv',
'glVertexAttrib3d',
'glVertexAttrib3dv',
'glVertexAttrib3f',
'glVertexAttrib3fv',
'glVertexAttrib3s',
'glVertexAttrib3sv',
'glVertexAttrib4Nbv',
'glVertexAttrib4Niv',
'glVertexAttrib4Nsv',
'glVertexAttrib4Nub',
'glVertexAttrib4Nubv',
'glVertexAttrib4Nuiv',
'glVertexAttrib4Nusv',
'glVertexAttrib4bv',
'glVertexAttrib4d',
'glVertexAttrib4dv',
'glVertexAttrib4f',
'glVertexAttrib4fv',
'glVertexAttrib4iv',
'glVertexAttrib4s',
'glVertexAttrib4sv',
'glVertexAttrib4ubv',
'glVertexAttrib4uiv',
'glVertexAttrib4usv',
'glVertexAttribBinding',
'glVertexAttribDivisor',
'glVertexAttribFormat',
'glVertexAttribI1i',
'glVertexAttribI1iv',
'glVertexAttribI1ui',
'glVertexAttribI1uiv',
'glVertexAttribI2i',
'glVertexAttribI2iv',
'glVertexAttribI2ui',
'glVertexAttribI2uiv',
'glVertexAttribI3i',
'glVertexAttribI3iv',
'glVertexAttribI3ui',
'glVertexAttribI3uiv',
'glVertexAttribI4bv',
'glVertexAttribI4i',
'glVertexAttribI4iv',
'glVertexAttribI4sv',
'glVertexAttribI4ubv',
'glVertexAttribI4ui',
'glVertexAttribI4uiv',
'glVertexAttribI4usv',
'glVertexAttribIFormat',
'glVertexAttribIPointer',
'glVertexAttribL1d',
'glVertexAttribL1dv',
'glVertexAttribL2d',
'glVertexAttribL2dv',
'glVertexAttribL3d',
'glVertexAttribL3dv',
'glVertexAttribL4d',
'glVertexAttribL4dv',
'glVertexAttribLFormat',
'glVertexAttribLPointer',
'glVertexAttribP1ui',
'glVertexAttribP1uiv',
'glVertexAttribP2ui',
'glVertexAttribP2uiv',
'glVertexAttribP3ui',
'glVertexAttribP3uiv',
'glVertexAttribP4ui',
'glVertexAttribP4uiv',
'glVertexAttribPointer',
'glVertexBindingDivisor',
'glVertexP2ui',
'glVertexP2uiv',
'glVertexP3ui',
'glVertexP3uiv',
'glVertexP4ui',
'glVertexP4uiv',
'glVertexPointer',
'glViewport',
'glViewportArrayv',
'glViewportIndexedf',
'glViewportIndexedfv',
'glWindowPos2d',
'glWindowPos2dv',
'glWindowPos2f',
'glWindowPos2fv',
'glWindowPos2i',
'glWindowPos2iv',
'glWindowPos2s',
'glWindowPos2sv',
'glWindowPos3d',
'glWindowPos3dv',
'glWindowPos3f',
'glWindowPos3fv',
'glWindowPos3i',
'glWindowPos3iv',
'glWindowPos3s',
'glWindowPos3sv',
]
| []
| []
| []
| [] | [] | python | null | null | null |
src/main/java/se/kth/dd2480/grp25/ci/NotifyJob.java | package se.kth.dd2480.grp25.ci;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
/** A job for notifying Github about builds and testing. */
public class NotifyJob implements Runnable {
private Event event;
private EventQueue queue;
private String token = "";
public static class Examiner extends JobExaminer {
public Examiner(EventQueue queue) {
super(queue);
}
@Override
public Optional<Runnable> offer(Event event) {
if (event.getType() == Event.Type.TEST || event.getType() == Event.Type.BUILD) {
return Optional.of(new NotifyJob(event, super.queue));
} else {
return Optional.empty();
}
}
}
/**
* Create an instance of {@linkplain NotifyJob}.
*
* @param event the event that this job should process.
* @param queue the queue that this job may append new events to.
*/
public NotifyJob(Event event, EventQueue queue) {
this.event = event;
this.queue = queue;
}
@Override
public void run() {
try {
token = System.getenv("TOKEN");
if (token == null) {
Event notifyEvent =
new Event(event.getId(), Event.Type.NOTIFY, Event.Status.FAIL, "Invalid token.");
queue.insert(notifyEvent);
return;
}
String buildSuccessful = "\"All tests passed!\"";
String buildFail = "\"Build fail\"";
String testFail = "\"Tests failed\"";
URL url =
new URL(
"https://api.github.com/repos/DD2480-Project-group-25/Continuous-Integration/statuses/"
+ event.getId()
+ "?access_token="
+ token);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty("Content-Type", "application/json");
conn.setDoOutput(true);
try (OutputStream os = conn.getOutputStream()) {
String params = "";
Event notifyEvent = null;
if (event.getType() == Event.Type.BUILD) {
if (event.getStatus() == Event.Status.FAIL) {
params = createParams(buildFail, "failure");
notifyEvent =
new Event(
event.getId(),
Event.Type.NOTIFY,
Event.Status.SUCCESSFUL,
"Notified that build failed.",
event);
} else {
// We don't want to notify the user about a successful build
return;
}
}
if (event.getType() == Event.Type.TEST) {
if (event.getStatus() == Event.Status.SUCCESSFUL) {
params = createParams(buildSuccessful, "success");
notifyEvent =
new Event(
event.getId(),
Event.Type.NOTIFY,
Event.Status.SUCCESSFUL,
"Notified that build was successful.",
event);
} else {
params = createParams(testFail, "failure");
notifyEvent =
new Event(
event.getId(),
Event.Type.NOTIFY,
Event.Status.SUCCESSFUL,
"Notified that tests failed.",
event);
}
}
os.write(params.getBytes(StandardCharsets.UTF_8));
if (conn.getResponseCode() == 422) {
queue.insert(
new Event(
event.getId(),
Event.Type.NOTIFY,
Event.Status.FAIL,
"Unprocessable entity",
event));
} else {
queue.insert(notifyEvent);
}
os.flush();
}
} catch (Exception e) {
System.out.println(e);
}
}
/**
   * Creates the body of a GitHub status notification. The target URL is hardcoded to point at
   * this server and should be updated for production use.
*
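   * <p>Example result for a successful build (the commit id {@code abc123} is illustrative):
   * {@code {"state":"success","target_url":"http://157.230.31.10:8080/logs/abc123",
   * "description":"All tests passed!","context":"DD2480 CI"}}
   *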
   * @param message description text of the notification, already quoted as a JSON string
   * @param state state of the notification, e.g. "success" or "failure"
   * @return the properly formatted GitHub status notification payload
*/
private String createParams(String message, String state) {
String params =
"{\"state\":\""
+ state
+ "\",\"target_url\":\"http://157.230.31.10:8080/logs/"
+ event.getId()
+ "\",\"description\":"
+ message
+ ",\"context\":\"DD2480 CI\"}";
return params;
}
}
| [
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | java | 1 | 0 | |
libpod/oci_conmon_linux.go | //go:build linux
// +build linux
package libpod
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"text/template"
"time"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
conmonConfig "github.com/containers/conmon/runner/config"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/logs"
"github.com/containers/podman/v4/pkg/checkpoint/crutils"
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/podman/v4/utils"
"github.com/containers/storage/pkg/homedir"
pmount "github.com/containers/storage/pkg/mount"
"github.com/coreos/go-systemd/v22/daemon"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const (
// This is Conmon's STDIO_BUF_SIZE. I don't believe we have access to it
// directly from the Go code, so const it here
// Important: The conmon attach socket uses an extra byte at the beginning of each
// message to specify the STREAM so we have to increase the buffer size by one
bufferSize = conmonConfig.BufSize + 1
)
// ConmonOCIRuntime is an OCI runtime managed by Conmon.
// TODO: Make all calls to OCI runtime have a timeout.
type ConmonOCIRuntime struct {
name string
path string
conmonPath string
conmonEnv []string
tmpDir string
exitsDir string
logSizeMax int64
noPivot bool
reservePorts bool
runtimeFlags []string
supportsJSON bool
supportsKVM bool
supportsNoCgroups bool
enableKeyring bool
}
// Make a new Conmon-based OCI runtime with the given options.
// Conmon will wrap the given OCI runtime, which can be `runc`, `crun`, or
// any runtime with a runc-compatible CLI.
// The first path that points to a valid executable will be used.
// Deliberately private. Someone should not be able to construct this outside of
// libpod.
func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
if name == "" {
return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
}
// Make lookup tables for runtime support
supportsJSON := make(map[string]bool, len(runtimeCfg.Engine.RuntimeSupportsJSON))
supportsNoCgroups := make(map[string]bool, len(runtimeCfg.Engine.RuntimeSupportsNoCgroups))
supportsKVM := make(map[string]bool, len(runtimeCfg.Engine.RuntimeSupportsKVM))
for _, r := range runtimeCfg.Engine.RuntimeSupportsJSON {
supportsJSON[r] = true
}
for _, r := range runtimeCfg.Engine.RuntimeSupportsNoCgroups {
supportsNoCgroups[r] = true
}
for _, r := range runtimeCfg.Engine.RuntimeSupportsKVM {
supportsKVM[r] = true
}
runtime := new(ConmonOCIRuntime)
runtime.name = name
runtime.conmonPath = conmonPath
runtime.runtimeFlags = runtimeFlags
runtime.conmonEnv = runtimeCfg.Engine.ConmonEnvVars
runtime.tmpDir = runtimeCfg.Engine.TmpDir
runtime.logSizeMax = runtimeCfg.Containers.LogSizeMax
runtime.noPivot = runtimeCfg.Engine.NoPivotRoot
runtime.reservePorts = runtimeCfg.Engine.EnablePortReservation
runtime.enableKeyring = runtimeCfg.Containers.EnableKeyring
// TODO: probe OCI runtime for feature and enable automatically if
// available.
base := filepath.Base(name)
runtime.supportsJSON = supportsJSON[base]
runtime.supportsNoCgroups = supportsNoCgroups[base]
runtime.supportsKVM = supportsKVM[base]
foundPath := false
for _, path := range paths {
stat, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
continue
}
return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path", name)
}
if !stat.Mode().IsRegular() {
continue
}
foundPath = true
		runtime.path = path
		logrus.Tracef("found runtime %q", runtime.path)
break
}
// Search the $PATH as last fallback
if !foundPath {
if foundRuntime, err := exec.LookPath(name); err == nil {
foundPath = true
runtime.path = foundRuntime
logrus.Debugf("using runtime %q from $PATH: %q", name, foundRuntime)
}
}
if !foundPath {
return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name)
}
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
// Create the exit files and attach sockets directories
if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory")
}
}
return runtime, nil
}
// Name returns the name of the runtime being wrapped by Conmon.
func (r *ConmonOCIRuntime) Name() string {
return r.name
}
// Path returns the path of the OCI runtime being wrapped by Conmon.
func (r *ConmonOCIRuntime) Path() string {
return r.path
}
// hasCurrentUserMapped checks whether the current user is mapped inside the container user namespace
func hasCurrentUserMapped(ctr *Container) bool {
if len(ctr.config.IDMappings.UIDMap) == 0 && len(ctr.config.IDMappings.GIDMap) == 0 {
return true
}
uid := os.Geteuid()
for _, m := range ctr.config.IDMappings.UIDMap {
if uid >= m.HostID && uid < m.HostID+m.Size {
return true
}
}
return false
}
// CreateContainer creates a container.
func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) {
// always make the run dir accessible to the current user so that the PID files can be read without
// being in the rootless user namespace.
if err := makeAccessible(ctr.state.RunDir, 0, 0); err != nil {
return 0, err
}
if !hasCurrentUserMapped(ctr) {
for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} {
if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
return 0, err
}
}
		// if we are running a non-privileged container, be sure to umount some kernel paths so they are not
// bind mounted inside the container at all.
if !ctr.config.Privileged && !rootless.IsRootless() {
type result struct {
restoreDuration int64
err error
}
ch := make(chan result)
go func() {
runtime.LockOSThread()
restoreDuration, err := func() (int64, error) {
fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
if err != nil {
return 0, err
}
defer errorhandling.CloseQuiet(fd)
// create a new mountns on the current thread
if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
return 0, err
}
defer func() {
if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
logrus.Errorf("Unable to clone new namespace: %q", err)
}
}()
// don't spread our mounts around. We are setting only /sys to be slave
// so that the cleanup process is still able to umount the storage and the
// changes are propagated to the host.
err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "")
if err != nil {
return 0, errors.Wrapf(err, "cannot make /sys slave")
}
mounts, err := pmount.GetMounts()
if err != nil {
return 0, err
}
for _, m := range mounts {
if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") {
continue
}
err = unix.Unmount(m.Mountpoint, 0)
if err != nil && !os.IsNotExist(err) {
return 0, errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
}
}
return r.createOCIContainer(ctr, restoreOptions)
}()
ch <- result{
restoreDuration: restoreDuration,
err: err,
}
}()
r := <-ch
return r.restoreDuration, r.err
}
}
return r.createOCIContainer(ctr, restoreOptions)
}
// UpdateContainerStatus retrieves the current status of the container from the
// runtime. It updates the container's state but does not save it.
func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
exitFile, err := r.ExitFilePath(ctr)
if err != nil {
return err
}
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
}
// Store old state so we know if we were already stopped
oldState := ctr.state.State
state := new(spec.State)
cmd := exec.Command(r.path, "state", ctr.ID())
cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
outPipe, err := cmd.StdoutPipe()
if err != nil {
return errors.Wrapf(err, "getting stdout pipe")
}
errPipe, err := cmd.StderrPipe()
if err != nil {
return errors.Wrapf(err, "getting stderr pipe")
}
if err := cmd.Start(); err != nil {
out, err2 := ioutil.ReadAll(errPipe)
if err2 != nil {
return errors.Wrapf(err, "error getting container %s state", ctr.ID())
}
if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") {
if err := ctr.removeConmonFiles(); err != nil {
logrus.Debugf("unable to remove conmon files for container %s", ctr.ID())
}
ctr.state.ExitCode = -1
ctr.state.FinishedTime = time.Now()
ctr.state.State = define.ContainerStateExited
return nil
}
return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
}
defer func() {
_ = cmd.Wait()
}()
if err := errPipe.Close(); err != nil {
return err
}
out, err := ioutil.ReadAll(outPipe)
if err != nil {
return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
}
if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil {
return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID())
}
ctr.state.PID = state.Pid
switch state.Status {
case "created":
ctr.state.State = define.ContainerStateCreated
case "paused":
ctr.state.State = define.ContainerStatePaused
case "running":
ctr.state.State = define.ContainerStateRunning
case "stopped":
ctr.state.State = define.ContainerStateStopped
default:
return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
ctr.ID(), state.Status)
}
// Only grab exit status if we were not already stopped
// If we were, it should already be in the database
if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped {
var fi os.FileInfo
chWait := make(chan error)
defer close(chWait)
_, err := WaitForFile(exitFile, chWait, time.Second*5)
if err == nil {
fi, err = os.Stat(exitFile)
}
if err != nil {
ctr.state.ExitCode = -1
ctr.state.FinishedTime = time.Now()
logrus.Errorf("No exit file for container %s found: %v", ctr.ID(), err)
return nil
}
return ctr.handleExitFile(exitFile, fi)
}
// Handle ContainerStateStopping - keep it unless the container
// transitioned to no longer running.
if oldState == define.ContainerStateStopping && (ctr.state.State == define.ContainerStatePaused || ctr.state.State == define.ContainerStateRunning) {
ctr.state.State = define.ContainerStateStopping
}
return nil
}
// StartContainer starts the given container.
// Sets time the container was started, but does not save it.
func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error {
// TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers?
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
if path, ok := os.LookupEnv("PATH"); ok {
env = append(env, fmt.Sprintf("PATH=%s", path))
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "start", ctr.ID())...); err != nil {
return err
}
ctr.state.StartedTime = time.Now()
return nil
}
// KillContainer sends the given signal to the given container.
// If all is set, send to all PIDs in the container.
// All is only supported if the container created cgroups.
func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) error {
logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID())
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
var args []string
args = append(args, r.runtimeFlags...)
if all {
args = append(args, "kill", "--all", ctr.ID(), fmt.Sprintf("%d", signal))
} else {
args = append(args, "kill", ctr.ID(), fmt.Sprintf("%d", signal))
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil {
// Update container state - there's a chance we failed because
// the container exited in the meantime.
if err2 := r.UpdateContainerStatus(ctr); err2 != nil {
logrus.Infof("Error updating status for container %s: %v", ctr.ID(), err2)
}
if ctr.state.State == define.ContainerStateExited {
return nil
}
return errors.Wrapf(err, "error sending signal to container %s", ctr.ID())
}
return nil
}
// StopContainer stops a container, first using its given stop signal (or
// SIGTERM if no signal was specified), then using SIGKILL.
// Timeout is given in seconds. If timeout is 0, the container will be
// immediately kill with SIGKILL.
// Does not set finished time for container, assumes you will run updateStatus
// after to pull the exit code.
func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) error {
logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID)
// Ping the container to see if it's alive
// If it's not, it's already stopped, return
err := unix.Kill(ctr.state.PID, 0)
if err == unix.ESRCH {
return nil
}
stopSignal := ctr.config.StopSignal
if stopSignal == 0 {
stopSignal = uint(syscall.SIGTERM)
}
if timeout > 0 {
if err := r.KillContainer(ctr, stopSignal, all); err != nil {
// Is the container gone?
// If so, it probably died between the first check and
// our sending the signal
// The container is stopped, so exit cleanly
err := unix.Kill(ctr.state.PID, 0)
if err == unix.ESRCH {
return nil
}
return err
}
if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil {
logrus.Debugf("Timed out stopping container %s with %s, resorting to SIGKILL: %v", ctr.ID(), unix.SignalName(syscall.Signal(stopSignal)), err)
logrus.Warnf("StopSignal %s failed to stop container %s in %d seconds, resorting to SIGKILL", unix.SignalName(syscall.Signal(stopSignal)), ctr.Name(), timeout)
} else {
// No error, the container is dead
return nil
}
}
if err := r.KillContainer(ctr, 9, all); err != nil {
// Again, check if the container is gone. If it is, exit cleanly.
err := unix.Kill(ctr.state.PID, 0)
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
}
// Give runtime a few seconds to make it happen
if err := waitContainerStop(ctr, killContainerTimeout); err != nil {
return err
}
return nil
}
// DeleteContainer deletes a container from the OCI runtime.
func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error {
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "delete", "--force", ctr.ID())...)
}
// PauseContainer pauses the given container.
func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error {
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "pause", ctr.ID())...)
}
// UnpauseContainer unpauses the given container.
func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error {
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, append(r.runtimeFlags, "resume", ctr.ID())...)
}
// HTTPAttach performs an attach for the HTTP API.
// The caller must handle closing the HTTP connection after this returns.
// The cancel channel is not closed; it is up to the caller to do so after
// this function returns.
// If this is a container with a terminal, we will stream raw. If it is not, we
// will stream with an 8-byte header to multiplex STDOUT and STDERR.
// Returns any errors that occurred, and whether the connection was successfully
// hijacked before that error occurred.
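// In the non-terminal case, the 8-byte header is the Docker-style stream multiplexing header:
// one byte identifying the stream (0=stdin, 1=stdout, 2=stderr), three zero bytes, and the
// payload length as a big-endian uint32 (see the makeHTTPAttachHeader calls below).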
func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) (deferredErr error) {
isTerminal := false
if ctr.config.Spec.Process != nil {
isTerminal = ctr.config.Spec.Process.Terminal
}
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
return errors.Wrapf(define.ErrInvalidArg, "must specify at least one stream to attach to")
}
}
attachSock, err := r.AttachSocketPath(ctr)
if err != nil {
return err
}
var conn *net.UnixConn
if streamAttach {
newConn, err := openUnixSocket(attachSock)
if err != nil {
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
}
conn = newConn
defer func() {
if err := conn.Close(); err != nil {
logrus.Errorf("Unable to close container %s attach socket: %q", ctr.ID(), err)
}
}()
logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), attachSock)
}
detachString := ctr.runtime.config.Engine.DetachKeys
if detachKeys != nil {
detachString = *detachKeys
}
detach, err := processDetachKeys(detachString)
if err != nil {
return err
}
attachStdout := true
attachStderr := true
attachStdin := true
if streams != nil {
attachStdout = streams.Stdout
attachStderr = streams.Stderr
attachStdin = streams.Stdin
}
logrus.Debugf("Going to hijack container %s attach connection", ctr.ID())
// Alright, let's hijack.
hijacker, ok := w.(http.Hijacker)
if !ok {
return errors.Errorf("unable to hijack connection")
}
httpCon, httpBuf, err := hijacker.Hijack()
if err != nil {
return errors.Wrapf(err, "error hijacking connection")
}
hijackDone <- true
writeHijackHeader(req, httpBuf)
// Force a flush after the header is written.
if err := httpBuf.Flush(); err != nil {
return errors.Wrapf(err, "error flushing HTTP hijack header")
}
defer func() {
hijackWriteErrorAndClose(deferredErr, ctr.ID(), isTerminal, httpCon, httpBuf)
}()
logrus.Debugf("Hijack for container %s attach session done, ready to stream", ctr.ID())
// TODO: This is gross. Really, really gross.
// I want to say we should read all the logs into an array before
// calling this, in container_api.go, but that could take a lot of
// memory...
// On the whole, we need to figure out a better way of doing this,
// though.
logSize := 0
if streamLogs {
logrus.Debugf("Will stream logs for container %s attach session", ctr.ID())
// Get all logs for the container
logChan := make(chan *logs.LogLine)
logOpts := new(logs.LogOptions)
logOpts.Tail = -1
logOpts.WaitGroup = new(sync.WaitGroup)
errChan := make(chan error)
go func() {
var err error
// In non-terminal mode we need to prepend with the
// stream header.
logrus.Debugf("Writing logs for container %s to HTTP attach", ctr.ID())
for logLine := range logChan {
if !isTerminal {
device := logLine.Device
var header []byte
headerLen := uint32(len(logLine.Msg))
logSize += len(logLine.Msg)
switch strings.ToLower(device) {
case "stdin":
header = makeHTTPAttachHeader(0, headerLen)
case "stdout":
header = makeHTTPAttachHeader(1, headerLen)
case "stderr":
header = makeHTTPAttachHeader(2, headerLen)
default:
logrus.Errorf("Unknown device for log line: %s", device)
header = makeHTTPAttachHeader(1, headerLen)
}
_, err = httpBuf.Write(header)
if err != nil {
break
}
}
_, err = httpBuf.Write([]byte(logLine.Msg))
if err != nil {
break
}
if !logLine.Partial() {
_, err = httpBuf.Write([]byte("\n"))
if err != nil {
break
}
}
err = httpBuf.Flush()
if err != nil {
break
}
}
errChan <- err
}()
if err := ctr.ReadLog(context.Background(), logOpts, logChan); err != nil {
return err
}
go func() {
logOpts.WaitGroup.Wait()
close(logChan)
}()
logrus.Debugf("Done reading logs for container %s, %d bytes", ctr.ID(), logSize)
if err := <-errChan; err != nil {
return err
}
}
if !streamAttach {
logrus.Debugf("Done streaming logs for container %s attach, exiting as attach streaming not requested", ctr.ID())
return nil
}
logrus.Debugf("Forwarding attach output for container %s", ctr.ID())
stdoutChan := make(chan error)
stdinChan := make(chan error)
// Handle STDOUT/STDERR
go func() {
var err error
if isTerminal {
// Hack: return immediately if attachStdout not set to
// emulate Docker.
// Basically, when terminal is set, STDERR goes nowhere.
			// Everything goes over STDOUT.
// Therefore, if not attaching STDOUT - we'll never copy
// anything from here.
logrus.Debugf("Performing terminal HTTP attach for container %s", ctr.ID())
if attachStdout {
err = httpAttachTerminalCopy(conn, httpBuf, ctr.ID())
}
} else {
logrus.Debugf("Performing non-terminal HTTP attach for container %s", ctr.ID())
err = httpAttachNonTerminalCopy(conn, httpBuf, ctr.ID(), attachStdin, attachStdout, attachStderr)
}
stdoutChan <- err
logrus.Debugf("STDOUT/ERR copy completed")
}()
// Next, STDIN. Avoid entirely if attachStdin unset.
if attachStdin {
go func() {
_, err := utils.CopyDetachable(conn, httpBuf, detach)
logrus.Debugf("STDIN copy completed")
stdinChan <- err
}()
}
for {
select {
case err := <-stdoutChan:
if err != nil {
return err
}
return nil
case err := <-stdinChan:
if err != nil {
return err
}
// copy stdin is done, close it
if connErr := conn.CloseWrite(); connErr != nil {
logrus.Errorf("Unable to close conn: %v", connErr)
}
case <-cancel:
return nil
}
}
}
// isRetryable returns whether the error was caused by an interrupted syscall (EINTR) or whether
// the specified operation on a non-blocking file descriptor wasn't ready for completion (EAGAIN).
func isRetryable(err error) bool {
if errno, isErrno := errors.Cause(err).(syscall.Errno); isErrno {
return errno == syscall.EINTR || errno == syscall.EAGAIN
}
return false
}
// openControlFile opens the terminal control file.
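// It retries for up to roughly 60 seconds (600 attempts, 100ms apart) whenever the open fails
// with a retryable error (EINTR or EAGAIN).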
func openControlFile(ctr *Container, parentDir string) (*os.File, error) {
controlPath := filepath.Join(parentDir, "ctl")
for i := 0; i < 600; i++ {
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY|unix.O_NONBLOCK, 0)
if err == nil {
return controlFile, nil
}
if !isRetryable(err) {
return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID())
}
time.Sleep(time.Second / 10)
}
return nil, errors.Errorf("timeout waiting for %q", controlPath)
}
// AttachResize resizes the terminal used by the given container.
func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.bundlePath())
if err != nil {
return err
}
defer controlFile.Close()
logrus.Debugf("Received a resize event for container %s: %+v", ctr.ID(), newSize)
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
}
return nil
}
// CheckpointContainer checkpoints the given container.
func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) (int64, error) {
// imagePath is used by CRIU to store the actual checkpoint files
imagePath := ctr.CheckpointPath()
if options.PreCheckPoint {
imagePath = ctr.PreCheckPointPath()
}
// workPath will be used to store dump.log and stats-dump
workPath := ctr.bundlePath()
logrus.Debugf("Writing checkpoint to %s", imagePath)
logrus.Debugf("Writing checkpoint logs to %s", workPath)
logrus.Debugf("Pre-dump the container %t", options.PreCheckPoint)
args := []string{}
args = append(args, r.runtimeFlags...)
args = append(args, "checkpoint")
args = append(args, "--image-path")
args = append(args, imagePath)
args = append(args, "--work-path")
args = append(args, workPath)
if options.KeepRunning {
args = append(args, "--leave-running")
}
if options.TCPEstablished {
args = append(args, "--tcp-established")
}
if options.FileLocks {
args = append(args, "--file-locks")
}
if !options.PreCheckPoint && options.KeepRunning {
args = append(args, "--leave-running")
}
if options.PreCheckPoint {
args = append(args, "--pre-dump")
}
if !options.PreCheckPoint && options.WithPrevious {
args = append(
args,
"--parent-path",
filepath.Join("..", preCheckpointDir),
)
}
args = append(args, ctr.ID())
logrus.Debugf("the args to checkpoint: %s %s", r.path, strings.Join(args, " "))
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return 0, err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
if path, ok := os.LookupEnv("PATH"); ok {
env = append(env, fmt.Sprintf("PATH=%s", path))
}
runtime.LockOSThread()
if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
return 0, err
}
runtimeCheckpointStarted := time.Now()
err = utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...)
// Ignore error returned from SetSocketLabel("") call,
// can't recover.
if labelErr := label.SetSocketLabel(""); labelErr == nil {
// Unlock the thread only if the process label could be restored
// successfully. Otherwise leave the thread locked and the Go runtime
// will terminate it once it returns to the threads pool.
runtime.UnlockOSThread()
} else {
logrus.Errorf("Unable to reset socket label: %q", labelErr)
}
runtimeCheckpointDuration := func() int64 {
if options.PrintStats {
return time.Since(runtimeCheckpointStarted).Microseconds()
}
return 0
}()
return runtimeCheckpointDuration, err
}
func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
if ctr.state.ConmonPID == 0 {
// If the container is running or paused, assume Conmon is
// running. We didn't record Conmon PID on some old versions, so
// that is likely what's going on...
// Unusual enough that we should print a warning message though.
if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
logrus.Warnf("Conmon PID is not set, but container is running!")
return true, nil
}
// Container's not running, so conmon PID being unset is
// expected. Conmon is not running.
return false, nil
}
// We have a conmon PID. Ping it with signal 0.
if err := unix.Kill(ctr.state.ConmonPID, 0); err != nil {
if err == unix.ESRCH {
return false, nil
}
return false, errors.Wrapf(err, "error pinging container %s conmon with signal 0", ctr.ID())
}
return true, nil
}
// SupportsCheckpoint checks if the OCI runtime supports checkpointing
// containers.
func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
return crutils.CRRuntimeSupportsCheckpointRestore(r.path)
}
// SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error
// messages.
func (r *ConmonOCIRuntime) SupportsJSONErrors() bool {
return r.supportsJSON
}
// SupportsNoCgroups checks if the OCI runtime supports running containers
// without cgroups (the --cgroup-manager=disabled flag).
func (r *ConmonOCIRuntime) SupportsNoCgroups() bool {
return r.supportsNoCgroups
}
// SupportsKVM checks if the OCI runtime supports running containers
// without KVM separation
func (r *ConmonOCIRuntime) SupportsKVM() bool {
return r.supportsKVM
}
// AttachSocketPath is the path to a single container's attach socket.
func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
if ctr == nil {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get attach socket path")
}
return filepath.Join(ctr.bundlePath(), "attach"), nil
}
// ExitFilePath is the path to a container's exit file.
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
}
return filepath.Join(r.exitsDir, ctr.ID()), nil
}
// RuntimeInfo provides information on the runtime.
func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error) {
runtimePackage := packageVersion(r.path)
conmonPackage := packageVersion(r.conmonPath)
runtimeVersion, err := r.getOCIRuntimeVersion()
if err != nil {
return nil, nil, errors.Wrapf(err, "error getting version of OCI runtime %s", r.name)
}
conmonVersion, err := r.getConmonVersion()
if err != nil {
return nil, nil, errors.Wrapf(err, "error getting conmon version")
}
conmon := define.ConmonInfo{
Package: conmonPackage,
Path: r.conmonPath,
Version: conmonVersion,
}
ocirt := define.OCIRuntimeInfo{
Name: r.name,
Path: r.path,
Package: runtimePackage,
Version: runtimeVersion,
}
return &conmon, &ocirt, nil
}
// makeAccessible changes the permissions of the path and each of its parent directories so they have at least --x--x--x
func makeAccessible(path string, uid, gid int) error {
for ; path != "/"; path = filepath.Dir(path) {
st, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid {
continue
}
if st.Mode()&0111 != 0111 {
if err := os.Chmod(path, st.Mode()|0111); err != nil {
return err
}
}
}
return nil
}
// Wait for a container which has been sent a signal to stop
func waitContainerStop(ctr *Container, timeout time.Duration) error {
return waitPidStop(ctr.state.PID, timeout)
}
// Wait for a given PID to stop
func waitPidStop(pid int, timeout time.Duration) error {
done := make(chan struct{})
chControl := make(chan struct{})
go func() {
for {
select {
case <-chControl:
return
default:
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
close(done)
return
}
logrus.Errorf("Pinging PID %d with signal 0: %v", pid, err)
}
time.Sleep(100 * time.Millisecond)
}
}
}()
select {
case <-done:
return nil
case <-time.After(timeout):
close(chControl)
return errors.Errorf("given PIDs did not die within timeout")
}
}
func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
logTag := ctr.LogTag()
if logTag == "" {
return "", nil
}
data, err := ctr.inspectLocked(false)
if err != nil {
// FIXME: this error should probably be returned
return "", nil // nolint: nilerr
}
tmpl, err := template.New("container").Parse(logTag)
if err != nil {
return "", errors.Wrapf(err, "template parsing error %s", logTag)
}
var b bytes.Buffer
err = tmpl.Execute(&b, data)
if err != nil {
return "", err
}
return b.String(), nil
}
// createOCIContainer generates this container's main conmon instance and prepares it for starting
func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) {
var stderrBuf bytes.Buffer
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return 0, err
}
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
return 0, errors.Wrapf(err, "error creating socket pair")
}
defer errorhandling.CloseQuiet(parentSyncPipe)
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
return 0, errors.Wrapf(err, "error creating socket pair for start pipe")
}
defer errorhandling.CloseQuiet(parentStartPipe)
var ociLog string
if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
ociLog = filepath.Join(ctr.state.RunDir, "oci-log")
}
logTag, err := r.getLogTag(ctr)
if err != nil {
return 0, err
}
if ctr.config.CgroupsMode == cgroupSplit {
if err := utils.MoveUnderCgroupSubtree("runtime"); err != nil {
return 0, err
}
}
pidfile := ctr.config.PidFile
if pidfile == "" {
pidfile = filepath.Join(ctr.state.RunDir, "pidfile")
}
args := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), pidfile, ctr.LogPath(), r.exitsDir, ociLog, ctr.LogDriver(), logTag)
if ctr.config.SdNotifyMode == define.SdNotifyModeContainer && ctr.notifySocket != "" {
args = append(args, fmt.Sprintf("--sdnotify-socket=%s", ctr.notifySocket))
}
if ctr.config.Spec.Process.Terminal {
args = append(args, "-t")
} else if ctr.config.Stdin {
args = append(args, "-i")
}
if ctr.config.Timeout > 0 {
args = append(args, fmt.Sprintf("--timeout=%d", ctr.config.Timeout))
}
if !r.enableKeyring {
args = append(args, "--no-new-keyring")
}
if ctr.config.ConmonPidFile != "" {
args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
}
if r.noPivot {
args = append(args, "--no-pivot")
}
exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), false)
if err != nil {
return 0, err
}
exitCommand = append(exitCommand, ctr.config.ID)
args = append(args, "--exit-command", exitCommand[0])
for _, arg := range exitCommand[1:] {
args = append(args, []string{"--exit-command-arg", arg}...)
}
// Pass down the LISTEN_* environment (see #10443).
preserveFDs := ctr.config.PreserveFDs
if val := os.Getenv("LISTEN_FDS"); val != "" {
if ctr.config.PreserveFDs > 0 {
logrus.Warnf("Ignoring LISTEN_FDS to preserve custom user-specified FDs")
} else {
fds, err := strconv.Atoi(val)
if err != nil {
return 0, fmt.Errorf("converting LISTEN_FDS=%s: %w", val, err)
}
preserveFDs = uint(fds)
}
}
if preserveFDs > 0 {
args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", preserveFDs))...)
}
if restoreOptions != nil {
args = append(args, "--restore", ctr.CheckpointPath())
if restoreOptions.TCPEstablished {
args = append(args, "--runtime-opt", "--tcp-established")
}
if restoreOptions.FileLocks {
args = append(args, "--runtime-opt", "--file-locks")
}
if restoreOptions.Pod != "" {
mountLabel := ctr.config.MountLabel
processLabel := ctr.config.ProcessLabel
if mountLabel != "" {
args = append(
args,
"--runtime-opt",
fmt.Sprintf(
"--lsm-mount-context=%s",
mountLabel,
),
)
}
if processLabel != "" {
args = append(
args,
"--runtime-opt",
fmt.Sprintf(
"--lsm-profile=selinux:%s",
processLabel,
),
)
}
}
}
logrus.WithFields(logrus.Fields{
"args": args,
}).Debugf("running conmon: %s", r.conmonPath)
cmd := exec.Command(r.conmonPath, args...)
cmd.Dir = ctr.bundlePath()
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
// TODO this is probably a really bad idea for some uses
// Make this configurable
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if ctr.config.Spec.Process.Terminal {
cmd.Stderr = &stderrBuf
}
// 0, 1 and 2 are stdin, stdout and stderr
conmonEnv := r.configureConmonEnv(ctr, runtimeDir)
var filesToClose []*os.File
if preserveFDs > 0 {
for fd := 3; fd < int(3+preserveFDs); fd++ {
f := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))
filesToClose = append(filesToClose, f)
cmd.ExtraFiles = append(cmd.ExtraFiles, f)
}
}
cmd.Env = r.conmonEnv
	// we don't want to step on the fds users asked us to preserve
// Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", preserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", preserveFDs+4))
cmd.Env = append(cmd.Env, conmonEnv...)
cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe)
if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
ports, err := bindPorts(ctr.convertPortMappings())
if err != nil {
return 0, err
}
filesToClose = append(filesToClose, ports...)
// Leak the port we bound in the conmon process. These fd's won't be used
// by the container and conmon will keep the ports busy so that another
// process cannot use them.
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
}
if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() {
if ctr.config.PostConfigureNetNS {
havePortMapping := len(ctr.config.PortMappings) > 0
if havePortMapping {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
return 0, errors.Wrapf(err, "failed to create rootless port sync pipe")
}
}
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
return 0, errors.Wrapf(err, "failed to create rootless network sync pipe")
}
} else {
if ctr.rootlessSlirpSyncR != nil {
defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR)
}
if ctr.rootlessSlirpSyncW != nil {
defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW)
}
}
// Leak one end in conmon, the other one will be leaked into slirp4netns
cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW)
if ctr.rootlessPortSyncW != nil {
defer errorhandling.CloseQuiet(ctr.rootlessPortSyncW)
// Leak one end in conmon, the other one will be leaked into rootlessport
cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessPortSyncW)
}
}
var runtimeRestoreStarted time.Time
if restoreOptions != nil {
runtimeRestoreStarted = time.Now()
}
err = startCommandGivenSelinux(cmd, ctr)
// regardless of whether we errored or not, we no longer need the children pipes
childSyncPipe.Close()
childStartPipe.Close()
if err != nil {
return 0, err
}
if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil {
return 0, err
}
/* Wait for initial setup and fork, and reap child */
err = cmd.Wait()
if err != nil {
return 0, err
}
pid, err := readConmonPipeData(r.name, parentSyncPipe, ociLog)
if err != nil {
if err2 := r.DeleteContainer(ctr); err2 != nil {
logrus.Errorf("Removing container %s from runtime after creation failed", ctr.ID())
}
return 0, err
}
ctr.state.PID = pid
conmonPID, err := readConmonPidFile(ctr.config.ConmonPidFile)
if err != nil {
logrus.Warnf("Error reading conmon pid file for container %s: %v", ctr.ID(), err)
} else if conmonPID > 0 {
// conmon not having a pid file is a valid state, so don't set it if we don't have it
logrus.Infof("Got Conmon PID as %d", conmonPID)
ctr.state.ConmonPID = conmonPID
// Send the MAINPID via sdnotify if needed.
switch ctr.config.SdNotifyMode {
case define.SdNotifyModeContainer, define.SdNotifyModeIgnore:
// Nothing to do or conmon takes care of it already.
default:
if sent, err := daemon.SdNotify(false, fmt.Sprintf("MAINPID=%d", conmonPID)); err != nil {
logrus.Errorf("Notifying systemd of Conmon PID: %v", err)
} else if sent {
logrus.Debugf("Notify MAINPID sent successfully")
}
}
}
runtimeRestoreDuration := func() int64 {
if restoreOptions != nil && restoreOptions.PrintStats {
return time.Since(runtimeRestoreStarted).Microseconds()
}
return 0
}()
	// These fds were passed down to the runtime. Close them here
	// so we do not interfere.
for _, f := range filesToClose {
errorhandling.CloseQuiet(f)
}
return runtimeRestoreDuration, nil
}
// configureConmonEnv gets the environment values to add to conmon's exec struct
// TODO this may want to be less hardcoded/more configurable in the future
func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string) []string {
var env []string
for _, e := range os.Environ() {
if strings.HasPrefix(e, "LC_") {
env = append(env, e)
}
}
conf, ok := os.LookupEnv("CONTAINERS_CONF")
if ok {
env = append(env, fmt.Sprintf("CONTAINERS_CONF=%s", conf))
}
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
home := homedir.Get()
if home != "" {
env = append(env, fmt.Sprintf("HOME=%s", home))
}
return env
}
// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI
func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logDriver, logTag string) []string {
// set the conmon API version to be able to use the correct sync struct keys
args := []string{
"--api-version", "1",
"-c", ctr.ID(),
"-u", cuuid,
"-r", r.path,
"-b", bundlePath,
"-p", pidPath,
"-n", ctr.Name(),
"--exit-dir", exitDir,
"--full-attach",
}
if len(r.runtimeFlags) > 0 {
rFlags := []string{}
for _, arg := range r.runtimeFlags {
rFlags = append(rFlags, "--runtime-arg", arg)
}
args = append(args, rFlags...)
}
if ctr.CgroupManager() == config.SystemdCgroupsManager && !ctr.config.NoCgroups && ctr.config.CgroupsMode != cgroupSplit {
args = append(args, "-s")
}
var logDriverArg string
switch logDriver {
case define.JournaldLogging:
logDriverArg = define.JournaldLogging
case define.NoLogging:
logDriverArg = define.NoLogging
case define.PassthroughLogging:
logDriverArg = define.PassthroughLogging
case define.JSONLogging:
fallthrough
//lint:ignore ST1015 the default case has to be here
default: //nolint:stylecheck
// No case here should happen except JSONLogging, but keep this here in case the options are extended
logrus.Errorf("%s logging specified but not supported. Choosing k8s-file logging instead", ctr.LogDriver())
fallthrough
case "":
// to get here, either a user would specify `--log-driver ""`, or this came from another place in libpod
// since the former case is obscure, and the latter case isn't an error, let's silently fallthrough
fallthrough
case define.KubernetesLogging:
logDriverArg = fmt.Sprintf("%s:%s", define.KubernetesLogging, logPath)
}
args = append(args, "-l", logDriverArg)
logLevel := logrus.GetLevel()
args = append(args, "--log-level", logLevel.String())
if logLevel == logrus.DebugLevel {
logrus.Debugf("%s messages will be logged to syslog", r.conmonPath)
args = append(args, "--syslog")
}
size := r.logSizeMax
if ctr.config.LogSize > 0 {
size = ctr.config.LogSize
}
if size > 0 {
args = append(args, "--log-size-max", fmt.Sprintf("%v", size))
}
if ociLogPath != "" {
args = append(args, "--runtime-arg", "--log-format=json", "--runtime-arg", "--log", fmt.Sprintf("--runtime-arg=%s", ociLogPath))
}
if logTag != "" {
args = append(args, "--log-tag", logTag)
}
if ctr.config.NoCgroups {
logrus.Debugf("Running with no Cgroups")
args = append(args, "--runtime-arg", "--cgroup-manager", "--runtime-arg", "disabled")
}
return args
}
// startCommandGivenSelinux starts a container ensuring to set the labels of
// the process to make sure SELinux doesn't block conmon communication, if SELinux is enabled
func startCommandGivenSelinux(cmd *exec.Cmd, ctr *Container) error {
	// Make sure to unset the NOTIFY_SOCKET and reset it afterwards if needed.
switch ctr.config.SdNotifyMode {
case define.SdNotifyModeContainer, define.SdNotifyModeIgnore:
if ctr.notifySocket != "" {
if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil {
logrus.Warnf("Error unsetting NOTIFY_SOCKET %v", err)
}
defer func() {
if err := os.Setenv("NOTIFY_SOCKET", ctr.notifySocket); err != nil {
logrus.Errorf("Resetting NOTIFY_SOCKET=%s", ctr.notifySocket)
}
}()
}
}
if !selinux.GetEnabled() {
return cmd.Start()
}
// Set the label of the conmon process to be level :s0
// This will allow the container processes to talk to fifo-files
// passed into the container by conmon
var (
plabel string
con selinux.Context
err error
)
plabel, err = selinux.CurrentLabel()
if err != nil {
return errors.Wrapf(err, "failed to get current SELinux label")
}
con, err = selinux.NewContext(plabel)
if err != nil {
return errors.Wrapf(err, "failed to get new context from SELinux label")
}
runtime.LockOSThread()
if con["level"] != "s0" && con["level"] != "" {
con["level"] = "s0"
if err = label.SetProcessLabel(con.Get()); err != nil {
runtime.UnlockOSThread()
return err
}
}
err = cmd.Start()
// Ignore error returned from SetProcessLabel("") call,
// can't recover.
if labelErr := label.SetProcessLabel(""); labelErr == nil {
// Unlock the thread only if the process label could be restored
// successfully. Otherwise leave the thread locked and the Go runtime
// will terminate it once it returns to the threads pool.
runtime.UnlockOSThread()
} else {
logrus.Errorf("Unable to set process label: %q", labelErr)
}
return err
}
// moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup
// it then signals for conmon to start by sending nonce data down the start fd
func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File) error {
mustCreateCgroup := true
if ctr.config.NoCgroups {
mustCreateCgroup = false
}
// If cgroup creation is disabled - just signal.
switch ctr.config.CgroupsMode {
case "disabled", "no-conmon", cgroupSplit:
mustCreateCgroup = false
}
// $INVOCATION_ID is set by systemd when running as a service.
if os.Getenv("INVOCATION_ID") != "" {
mustCreateCgroup = false
}
if mustCreateCgroup {
// Usually rootless users are not allowed to configure cgroupfs.
// There are cases, though, where it is allowed (e.g. if the cgroup
// is manually configured and chowned). Avoid detecting all
// such cases and simply use a lower log level.
logLevel := logrus.WarnLevel
if rootless.IsRootless() {
logLevel = logrus.InfoLevel
}
// TODO: This should be a switch - we are not guaranteed that
// there are only 2 valid cgroup managers
cgroupParent := ctr.CgroupParent()
if ctr.CgroupManager() == config.SystemdCgroupsManager {
unitName := createUnitName("libpod-conmon", ctr.ID())
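// The transient unit is expected to look like e.g. "libpod-conmon-<container ID>.scope".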
realCgroupParent := cgroupParent
splitParent := strings.Split(cgroupParent, "/")
if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 {
realCgroupParent = splitParent[len(splitParent)-1]
}
logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName)
if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil {
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to systemd sandbox cgroup: %v", err)
}
} else {
cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
control, err := cgroups.New(cgroupPath, &spec.LinuxResources{})
if err != nil {
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
} else if err := control.AddPid(cmd.Process.Pid); err != nil {
// we need to remove this defer and delete the cgroup once conmon exits
// maybe need a conmon monitor?
logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
}
}
}
/* We set the cgroup, now the child can start creating children */
if err := writeConmonPipeData(startFd); err != nil {
return err
}
return nil
}
// newPipe creates a unix socket pair for communication.
// Returns two files - first is parent, second is child.
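// SOCK_SEQPACKET preserves message boundaries, so each conmon write arrives as one read;
// SOCK_CLOEXEC keeps the descriptors from leaking into exec'd children.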
func newPipe() (*os.File, *os.File, error) {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
if err != nil {
return nil, nil, err
}
return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
}
// readConmonPidFile attempts to read conmon's pid from its pid file
func readConmonPidFile(pidFile string) (int, error) {
// Try to read the conmon pid from its pid file.
if pidFile != "" {
contents, err := ioutil.ReadFile(pidFile)
if err != nil {
return -1, err
}
// Convert it to an int
conmonPID, err := strconv.Atoi(string(contents))
if err != nil {
return -1, err
}
return conmonPID, nil
}
return 0, nil
}
// readConmonPipeData attempts to read a syncInfo struct from the pipe
func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int, error) {
// syncInfo is used to return data from monitor process to daemon
type syncInfo struct {
Data int `json:"data"`
Message string `json:"message,omitempty"`
}
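// Conmon is expected to write a single newline-terminated JSON object to this pipe,
// e.g. {"data": <pid>} on success, or a negative "data" plus a "message" describing
// the runtime failure.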
// Wait to get container pid from conmon
type syncStruct struct {
si *syncInfo
err error
}
ch := make(chan syncStruct)
go func() {
var si *syncInfo
rdr := bufio.NewReader(pipe)
b, err := rdr.ReadBytes('\n')
// Ignore EOF here; an error can be returned even when data was read.
// If the data is not valid JSON, the unmarshal below will fail.
if err != nil && !errors.Is(err, io.EOF) {
ch <- syncStruct{err: err}
}
if err := json.Unmarshal(b, &si); err != nil {
ch <- syncStruct{err: fmt.Errorf("conmon bytes %q: %w", string(b), err)}
return
}
ch <- syncStruct{si: si}
}()
data := -1 //nolint: wastedassign
select {
case ss := <-ch:
if ss.err != nil {
if ociLog != "" {
ociLogData, err := ioutil.ReadFile(ociLog)
if err == nil {
var ociErr ociError
if err := json.Unmarshal(ociLogData, &ociErr); err == nil {
return -1, getOCIRuntimeError(runtimeName, ociErr.Msg)
}
}
}
return -1, errors.Wrapf(ss.err, "container create failed (no logs from conmon)")
}
logrus.Debugf("Received: %d", ss.si.Data)
if ss.si.Data < 0 {
if ociLog != "" {
ociLogData, err := ioutil.ReadFile(ociLog)
if err == nil {
var ociErr ociError
if err := json.Unmarshal(ociLogData, &ociErr); err == nil {
return ss.si.Data, getOCIRuntimeError(runtimeName, ociErr.Msg)
}
}
}
// If we failed to parse the JSON errors, then print the output as it is
if ss.si.Message != "" {
return ss.si.Data, getOCIRuntimeError(runtimeName, ss.si.Message)
}
return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
}
data = ss.si.Data
case <-time.After(define.ContainerCreateTimeout):
return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
}
return data, nil
}
// writeConmonPipeData writes nonce data to a pipe
func writeConmonPipeData(pipe *os.File) error {
someData := []byte{0}
_, err := pipe.Write(someData)
return err
}
// formatRuntimeOpts prepends opts passed to it with --runtime-opt for passing to conmon
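// For example, formatRuntimeOpts("log-format=json", "log=/tmp/oci.log") yields
// ["--runtime-opt", "log-format=json", "--runtime-opt", "log=/tmp/oci.log"].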
func formatRuntimeOpts(opts ...string) []string {
args := make([]string, 0, len(opts)*2)
for _, o := range opts {
args = append(args, "--runtime-opt", o)
}
return args
}
// getConmonVersion returns a string representation of the conmon version.
func (r *ConmonOCIRuntime) getConmonVersion() (string, error) {
output, err := utils.ExecCmd(r.conmonPath, "--version")
if err != nil {
return "", err
}
return strings.TrimSuffix(strings.Replace(output, "\n", ", ", 1), "\n"), nil
}
// getOCIRuntimeVersion returns a string representation of the OCI runtime's
// version.
func (r *ConmonOCIRuntime) getOCIRuntimeVersion() (string, error) {
output, err := utils.ExecCmd(r.path, "--version")
if err != nil {
return "", err
}
return strings.TrimSuffix(output, "\n"), nil
}
// Copy data from container to HTTP connection, for terminal attach.
// Container is the container's attach socket connection, http is a buffer for
// the HTTP connection. cid is the ID of the container the attach session is
// running for (used solely for error messages).
func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid string) error {
buf := make([]byte, bufferSize)
for {
numR, err := container.Read(buf)
logrus.Debugf("Read fd(%d) %d/%d bytes for container %s", int(buf[0]), numR, len(buf), cid)
if numR > 0 {
switch buf[0] {
case AttachPipeStdout:
// Do nothing
default:
logrus.Errorf("Received unexpected attach type %+d, discarding %d bytes", buf[0], numR)
continue
}
numW, err2 := http.Write(buf[1:numR])
if err2 != nil {
if err != nil {
logrus.Errorf("Reading container %s STDOUT: %v", cid, err)
}
return err2
} else if numW+1 != numR {
return io.ErrShortWrite
}
// We need to force the buffer to write immediately, so
// there isn't a delay on the terminal side.
if err2 := http.Flush(); err2 != nil {
if err != nil {
logrus.Errorf("Reading container %s STDOUT: %v", cid, err)
}
return err2
}
}
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
// Copy data from a container to an HTTP connection, for non-terminal attach.
// Appends a header to multiplex input.
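// The header is the usual 8-byte Docker-style stream framing: one byte for the stream
// (0=stdin, 1=stdout, 2=stderr), three zero bytes, then a big-endian uint32 payload length.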
func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid string, stdin, stdout, stderr bool) error {
buf := make([]byte, bufferSize)
for {
numR, err := container.Read(buf)
if numR > 0 {
var headerBuf []byte
// Subtract 1 because we strip the first byte (used for
// multiplexing by Conmon).
headerLen := uint32(numR - 1)
// Practically speaking, we could make this buf[0] - 1,
// but we need to validate it anyway...
switch buf[0] {
case AttachPipeStdin:
headerBuf = makeHTTPAttachHeader(0, headerLen)
if !stdin {
continue
}
case AttachPipeStdout:
if !stdout {
continue
}
headerBuf = makeHTTPAttachHeader(1, headerLen)
case AttachPipeStderr:
if !stderr {
continue
}
headerBuf = makeHTTPAttachHeader(2, headerLen)
default:
logrus.Errorf("Received unexpected attach type %+d, discarding %d bytes", buf[0], numR)
continue
}
numH, err2 := http.Write(headerBuf)
if err2 != nil {
if err != nil {
logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return err2
}
// Hardcoding header length is pretty gross, but
// fast. Should be safe, as this is a fixed part
// of the protocol.
if numH != 8 {
if err != nil {
logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return io.ErrShortWrite
}
numW, err2 := http.Write(buf[1:numR])
if err2 != nil {
if err != nil {
logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return err2
} else if numW+1 != numR {
if err != nil {
logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return io.ErrShortWrite
}
// We need to force the buffer to write immediately, so
// there isn't a delay on the terminal side.
if err2 := http.Flush(); err2 != nil {
if err != nil {
logrus.Errorf("Reading container %s STDOUT: %v", cid, err)
}
return err2
}
}
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
| [
"\"LISTEN_FDS\"",
"\"_CONTAINERS_USERNS_CONFIGURED\"",
"\"_CONTAINERS_ROOTLESS_UID\"",
"\"INVOCATION_ID\""
]
| []
| [
"_CONTAINERS_ROOTLESS_UID",
"INVOCATION_ID",
"_CONTAINERS_USERNS_CONFIGURED",
"LISTEN_FDS"
]
| [] | ["_CONTAINERS_ROOTLESS_UID", "INVOCATION_ID", "_CONTAINERS_USERNS_CONFIGURED", "LISTEN_FDS"] | go | 4 | 0 | |
pkg/defaults/defaults_windows.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaults
import (
"fmt"
"os"
"path/filepath"
)
const AppArmorProfileName = ""
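// DataRoot returns nerdctl's persistent state directory on Windows,
// typically C:\ProgramData\nerdctl.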
func DataRoot() string {
return filepath.Join(os.Getenv("ProgramData"), "nerdctl")
}
func CNIPath() string {
return filepath.Join(os.Getenv("ProgramFiles"), "containerd", "cni", "bin")
}
func CNINetConfPath() string {
return filepath.Join(os.Getenv("ProgramFiles"), "containerd", "cni", "conf")
}
func BuildKitHost() string {
return fmt.Sprint("\\\\.\\pipe\\buildkit")
}
func IsSystemdAvailable() bool {
return false
}
func CgroupManager() string {
return ""
}
func CgroupnsMode() string {
return ""
}
| [
"\"ProgramData\"",
"\"ProgramFiles\"",
"\"ProgramFiles\""
]
| []
| [
"ProgramFiles",
"ProgramData"
]
| [] | ["ProgramFiles", "ProgramData"] | go | 2 | 0 | |
main.go | package main
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/dghubble/oauth1"
"github.com/dghubble/oauth1/twitter"
"github.com/joho/godotenv"
"golang.org/x/oauth2"
)
var (
dfxExecPath string
)
func init() {
var err error
if dfxExecPath, err = exec.LookPath("dfx"); err != nil {
panic("Could not find the dfx canister sdk executable. Installation steps: https://sdk.dfinity.org")
}
}
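// call runs the dfx executable with the given sub-command and arguments inside dir and
// returns the combined stdout/stderr output, the process exit code (0 unless it exited
// non-zero), and any error.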
func call(dir string, subCmd string, args ...string) ([]byte, int, error) {
cmd := exec.Cmd{
Path: dfxExecPath,
Args: append([]string{dfxExecPath, subCmd}, args...),
Dir: dir,
}
out, err := cmd.CombinedOutput()
if err != nil {
switch err := err.(type) {
case *exec.ExitError:
return out, err.ExitCode(), err
default:
return out, 0, err
}
}
return out, 0, nil
}
func main() {
if err := godotenv.Load(); err != nil {
log.Fatal("Error loading .env file")
}
discordCfg := &oauth2.Config{
RedirectURL: "http://localhost:3000/discord/callback",
ClientID: os.Getenv("DISCORD_CLIENT_ID"),
ClientSecret: os.Getenv("DISCORD_CLIENT_SECRET"),
Scopes: []string{"identify"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://discord.com/api/oauth2/authorize",
TokenURL: "https://discord.com/api/oauth2/token",
AuthStyle: oauth2.AuthStyleInParams,
},
}
twitterCfg := oauth1.Config{
ConsumerKey: os.Getenv("TWITTER_API_KEY"),
ConsumerSecret: os.Getenv("TWITTER_API_SECRET"),
CallbackURL: "http://localhost:3000/twitter/callback",
Endpoint: twitter.AuthorizeEndpoint,
}
http.HandleFunc("/discord/", func(rw http.ResponseWriter, r *http.Request) {
oauthState := generateStateCookie(rw, "discord")
http.Redirect(rw, r, discordCfg.AuthCodeURL(oauthState), http.StatusTemporaryRedirect)
})
http.HandleFunc("/twitter/", func(rw http.ResponseWriter, r *http.Request) {
requestToken, _, _ := twitterCfg.RequestToken()
authorizationURL, _ := twitterCfg.AuthorizationURL(requestToken)
http.Redirect(rw, r, authorizationURL.String(), http.StatusTemporaryRedirect)
})
http.HandleFunc("/discord/callback", func(rw http.ResponseWriter, r *http.Request) {
oauthState, err := r.Cookie("discord_oauthState")
if err != nil || r.FormValue("state") != oauthState.Value {
rw.WriteHeader(http.StatusBadRequest)
rw.Write([]byte("State does not match."))
return
}
token, err := discordCfg.Exchange(context.Background(), r.FormValue("code"))
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
rw.Write([]byte(err.Error()))
return
}
res, err := discordCfg.Client(context.Background(), token).Get("https://discord.com/api/users/@me")
if err != nil || res.StatusCode != 200 {
rw.WriteHeader(http.StatusInternalServerError)
if err != nil {
rw.Write([]byte(err.Error()))
} else {
rw.Write([]byte(res.Status))
}
return
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
rw.Write([]byte(err.Error()))
return
}
var account DiscordAccount
if err := json.Unmarshal(body, &account); err != nil {
rw.WriteHeader(http.StatusInternalServerError)
rw.Write([]byte(err.Error()))
return
}
raw, _, err := call(".", "canister", []string{
"--network=ic", "call", "accounts", "addDiscordAccount",
fmt.Sprintf("(record { id=\"%s\"; username=\"%s\"; discriminator=\"%s\" })", account.ID, account.Username, account.Discriminator),
}...)
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
rw.Write(raw)
rw.Write([]byte(err.Error()))
return
}
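// dfx prints the result in Candid textual form, e.g. ("<key>"); strip the
// surrounding ("...") to obtain the bare key.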
key := strings.TrimSpace(string(raw))
key = key[2 : len(key)-2]
rw.Write([]byte(fmt.Sprintf(`Hello %s#%s!
You can use the following key to link your principal: %s
$ dfx canister --network=ic --no-wallet call 45pum-byaaa-aaaam-aaanq-cai linkPrincipal "(\"%s\")"`,
account.Username, account.Discriminator,
key, key,
)))
})
http.HandleFunc("/twitter/callback", func(rw http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
token := q["oauth_token"][0]
verifier := q["oauth_verifier"][0]
accessToken, accessSecret, _ := twitterCfg.AccessToken(token, "", verifier)
rw.Write([]byte(fmt.Sprintf("Access Token: %s\nAccess Secret: %s\n", accessToken, accessSecret)))
})
log.Println("Listening on http://localhost:3000")
log.Fatal(http.ListenAndServe(":3000", nil))
}
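// generateStateCookie stores a random base64 value in a "<prefix>_oauthState" cookie and
// returns it, so the OAuth callback can compare it against the state parameter (CSRF check).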
func generateStateCookie(w http.ResponseWriter, prefix string) string {
var expiration = time.Now().Add(365 * 24 * time.Hour)
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
log.Print(err)
}
state := base64.URLEncoding.EncodeToString(b)
cookie := http.Cookie{
Name: fmt.Sprintf("%s_oauthState", prefix),
Value: state,
Expires: expiration,
}
http.SetCookie(w, &cookie)
return state
}
type DiscordAccount struct {
ID string `json:"id"`
Username string `json:"username"`
Avatar string `json:"avatar"`
Discriminator string `json:"discriminator"`
PublicFlags int `json:"public_flags"`
Flags int `json:"flags"`
Banner string `json:"banner"`
BannerColor string `json:"banner_color"`
AccentColor int `json:"accent_color"`
Locale string `json:"locale"`
MfaEnabled bool `json:"mfa_enabled"`
PremiumType int `json:"premium_type"`
}
| [
"\"DISCORD_CLIENT_ID\"",
"\"DISCORD_CLIENT_SECRET\"",
"\"TWITTER_API_KEY\"",
"\"TWITTER_API_SECRET\""
]
| []
| [
"DISCORD_CLIENT_ID",
"TWITTER_API_KEY",
"TWITTER_API_SECRET",
"DISCORD_CLIENT_SECRET"
]
| [] | ["DISCORD_CLIENT_ID", "TWITTER_API_KEY", "TWITTER_API_SECRET", "DISCORD_CLIENT_SECRET"] | go | 4 | 0 | |
vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go | // Copyright 2016 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package invoke
import (
"context"
"os"
"path/filepath"
"github.com/containernetworking/cni/pkg/types"
)
func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) {
if exec == nil {
exec = defaultExec
}
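// CNI_PATH holds the directories searched for plugin binaries, separated by the
// platform's list separator (like PATH).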
paths := filepath.SplitList(os.Getenv("CNI_PATH"))
pluginPath, err := exec.FindInPath(delegatePlugin, paths)
if err != nil {
return "", nil, err
}
return pluginPath, exec, nil
}
// DelegateAdd calls the given delegate plugin with the CNI ADD action and
// JSON configuration
func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
// Look up the plugin's path and the exec implementation to use
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return nil, err
}
// DelegateAdd will override the original "CNI_COMMAND" env from process with ADD
return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec)
}
// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
// JSON configuration
func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return err
}
// DelegateCheck will override the original CNI_COMMAND env from process with CHECK
return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec)
}
// DelegateDel calls the given delegate plugin with the CNI DEL action and
// JSON configuration
func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
if err != nil {
return err
}
// DelegateDel will override the original CNI_COMMAND env from process with DEL
return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec)
}
// return CNIArgs used by delegation
func delegateArgs(action string) *DelegateArgs {
return &DelegateArgs{
Command: action,
}
}
| [
"\"CNI_PATH\""
]
| []
| [
"CNI_PATH"
]
| [] | ["CNI_PATH"] | go | 1 | 0 | |
pypy/interpreter/test/test_app_main.py | """
Tests for the entry point of pypy-c, app_main.py.
"""
from __future__ import with_statement
import py
import sys, os, re, runpy, subprocess
from rpython.tool.udir import udir
from contextlib import contextmanager
from pypy.conftest import pypydir
from lib_pypy._pypy_interact import irc_header
banner = sys.version.splitlines()[0]
app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py')
app_main = os.path.abspath(app_main)
_counter = 0
def _get_next_path(ext='.py'):
global _counter
p = udir.join('demo_test_app_main_%d%s' % (_counter, ext))
_counter += 1
return p
def getscript(source):
p = _get_next_path()
p.write(str(py.code.Source(source)))
return str(p)
def getscript_pyc(space, source):
p = _get_next_path()
p.write(str(py.code.Source(source)))
w_dir = space.wrap(str(p.dirpath()))
w_modname = space.wrap(p.purebasename)
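# Import the module once inside the target space so the .pyc gets written next to the
# source, then restore sys.path and drop any modules the import added.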
space.appexec([w_dir, w_modname], """(dir, modname):
import sys
d = sys.modules.copy()
sys.path.insert(0, dir)
__import__(modname)
sys.path.pop(0)
for key in sys.modules.keys():
if key not in d:
del sys.modules[key]
""")
p = str(p) + 'c'
assert os.path.isfile(p) # the .pyc file should have been created above
return p
def getscript_in_dir(source):
pdir = _get_next_path(ext='')
p = pdir.ensure(dir=1).join('__main__.py')
p.write(str(py.code.Source(source)))
# return relative path for testing purposes
return py.path.local().bestrelpath(pdir)
demo_script = getscript("""
print 'hello'
print 'Name:', __name__
print 'File:', __file__
import sys
print 'Exec:', sys.executable
print 'Argv:', sys.argv
print 'goodbye'
myvalue = 6*7
""")
crashing_demo_script = getscript("""
print 'Hello2'
myvalue2 = 11
ooups
myvalue2 = 22
print 'Goodbye2' # should not be reached
""")
class TestParseCommandLine:
def check_options(self, options, sys_argv, **expected):
assert sys.argv == sys_argv
for key, value in expected.items():
assert options[key] == value
for key, value in options.items():
if key not in expected:
assert not value, (
"option %r has unexpectedly the value %r" % (key, value))
def check(self, argv, env, **expected):
import StringIO
from pypy.interpreter import app_main
saved_env = os.environ.copy()
saved_sys_argv = sys.argv[:]
saved_sys_stdout = sys.stdout
saved_sys_stderr = sys.stdout
app_main.os = os
try:
os.environ.update(env)
sys.stdout = sys.stderr = StringIO.StringIO()
try:
options = app_main.parse_command_line(argv)
except SystemExit:
output = expected['output_contains']
assert output in sys.stdout.getvalue()
else:
self.check_options(options, **expected)
finally:
os.environ.clear()
os.environ.update(saved_env)
sys.argv[:] = saved_sys_argv
sys.stdout = saved_sys_stdout
sys.stderr = saved_sys_stderr
def test_all_combinations_I_can_think_of(self):
self.check([], {}, sys_argv=[''], run_stdin=True)
self.check(['-'], {}, sys_argv=['-'], run_stdin=True)
self.check(['-S'], {}, sys_argv=[''], run_stdin=True, no_site=1)
self.check(['-OO'], {}, sys_argv=[''], run_stdin=True, optimize=2)
self.check(['-O', '-O'], {}, sys_argv=[''], run_stdin=True, optimize=2)
self.check(['-Qnew'], {}, sys_argv=[''], run_stdin=True, division_new=1)
self.check(['-Qold'], {}, sys_argv=[''], run_stdin=True, division_new=0)
self.check(['-Qwarn'], {}, sys_argv=[''], run_stdin=True, division_warning=1)
self.check(['-Qwarnall'], {}, sys_argv=[''], run_stdin=True,
division_warning=2)
self.check(['-Q', 'new'], {}, sys_argv=[''], run_stdin=True, division_new=1)
self.check(['-SOQnew'], {}, sys_argv=[''], run_stdin=True,
no_site=1, optimize=1, division_new=1)
self.check(['-SOQ', 'new'], {}, sys_argv=[''], run_stdin=True,
no_site=1, optimize=1, division_new=1)
self.check(['-i'], {}, sys_argv=[''], run_stdin=True,
interactive=1, inspect=1)
self.check(['-?'], {}, output_contains='usage:')
self.check(['-h'], {}, output_contains='usage:')
self.check(['-S', '-tO', '-h'], {}, output_contains='usage:')
self.check(['-S', '-thO'], {}, output_contains='usage:')
self.check(['-S', '-tO', '--help'], {}, output_contains='usage:')
self.check(['-S', '-tO', '--info'], {}, output_contains='translation')
self.check(['-S', '-tO', '--version'], {}, output_contains='Python')
self.check(['-S', '-tOV'], {}, output_contains='Python')
self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''],
run_stdin=True, no_site=1)
self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass')
self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass')
self.check(['-cpass','x'], {}, sys_argv=['-c','x'], run_command='pass')
self.check(['-Sc', 'pass'], {}, sys_argv=['-c'], run_command='pass',
no_site=1)
self.check(['-Scpass'], {}, sys_argv=['-c'], run_command='pass', no_site=1)
self.check(['-c', '', ''], {}, sys_argv=['-c', ''], run_command='')
self.check(['-mfoo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'],
run_module=True)
self.check(['-m', 'foo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'],
run_module=True)
self.check(['-Smfoo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'],
run_module=True, no_site=1)
self.check(['-Sm', 'foo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'],
run_module=True, no_site=1)
self.check(['-', 'foo', 'bar'], {}, sys_argv=['-', 'foo', 'bar'],
run_stdin=True)
self.check(['foo', 'bar'], {}, sys_argv=['foo', 'bar'])
self.check(['foo', '-i'], {}, sys_argv=['foo', '-i'])
self.check(['-i', 'foo'], {}, sys_argv=['foo'], interactive=1, inspect=1)
self.check(['--', 'foo'], {}, sys_argv=['foo'])
self.check(['--', '-i', 'foo'], {}, sys_argv=['-i', 'foo'])
self.check(['--', '-', 'foo'], {}, sys_argv=['-', 'foo'], run_stdin=True)
self.check(['-Wbog'], {}, sys_argv=[''], warnoptions=['bog'], run_stdin=True)
self.check(['-W', 'ab', '-SWc'], {}, sys_argv=[''], warnoptions=['ab', 'c'],
run_stdin=True, no_site=1)
self.check([], {'PYTHONDEBUG': '1'}, sys_argv=[''], run_stdin=True, debug=1)
self.check([], {'PYTHONDONTWRITEBYTECODE': '1'}, sys_argv=[''], run_stdin=True, dont_write_bytecode=1)
self.check([], {'PYTHONNOUSERSITE': '1'}, sys_argv=[''], run_stdin=True, no_user_site=1)
self.check([], {'PYTHONUNBUFFERED': '1'}, sys_argv=[''], run_stdin=True, unbuffered=1)
self.check([], {'PYTHONVERBOSE': '1'}, sys_argv=[''], run_stdin=True, verbose=1)
def test_sysflags(self):
flags = (
("debug", "-d", "1"),
("py3k_warning", "-3", "1"),
("division_warning", "-Qwarn", "1"),
("division_warning", "-Qwarnall", "2"),
("division_new", "-Qnew", "1"),
(["inspect", "interactive"], "-i", "1"),
("optimize", "-O", "1"),
("optimize", "-OO", "2"),
("dont_write_bytecode", "-B", "1"),
("no_user_site", "-s", "1"),
("no_site", "-S", "1"),
("ignore_environment", "-E", "1"),
("tabcheck", "-t", "1"),
("tabcheck", "-tt", "2"),
("verbose", "-v", "1"),
("unicode", "-U", "1"),
("bytes_warning", "-b", "1"),
)
for flag, opt, value in flags:
if isinstance(flag, list): # this is for inspect&interactive
expected = {}
for flag1 in flag:
expected[flag1] = int(value)
else:
expected = {flag: int(value)}
self.check([opt, '-c', 'pass'], {}, sys_argv=['-c'],
run_command='pass', **expected)
def test_sysflags_envvar(self, monkeypatch):
monkeypatch.setenv('PYTHONNOUSERSITE', '1')
expected = {"no_user_site": True}
self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass', **expected)
class TestInteraction:
"""
These tests require pexpect (UNIX-only).
http://pexpect.sourceforge.net/
"""
def _spawn(self, *args, **kwds):
try:
import pexpect
except ImportError, e:
py.test.skip(str(e))
else:
# Version is of the style "0.999" or "2.1". Older versions of
# pexpect try to get the fileno of stdin, which generally won't
# work with py.test (due to sys.stdin being a DontReadFromInput
# instance).
version = map(int, pexpect.__version__.split('.'))
# I only tested 0.999 and 2.1. The former does not work, the
# latter does. Feel free to refine this measurement.
# -exarkun, 17/12/2007
if version < [2, 1]:
py.test.skip(
"pexpect version too old, requires 2.1 or newer: %r" % (
pexpect.__version__,))
kwds.setdefault('timeout', 10)
print 'SPAWN:', ' '.join([args[0]] + args[1]), kwds
child = pexpect.spawn(*args, **kwds)
child.logfile = sys.stdout
return child
def spawn(self, argv):
return self._spawn(sys.executable, [app_main] + argv)
def test_interactive(self):
child = self.spawn([])
child.expect('Python ') # banner
child.expect('>>> ') # prompt
child.sendline('[6*7]')
child.expect(re.escape('[42]'))
child.sendline('def f(x):')
child.expect(re.escape('... '))
child.sendline(' return x + 100')
child.expect(re.escape('... '))
child.sendline('')
child.expect('>>> ')
child.sendline('f(98)')
child.expect('198')
child.expect('>>> ')
child.sendline('__name__')
child.expect("'__main__'")
child.expect('>>> ')
child.sendline('import sys')
child.expect('>>> ')
child.sendline("'' in sys.path")
child.expect("True")
def test_yes_irc_topic(self, monkeypatch):
monkeypatch.setenv('PYPY_IRC_TOPIC', '1')
child = self.spawn([])
child.expect(irc_header) # banner
def test_maybe_irc_topic(self):
import sys
pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info)
irc_topic = pypy_version_info[3] != 'final'
child = self.spawn([])
child.expect('>>>') # banner
if irc_topic:
assert irc_header in child.before
else:
assert irc_header not in child.before
def test_help(self):
# test that -h prints the usage, including the name of the executable
# which should be /full/path/to/app_main.py in this case
child = self.spawn(['-h'])
child.expect(r'usage: .*app_main.py \[option\]')
child.expect('PyPy options and arguments:')
def test_run_script(self):
child = self.spawn([demo_script])
idx = child.expect(['hello', 'Python ', '>>> '])
assert idx == 0 # no banner or prompt
child.expect(re.escape("Name: __main__"))
child.expect(re.escape('File: ' + demo_script))
child.expect(re.escape('Exec: ' + app_main))
child.expect(re.escape('Argv: ' + repr([demo_script])))
child.expect('goodbye')
def test_run_script_with_args(self):
argv = [demo_script, 'hello', 'world']
child = self.spawn(argv)
child.expect(re.escape('Argv: ' + repr(argv)))
child.expect('goodbye')
def test_no_such_script(self):
import errno
msg = os.strerror(errno.ENOENT) # 'No such file or directory'
child = self.spawn(['xxx-no-such-file-xxx'])
child.expect(re.escape(msg))
def test_option_i(self):
argv = [demo_script, 'foo', 'bar']
child = self.spawn(['-i'] + argv)
idx = child.expect(['hello', re.escape(banner)])
assert idx == 0 # no banner
child.expect(re.escape('File: ' + demo_script))
child.expect(re.escape('Argv: ' + repr(argv)))
child.expect('goodbye')
idx = child.expect(['>>> ', re.escape(banner)])
assert idx == 0 # prompt, but still no banner
child.sendline('myvalue * 102')
child.expect('4284')
child.sendline('__name__')
child.expect('__main__')
def test_option_i_crashing(self):
argv = [crashing_demo_script, 'foo', 'bar']
child = self.spawn(['-i'] + argv)
idx = child.expect(['Hello2', re.escape(banner)])
assert idx == 0 # no banner
child.expect('NameError')
child.sendline('myvalue2 * 1001')
child.expect('11011')
child.sendline('import sys; sys.argv')
child.expect(re.escape(repr(argv)))
child.sendline('sys.last_type.__name__')
child.expect(re.escape(repr('NameError')))
def test_options_i_c(self):
child = self.spawn(['-i', '-c', 'x=555'])
idx = child.expect(['>>> ', re.escape(banner)])
assert idx == 0 # prompt, but no banner
child.sendline('x')
child.expect('555')
child.sendline('__name__')
child.expect('__main__')
child.sendline('import sys; sys.argv')
child.expect(re.escape("['-c']"))
def test_options_i_c_crashing(self, monkeypatch):
monkeypatch.setenv('PYTHONPATH', None)
child = self.spawn(['-i', '-c', 'x=666;foobar'])
child.expect('NameError')
idx = child.expect(['>>> ', re.escape(banner)])
assert idx == 0 # prompt, but no banner
child.sendline('x')
child.expect('666')
child.sendline('__name__')
child.expect('__main__')
child.sendline('import sys; sys.argv')
child.expect(re.escape("['-c']"))
child.sendline('sys.last_type.__name__')
child.expect(re.escape(repr('NameError')))
def test_atexit(self):
child = self.spawn([])
child.expect('>>> ')
child.sendline('def f(): print "foobye"')
child.sendline('')
child.sendline('import atexit; atexit.register(f)')
child.sendline('6*7')
child.expect('42')
# pexpect's sendeof() is confused by py.test capturing, though
# I think that it is a bug of sendeof()
old = sys.stdin
try:
sys.stdin = child
child.sendeof()
finally:
sys.stdin = old
child.expect('foobye')
def test_pythonstartup(self, monkeypatch):
monkeypatch.setenv('PYTHONPATH', None)
monkeypatch.setenv('PYTHONSTARTUP', crashing_demo_script)
child = self.spawn([])
child.expect(re.escape(banner))
child.expect('Traceback')
child.expect('NameError')
child.expect('>>> ')
child.sendline('[myvalue2]')
child.expect(re.escape('[11]'))
child.expect('>>> ')
child = self.spawn(['-i', demo_script])
for line in ['hello', 'goodbye', '>>> ']:
idx = child.expect([line, 'Hello2'])
assert idx == 0 # no PYTHONSTARTUP run here
child.sendline('myvalue2')
child.expect('Traceback')
child.expect('NameError')
def test_pythonstartup_file1(self, monkeypatch):
monkeypatch.setenv('PYTHONPATH', None)
monkeypatch.setenv('PYTHONSTARTUP', demo_script)
child = self.spawn([])
child.expect('File: [^\n]+\.py')
child.expect('goodbye')
child.expect('>>> ')
child.sendline('[myvalue]')
child.expect(re.escape('[42]'))
child.expect('>>> ')
child.sendline('__file__')
child.expect('Traceback')
child.expect('NameError')
def test_pythonstartup_file2(self, monkeypatch):
monkeypatch.setenv('PYTHONPATH', None)
monkeypatch.setenv('PYTHONSTARTUP', crashing_demo_script)
child = self.spawn([])
child.expect('Traceback')
child.expect('>>> ')
child.sendline('__file__')
child.expect('Traceback')
child.expect('NameError')
def test_ignore_python_startup(self):
old = os.environ.get('PYTHONSTARTUP', '')
try:
os.environ['PYTHONSTARTUP'] = crashing_demo_script
child = self.spawn(['-E'])
child.expect(re.escape(banner))
index = child.expect(['Traceback', '>>> '])
assert index == 1 # no traceback
finally:
os.environ['PYTHONSTARTUP'] = old
def test_ignore_python_inspect(self):
os.environ['PYTHONINSPECT_'] = '1'
try:
child = self.spawn(['-E', '-c', 'pass'])
from pexpect import EOF
index = child.expect(['>>> ', EOF])
assert index == 1 # no prompt
finally:
del os.environ['PYTHONINSPECT_']
def test_python_path_keeps_duplicates(self):
old = os.environ.get('PYTHONPATH', '')
try:
os.environ['PYTHONPATH'] = 'foobarbaz:foobarbaz'
child = self.spawn(['-c', 'import sys; print sys.path'])
child.expect(r"\['', 'foobarbaz', 'foobarbaz', ")
finally:
os.environ['PYTHONPATH'] = old
def test_ignore_python_path(self):
old = os.environ.get('PYTHONPATH', '')
try:
os.environ['PYTHONPATH'] = 'foobarbaz'
child = self.spawn(['-E', '-c', 'import sys; print sys.path'])
from pexpect import EOF
index = child.expect(['foobarbaz', EOF])
assert index == 1 # no foobarbaz
finally:
os.environ['PYTHONPATH'] = old
def test_unbuffered(self):
line = 'import os,sys;sys.stdout.write(str(789));os.read(0,1)'
child = self.spawn(['-u', '-c', line])
child.expect('789') # expect to see it before the timeout hits
child.sendline('X')
def test_options_i_m(self, monkeypatch):
if sys.platform == "win32":
skip("close_fds is not supported on Windows platforms")
if not hasattr(runpy, '_run_module_as_main'):
skip("requires CPython >= 2.6")
p = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'mymodule.py')
p = os.path.abspath(p)
monkeypatch.chdir(os.path.dirname(app_main))
child = self.spawn(['-i',
'-m', 'test.mymodule',
'extra'])
child.expect('mymodule running')
child.expect('Name: __main__')
child.expect(re.escape('File: ' + p))
child.expect(re.escape('Argv: ' + repr([p, 'extra'])))
child.expect('>>> ')
child.sendline('somevalue')
child.expect(re.escape(repr("foobar")))
child.expect('>>> ')
child.sendline('import sys')
child.sendline('"test" in sys.modules')
child.expect('True')
child.sendline('"test.mymodule" in sys.modules')
child.expect('False')
child.sendline('sys.path[0]')
child.expect("''")
def test_option_i_noexit(self):
child = self.spawn(['-i', '-c', 'import sys; sys.exit(1)'])
child.expect('Traceback')
child.expect('SystemExit: 1')
def test_options_u_i(self):
if sys.platform == "win32":
skip("close_fds is not supported on Windows platforms")
import subprocess, select, os
python = sys.executable
pipe = subprocess.Popen([python, app_main, "-u", "-i"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=0, close_fds=True)
iwtd, owtd, ewtd = select.select([pipe.stdout], [], [], 5)
assert iwtd # else we timed out
data = os.read(pipe.stdout.fileno(), 1024)
assert data.startswith('Python')
def test_paste_several_lines_doesnt_mess_prompt(self):
py.test.skip("this can only work if readline is enabled")
child = self.spawn([])
child.expect('>>> ')
child.sendline('if 1:\n print 42\n')
child.expect('... print 42')
child.expect('... ')
child.expect('42')
child.expect('>>> ')
def test_pythoninspect(self):
os.environ['PYTHONINSPECT_'] = '1'
try:
path = getscript("""
print 6*7
""")
child = self.spawn([path])
child.expect('42')
child.expect('>>> ')
finally:
del os.environ['PYTHONINSPECT_']
def test_set_pythoninspect(self):
path = getscript("""
import os
os.environ['PYTHONINSPECT'] = '1'
print 6*7
""")
child = self.spawn([path])
child.expect('42')
child.expect('>>> ')
def test_clear_pythoninspect(self):
os.environ['PYTHONINSPECT_'] = '1'
try:
path = getscript("""
import os
del os.environ['PYTHONINSPECT']
""")
child = self.spawn([path])
child.expect('>>> ')
finally:
del os.environ['PYTHONINSPECT_']
def test_stdout_flushes_before_stdin_blocks(self):
# This doesn't really test app_main.py, but a behavior that
# can only be checked on top of py.py with pexpect.
path = getscript("""
import sys
sys.stdout.write('Are you suggesting coconuts migrate? ')
line = sys.stdin.readline()
assert line.rstrip() == 'Not at all. They could be carried.'
print 'A five ounce bird could not carry a one pound coconut.'
""")
py_py = os.path.join(pypydir, 'bin', 'pyinteractive.py')
child = self._spawn(sys.executable, [py_py, '-S', path])
child.expect('Are you suggesting coconuts migrate?', timeout=120)
child.sendline('Not at all. They could be carried.')
child.expect('A five ounce bird could not carry a one pound coconut.')
def test_no_space_before_argument(self, monkeypatch):
if not hasattr(runpy, '_run_module_as_main'):
skip("requires CPython >= 2.6")
child = self.spawn(['-cprint "hel" + "lo"'])
child.expect('hello')
monkeypatch.chdir(os.path.dirname(app_main))
child = self.spawn(['-mtest.mymodule'])
child.expect('mymodule running')
def test_ps1_only_if_interactive(self):
argv = ['-c', 'import sys; print hasattr(sys, "ps1")']
child = self.spawn(argv)
child.expect('False')
class TestNonInteractive:
def run_with_status_code(self, cmdline, senddata='', expect_prompt=False,
expect_banner=False, python_flags='', env=None):
if os.name == 'nt':
try:
import __pypy__
except:
py.test.skip('app_main cannot run on non-pypy for windows')
cmdline = '%s %s "%s" %s' % (sys.executable, python_flags,
app_main, cmdline)
print 'POPEN:', cmdline
process = subprocess.Popen(
cmdline,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, env=env,
universal_newlines=True
)
child_in, child_out_err = process.stdin, process.stdout
child_in.write(senddata)
child_in.close()
data = child_out_err.read()
child_out_err.close()
process.wait()
assert (banner in data) == expect_banner # no banner unless expected
assert ('>>> ' in data) == expect_prompt # no prompt unless expected
return data, process.returncode
def run(self, *args, **kwargs):
data, status = self.run_with_status_code(*args, **kwargs)
return data
def test_script_on_stdin(self):
for extraargs, expected_argv in [
('', ['']),
('-', ['-']),
('- hello world', ['-', 'hello', 'world']),
]:
data = self.run('%s < "%s"' % (extraargs, demo_script))
assert "hello" in data
assert "Name: __main__" in data
assert "File: <stdin>" in data
assert ("Exec: " + app_main) in data
assert ("Argv: " + repr(expected_argv)) in data
assert "goodbye" in data
def test_run_crashing_script(self):
data = self.run('"%s"' % (crashing_demo_script,))
assert 'Hello2' in data
assert 'NameError' in data
assert 'Goodbye2' not in data
def test_crashing_script_on_stdin(self):
data = self.run(' < "%s"' % (crashing_demo_script,))
assert 'Hello2' in data
assert 'NameError' in data
assert 'Goodbye2' not in data
def test_option_W(self):
data = self.run('-W d -c "print 42"')
assert '42' in data
data = self.run('-Wd -c "print 42"')
assert '42' in data
def test_option_W_crashing(self):
data = self.run('-W')
assert "Argument expected for the '-W' option" in data
def test_option_W_arg_ignored(self):
data = self.run('-Wc')
assert "Invalid -W option ignored: invalid action: 'c'" in data
def test_option_W_arg_ignored2(self):
data = self.run('-W-W')
assert "Invalid -W option ignored: invalid action:" in data
def test_option_c(self):
data = self.run('-c "print 6**5"')
assert '7776' in data
def test_no_pythonstartup(self, monkeypatch):
monkeypatch.setenv('PYTHONSTARTUP', crashing_demo_script)
data = self.run('"%s"' % (demo_script,))
assert 'Hello2' not in data
data = self.run('-c pass')
assert 'Hello2' not in data
def test_pythonwarnings(self, monkeypatch):
# PYTHONWARNINGS_ is special-cased by app_main: we cannot set PYTHONWARNINGS
# directly because otherwise the warnings raised from within pypy would be
# turned into errors.
monkeypatch.setenv('PYTHONWARNINGS_', "once,error")
data = self.run('-W ignore -W default '
'-c "import sys; print sys.warnoptions"')
assert "['ignore', 'default', 'once', 'error']" in data
def test_option_m(self, monkeypatch):
if not hasattr(runpy, '_run_module_as_main'):
skip("requires CPython >= 2.6")
p = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'mymodule.py')
p = os.path.abspath(p)
monkeypatch.chdir(os.path.dirname(app_main))
data = self.run('-m test.mymodule extra')
assert 'mymodule running' in data
assert 'Name: __main__' in data
# Ignoring case for Windows: abspath currently behaves differently from
# autopath concerning drive letters.
assert ('File: ' + p) in data
assert ('Argv: ' + repr([p, 'extra'])) in data
def test_pythoninspect_doesnt_override_isatty(self):
os.environ['PYTHONINSPECT_'] = '1'
try:
data = self.run('', senddata='6*7\nprint 2+3\n')
assert data == '5\n'
finally:
del os.environ['PYTHONINSPECT_']
def test_i_flag_overrides_isatty(self):
data = self.run('-i', senddata='6*7\nraise SystemExit\n',
expect_prompt=True, expect_banner=True)
assert '42\n' in data
# if a file name is passed, the banner is never printed but
# we get a prompt anyway
cmdline = '-i %s' % getscript("""
print 'hello world'
""")
data = self.run(cmdline, senddata='6*7\nraise SystemExit\n',
expect_prompt=True, expect_banner=False)
assert 'hello world\n' in data
assert '42\n' in data
def test_putenv_fires_interactive_within_process(self):
try:
import __pypy__
except ImportError:
py.test.skip("This can be only tested on PyPy with real_getenv")
# should be noninteractive when piped in
data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n'
self.run('', senddata=data, expect_prompt=False)
# should go interactive with -c
data = data.replace('\n', ';')
self.run("-c '%s'" % data, expect_prompt=True)
def test_option_S_copyright(self):
data = self.run('-S -i', expect_prompt=True, expect_banner=True)
assert 'copyright' not in data
def test_non_interactive_stdout_fully_buffered(self):
if os.name == 'nt':
try:
import __pypy__
except:
py.test.skip('app_main cannot run on non-pypy for windows')
path = getscript(r"""
import sys, time
sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers
time.sleep(1)
sys.stderr.write('\x00[STDERR]\n\x00')
time.sleep(1)
# stdout flushed automatically here
""")
cmdline = '%s -u "%s" %s' % (sys.executable, app_main, path)
print 'POPEN:', cmdline
child_in, child_out_err = os.popen4(cmdline)
data = child_out_err.read(11)
assert data == '\x00[STDERR]\n\x00' # from stderr
child_in.close()
data = child_out_err.read(11)
assert data == '\x00(STDOUT)\n\x00' # from stdout
child_out_err.close()
def test_non_interactive_stdout_unbuffered(self, monkeypatch):
monkeypatch.setenv('PYTHONUNBUFFERED', '1')
if os.name == 'nt':
try:
import __pypy__
except:
py.test.skip('app_main cannot run on non-pypy for windows')
path = getscript(r"""
import sys, time
sys.stdout.write('\x00(STDOUT)\n\x00')
time.sleep(1)
sys.stderr.write('\x00[STDERR]\n\x00')
time.sleep(1)
# stdout flushed automatically here
""")
cmdline = '%s -E "%s" %s' % (sys.executable, app_main, path)
print 'POPEN:', cmdline
child_in, child_out_err = os.popen4(cmdline)
data = child_out_err.read(11)
assert data == '\x00(STDOUT)\n\x00' # from stdout
data = child_out_err.read(11)
assert data == '\x00[STDERR]\n\x00' # from stderr
child_out_err.close()
child_in.close()
def test_proper_sys_path(self, tmpdir):
data = self.run('-c "import _ctypes"', python_flags='-S')
if data.startswith('Traceback'):
py.test.skip("'python -S' cannot import extension modules: "
"see probably http://bugs.python.org/issue586680")
@contextmanager
def chdir_and_unset_pythonpath(new_cwd):
old_cwd = new_cwd.chdir()
old_pythonpath = os.getenv('PYTHONPATH')
os.unsetenv('PYTHONPATH')
try:
yield
finally:
old_cwd.chdir()
# Can't call putenv with a None argument.
if old_pythonpath is not None:
os.putenv('PYTHONPATH', old_pythonpath)
tmpdir.join('site.py').write('print "SHOULD NOT RUN"')
runme_py = tmpdir.join('runme.py')
runme_py.write('print "some text"')
cmdline = str(runme_py)
with chdir_and_unset_pythonpath(tmpdir):
data = self.run(cmdline, python_flags='-S')
assert data == "some text\n"
runme2_py = tmpdir.mkdir('otherpath').join('runme2.py')
runme2_py.write('print "some new text"\n'
'import sys\n'
'print sys.path\n')
cmdline2 = str(runme2_py)
with chdir_and_unset_pythonpath(tmpdir):
data = self.run(cmdline2, python_flags='-S')
assert data.startswith("some new text\n")
assert repr(str(tmpdir.join('otherpath'))) in data
assert "''" not in data
data = self.run('-c "import sys; print sys.path"')
assert data.startswith("[''")
def test_pyc_commandline_argument(self):
p = getscript_pyc(self.space, "print 6*7\n")
assert os.path.isfile(p) and p.endswith('.pyc')
data = self.run(p)
assert data == 'in _run_compiled_module\n'
def test_main_in_dir_commandline_argument(self):
if not hasattr(runpy, '_run_module_as_main'):
skip("requires CPython >= 2.6")
p = getscript_in_dir('import sys; print sys.argv[0]\n')
data = self.run(p)
assert data == p + '\n'
data = self.run(p + os.sep)
assert data == p + os.sep + '\n'
def test_getfilesystemencoding(self):
py.test.skip("encoding is only set if stdout.isatty(), test is flawed")
if sys.version_info < (2, 7):
skip("test requires Python >= 2.7")
p = getscript_in_dir("""
import sys
sys.stdout.write(u'15\u20ac')
sys.stdout.flush()
""")
env = os.environ.copy()
env["LC_CTYPE"] = 'en_US.UTF-8'
data = self.run(p, env=env)
assert data == '15\xe2\x82\xac'
def test_pythonioencoding(self):
if sys.version_info < (2, 7):
skip("test requires Python >= 2.7")
for encoding, expected in [
("iso-8859-15", "15\xa4"),
("utf-8", '15\xe2\x82\xac'),
("utf-16-le", '1\x005\x00\xac\x20'),
("iso-8859-1:ignore", "15"),
("iso-8859-1:replace", "15?"),
("iso-8859-1:backslashreplace", "15\\u20ac"),
]:
p = getscript_in_dir("""
import sys
sys.stdout.write(u'15\u20ac')
sys.stdout.flush()
""")
env = os.environ.copy()
env["PYTHONIOENCODING"] = encoding
data = self.run(p, env=env)
assert data == expected
def test_sys_exit_pythonioencoding(self):
if sys.version_info < (2, 7):
skip("test required Python >= 2.7")
p = getscript_in_dir("""
import sys
sys.exit(u'15\u20ac')
""")
env = os.environ.copy()
env["PYTHONIOENCODING"] = "utf-8"
data, status = self.run_with_status_code(p, env=env)
assert status == 1
assert data.startswith("15\xe2\x82\xac")
class TestAppMain:
def test_print_info(self):
from pypy.interpreter import app_main
import sys, cStringIO
prev_so = sys.stdout
prev_ti = getattr(sys, 'pypy_translation_info', 'missing')
sys.pypy_translation_info = {
'translation.foo': True,
'translation.bar': 42,
'translation.egg.something': None,
'objspace.x': 'hello',
}
try:
sys.stdout = f = cStringIO.StringIO()
py.test.raises(SystemExit, app_main.print_info)
finally:
sys.stdout = prev_so
if prev_ti == 'missing':
del sys.pypy_translation_info
else:
sys.pypy_translation_info = prev_ti
assert f.getvalue() == ("[objspace]\n"
" x = 'hello'\n"
"[translation]\n"
" bar = 42\n"
" [egg]\n"
" something = None\n"
" foo = True\n")
class AppTestAppMain:
def setup_class(self):
# ----------------------------------------
# setup code for test_setup_bootstrap_path
# ----------------------------------------
from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION
cpy_ver = '%d.%d' % CPYTHON_VERSION[:2]
from lib_pypy._pypy_interact import irc_header
goal_dir = os.path.dirname(app_main)
# build a directory hierarchy which contains both bin/pypy-c and
# lib/pypy1.2/*
prefix = udir.join('pathtest').ensure(dir=1)
fake_exe = 'bin/pypy-c'
if sys.platform == 'win32':
fake_exe = 'pypy-c.exe'
fake_exe = prefix.join(fake_exe).ensure(file=1)
expected_path = [str(prefix.join(subdir).ensure(dir=1))
for subdir in ('lib_pypy',
'lib-python/%s' % cpy_ver)]
# an empty directory from where we can't find the stdlib
tmp_dir = str(udir.join('tmp').ensure(dir=1))
self.w_goal_dir = self.space.wrap(goal_dir)
self.w_fake_exe = self.space.wrap(str(fake_exe))
self.w_expected_path = self.space.wrap(expected_path)
self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir))
self.w_is_release = self.space.wrap(PYPY_VERSION[3] == "final")
self.w_tmp_dir = self.space.wrap(tmp_dir)
foo_py = prefix.join('foo.py')
foo_py.write("pass")
self.w_foo_py = self.space.wrap(str(foo_py))
def test_setup_bootstrap_path(self):
# Check how sys.path is handled depending on if we can find a copy of
# the stdlib in setup_bootstrap_path.
import sys, os
old_sys_path = sys.path[:]
old_cwd = os.getcwd()
sys.path.append(self.goal_dir)
# make sure cwd does not contain a stdlib
if self.tmp_dir.startswith(self.trunkdir):
skip('TMPDIR is inside the PyPy source')
os.chdir(self.tmp_dir)
tmp_pypy_c = os.path.join(self.tmp_dir, 'pypy-c')
try:
import app_main
app_main.setup_bootstrap_path(tmp_pypy_c) # stdlib not found
assert sys.executable == ''
assert sys.path == old_sys_path + [self.goal_dir]
app_main.setup_bootstrap_path(self.fake_exe)
if not sys.platform == 'win32':
# an existing file is always 'executable' on windows
assert sys.executable == '' # not executable!
assert sys.path == old_sys_path + [self.goal_dir]
os.chmod(self.fake_exe, 0755)
app_main.setup_bootstrap_path(self.fake_exe)
assert sys.executable == self.fake_exe
assert self.goal_dir not in sys.path
newpath = sys.path[:]
if newpath[0].endswith('__extensions__'):
newpath = newpath[1:]
# we get at least 'expected_path', and maybe more (e.g. plat-linux2)
assert newpath[:len(self.expected_path)] == self.expected_path
finally:
sys.path[:] = old_sys_path
os.chdir(old_cwd)
def test_trunk_can_be_prefix(self):
import sys
import os
old_sys_path = sys.path[:]
sys.path.append(self.goal_dir)
try:
import app_main
pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c')
app_main.setup_bootstrap_path(pypy_c)
newpath = sys.path[:]
# we get at least lib_pypy
# lib-python/X.Y.Z, and maybe more (e.g. plat-linux2)
assert len(newpath) >= 2
for p in newpath:
assert p.startswith(self.trunkdir)
finally:
sys.path[:] = old_sys_path
def test_entry_point(self):
import sys
import os
old_sys_path = sys.path[:]
sys.path.append(self.goal_dir)
try:
import app_main
pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c')
app_main.entry_point(pypy_c, [self.foo_py])
# assert it did not crash
finally:
sys.path[:] = old_sys_path
| []
| []
| [
"PYTHONINSPECT",
"PYTHONSTARTUP",
"PYTHONPATH",
"PYTHONINSPECT_"
]
| [] | ["PYTHONINSPECT", "PYTHONSTARTUP", "PYTHONPATH", "PYTHONINSPECT_"] | python | 4 | 0 | |
google/sdk.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/JeroenVanSteijn/oauth2"
)
type sdkCredentials struct {
Data []struct {
Credential struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
TokenExpiry *time.Time `json:"token_expiry"`
} `json:"credential"`
Key struct {
Account string `json:"account"`
Scope string `json:"scope"`
} `json:"key"`
}
}
// An SDKConfig provides access to tokens from an account already
// authorized via the Google Cloud SDK.
type SDKConfig struct {
conf oauth2.Config
initialToken *oauth2.Token
}
// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
// account. If account is empty, the account currently active in
// Google Cloud SDK properties is used.
// Google Cloud SDK credentials must be created by running `gcloud auth`
// before using this function.
// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
func NewSDKConfig(account string) (*SDKConfig, error) {
configPath, err := sdkConfigPath()
if err != nil {
return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
}
credentialsPath := filepath.Join(configPath, "credentials")
f, err := os.Open(credentialsPath)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
}
defer f.Close()
var c sdkCredentials
if err := json.NewDecoder(f).Decode(&c); err != nil {
return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
}
if len(c.Data) == 0 {
return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
}
if account == "" {
propertiesPath := filepath.Join(configPath, "properties")
f, err := os.Open(propertiesPath)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
}
defer f.Close()
ini, err := parseINI(f)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
}
core, ok := ini["core"]
if !ok {
return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
}
active, ok := core["account"]
if !ok {
return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
}
account = active
}
for _, d := range c.Data {
if account == "" || d.Key.Account == account {
if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
}
var expiry time.Time
if d.Credential.TokenExpiry != nil {
expiry = *d.Credential.TokenExpiry
}
return &SDKConfig{
conf: oauth2.Config{
ClientID: d.Credential.ClientID,
ClientSecret: d.Credential.ClientSecret,
Scopes: strings.Split(d.Key.Scope, " "),
Endpoint: Endpoint,
RedirectURL: "oob",
},
initialToken: &oauth2.Token{
AccessToken: d.Credential.AccessToken,
RefreshToken: d.Credential.RefreshToken,
Expiry: expiry,
},
}, nil
}
}
return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
}
// Client returns an HTTP client using Google Cloud SDK credentials to
// authorize requests. The token will auto-refresh as necessary. The
// underlying http.RoundTripper will be obtained using the provided
// context. The returned client and its Transport should not be
// modified.
func (c *SDKConfig) Client(ctx context.Context) *http.Client {
return &http.Client{
Transport: &oauth2.Transport{
Source: c.TokenSource(ctx),
},
}
}
// TokenSource returns an oauth2.TokenSource that retrieves tokens from
// Google Cloud SDK credentials using the provided context.
// It will return the current access token stored in the credentials,
// and refresh it when it expires, but it won't update the credentials
// with the new access token.
func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
return c.conf.TokenSource(ctx, c.initialToken)
}
// Scopes are the OAuth 2.0 scopes the current account is authorized for.
func (c *SDKConfig) Scopes() []string {
return c.conf.Scopes
}
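// parseINI reads the minimal gcloud properties format: "[section]" headers,
// "key = value" pairs and ";" comments, for example:
//   [core]
//   account = someone@example.com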
func parseINI(ini io.Reader) (map[string]map[string]string, error) {
result := map[string]map[string]string{
"": {}, // root section
}
scanner := bufio.NewScanner(ini)
currentSection := ""
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(line, ";") {
// comment.
continue
}
if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
currentSection = strings.TrimSpace(line[1 : len(line)-1])
result[currentSection] = map[string]string{}
continue
}
parts := strings.SplitN(line, "=", 2)
if len(parts) == 2 && parts[0] != "" {
result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning ini: %v", err)
}
return result, nil
}
// sdkConfigPath tries to guess where the gcloud config is located.
// It can be overridden during tests.
var sdkConfigPath = func() (string, error) {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
}
homeDir := guessUnixHomeDir()
if homeDir == "" {
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
}
return filepath.Join(homeDir, ".config", "gcloud"), nil
}
func guessUnixHomeDir() string {
// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
if v := os.Getenv("HOME"); v != "" {
return v
}
// Else, fall back to user.Current:
if u, err := user.Current(); err == nil {
return u.HomeDir
}
return ""
}
| [
"\"APPDATA\"",
"\"HOME\""
]
| []
| [
"APPDATA",
"HOME"
]
| [] | ["APPDATA", "HOME"] | go | 2 | 0 | |
stream_anywhere/wsgi.py | """
WSGI config for the stream_anywhere project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stream_anywhere.settings.develop')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/e2e/autoscaling/cluster_size_autoscaling.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
)
const (
defaultTimeout = 3 * time.Minute
resizeTimeout = 5 * time.Minute
manualResizeTimeout = 6 * time.Minute
scaleUpTimeout = 5 * time.Minute
scaleUpTriggerTimeout = 2 * time.Minute
scaleDownTimeout = 20 * time.Minute
podTimeout = 2 * time.Minute
nodesRecoverTimeout = 5 * time.Minute
rcCreationRetryTimeout = 4 * time.Minute
rcCreationRetryDelay = 20 * time.Second
makeSchedulableTimeout = 10 * time.Minute
makeSchedulableDelay = 20 * time.Second
freshStatusLimit = 20 * time.Second
gkeUpdateTimeout = 15 * time.Minute
gkeNodepoolNameKey = "cloud.google.com/gke-nodepool"
disabledTaint = "DisabledForAutoscalingTest"
criticalAddonsOnlyTaint = "CriticalAddonsOnly"
newNodesForScaledownTests = 2
unhealthyClusterThreshold = 4
caNoScaleUpStatus = "NoActivity"
caOngoingScaleUpStatus = "InProgress"
timestampFormat = "2006-01-02 15:04:05 -0700 MST"
expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"
gpuLabel = "cloud.google.com/gke-accelerator"
)
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var c clientset.Interface
var nodeCount int
var coreCount int64
var memAllocatableMb int
var originalSizes map[string]int
ginkgo.BeforeEach(func() {
c = f.ClientSet
e2eskipper.SkipUnlessProviderIs("gce", "gke")
originalSizes = make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
// Give instances time to spin up
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
ginkgo.By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
framework.ExpectNotEqual(nodeCount, 0)
mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
memAllocatableMb = int((&mem).Value() / 1024 / 1024)
framework.ExpectEqual(nodeCount, sum)
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(5)
framework.ExpectNoError(err)
if !val {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
}
})
ginkgo.AfterEach(func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
for _, size := range originalSizes {
expectedNodes += size
}
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
makeSchedulableLoop:
for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) {
for _, n := range nodes.Items {
err = makeNodeSchedulable(c, &n, true)
switch err.(type) {
case CriticalAddonsOnlyError:
continue makeSchedulableLoop
default:
framework.ExpectNoError(err)
}
}
break
}
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ginkgo.By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated
eventFound := false
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
ginkgo.By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
ginkgo.By("NotTriggerScaleUp event found")
eventFound = true
break EventsLoop
}
}
}
framework.ExpectEqual(eventFound, true)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, time.Second))
})
simpleScaleUpTest := func(unready int) {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout, unready))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}
ginkgo.It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
gpuType := os.Getenv("TESTED_GPU_TYPE")
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet(f.Namespace.Name)
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
ginkgo.By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
})
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet(f.Namespace.Name)
ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
ginkgo.By("Scale GPU deployment")
e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 2)
})
ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet(f.Namespace.Name)
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
})
ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet(f.Namespace.Name)
ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
ginkgo.By("Remove the only POD requiring GPU")
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
})
ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
})
ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info.
status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.ready == s.target && s.ready <= nodeCount
}, scaleUpTriggerTimeout)
framework.ExpectNoError(err)
unmanagedNodes := nodeCount - status.ready
ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caOngoingScaleUpStatus
}, scaleUpTriggerTimeout)
framework.ExpectNoError(err)
target := status.target
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("Expect no more scale-up to be happening after all pods are scheduled")
// Wait for a while until the scale-up finishes; we cannot read the CA status immediately
// after pods are scheduled, as the status config map is updated by CA once per loop iteration.
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caNoScaleUpStatus
}, 2*freshStatusLimit)
framework.ExpectNoError(err)
if status.target != target {
klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
}
framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false)
framework.ExpectEqual(status.status, caNoScaleUpStatus)
framework.ExpectEqual(status.ready, status.target)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes)
})
ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
nodes := getPoolNodes(f, extraPoolName)
framework.ExpectEqual(len(nodes), extraNodes)
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Allocatable[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+extraNodes+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
pods := nodeCount
newPods := 2
labels := map[string]string{
"anti-affinity": "yes",
}
ginkgo.By("starting a pod with anti-affinity on each node")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("scheduling extra pods with anti-affinity to existing ones")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.By("creating pods")
pods := nodeCount
newPods := 1
labels := map[string]string{
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("creating a pod requesting EmptyDir")
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
volumeLabels := labels.Set{
e2epv.VolumeSelectorKey: f.Namespace.Name,
}
selector := metav1.SetAsLabelSelector(volumeLabels)
ginkgo.By("creating volume & pvc")
diskName, err := e2epv.CreatePDWithRetry()
framework.ExpectNoError(err)
pvConfig := e2epv.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volumeLabels,
PVSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext3",
ReadOnly: false,
},
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig := e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
defer func() {
errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv, pvc = nil, nil
if diskName != "" {
framework.ExpectNoError(e2epv.DeletePDWithRetry(diskName))
}
}()
ginkgo.By("creating pods")
pods := nodeCount
labels := map[string]string{
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer func() {
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
klog.Infof("RC and pods not using volume deleted")
}()
ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
ginkgo.By("creating a pod requesting PVC")
pvcPodName := "pvc-pod"
newPods := 1
volumes := buildVolumes(pv, pvc)
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
defer func() {
e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
labelKey := "cluster-autoscaling-test.special-node"
labelValue := "true"
ginkgo.By("Finding the smallest MIG")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
if minSize == 0 {
newSizes := make(map[string]int)
for mig, size := range originalSizes {
newSizes[mig] = size
}
newSizes[minMig] = 1
setMigSizes(newSizes)
}
removeLabels := func(nodesToClean sets.String) {
ginkgo.By("Removing labels from nodes")
for node := range nodesToClean {
framework.RemoveLabelOffNode(c, node, labelKey)
}
}
nodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
nodesSet := sets.NewString(nodes...)
defer removeLabels(nodesSet)
ginkgo.By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
for node := range nodesSet {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
err = scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)
framework.ExpectNoError(err)
ginkgo.By("Waiting for new node to appear and annotating it")
framework.WaitForGroupSize(minMig, int32(minSize+1))
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
newNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
newNodesSet := sets.NewString(newNodes...)
newNodesSet.Delete(nodes...)
if len(newNodesSet) > 1 {
ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
klog.Infof("Usually only 1 new node is expected, investigating")
klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
if output, err := exec.Command("gcloud", "compute", "instances", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
klog.Infof("Gcloud compute instances list: %s", output)
} else {
klog.Errorf("Failed to get instances list: %v", err)
}
for newNode := range newNodesSet {
if output, err := execCmd("gcloud", "compute", "instances", "describe",
newNode,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
klog.Infof("Gcloud compute instances describe: %s", output)
} else {
klog.Errorf("Failed to get instances describe: %v", err)
}
}
// TODO: possibly remove broken node from newNodesSet to prevent removeLabel from crashing.
// However at this moment we DO WANT it to crash so that we don't check all test runs for the
// rare behavior, but only the broken ones.
}
ginkgo.By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
registeredNodes := sets.NewString()
for nodeName := range newNodesSet {
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
klog.Errorf("Failed to get node %v: %v", nodeName, err)
}
}
ginkgo.By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
for node := range registeredNodes {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
defer removeLabels(registeredNodes)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
})
ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
defer disableAutoscaler(extraPoolName, 1, 2)
extraPods := extraNodes + 1
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in the scale-down code. Adding 5 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
})
simpleScaleDownTest := func(unready int) {
cleanup, err := addKubeSystemPdbs(f)
defer cleanup()
framework.ExpectNoError(err)
ginkgo.By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + 2 + unready
increasedSize += val + 2 + unready
}
setMigSizes(newSizes)
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready))
}
ginkgo.It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
func() { simpleScaleDownTest(0) })
ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
func() {
e2eskipper.SkipUnlessSSHKeyPresent()
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
})
ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
e2eskipper.SkipUnlessProviderIs("gke")
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-1", 3)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))
ginkgo.By("Some node should be removed")
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in the scale-down code. Adding 10 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
})
ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
ginkgo.By("No nodes should be removed")
time.Sleep(scaleDownTimeout)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.ExpectEqual(len(nodes.Items), increasedSize)
})
})
ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Provider-specific setup
if framework.ProviderIs("gke") {
// GKE-specific setup
ginkgo.By("Add a new node pool with 0 nodes and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 0)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
} else {
// on GCE, run only if there are already at least 2 node groups
e2eskipper.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
ginkgo.By("Manually scale smallest node group to 0")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0)))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
}
ginkgo.By("Make remaining nodes unschedulable")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
for _, node := range nodes.Items {
err = makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
framework.ExpectNoError(err)
}
ginkgo.By("Run a scale-up test")
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= len(nodes.Items)+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
// The scale-to-0 test is split into two functions (one for GKE, one for GCE).
// The reason for it is that scenario is exactly the same,
// but setup & verification use different APIs.
//
// Scenario:
// (GKE only) add an extra node pool with size 1 & enable autoscaling for it
// (GCE only) find the smallest MIG & resize it to 1
// manually drain the single node from this node pool/MIG
// wait for cluster size to decrease
// verify the targeted node pool/MIG is of size 0
gkeScaleToZero := func() {
// GKE-specific setup
ginkgo.By("Add a new node pool with size 1 and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
ngNodes := getPoolNodes(f, extraPoolName)
framework.ExpectEqual(len(ngNodes), extraNodes)
for _, node := range ngNodes {
ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
}
for _, node := range ngNodes {
drainNode(f, node)
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, scaleDownTimeout))
// GKE-specific check
newSize := getPoolSize(f, extraPoolName)
framework.ExpectEqual(newSize, 0)
}
gceScaleToZero := func() {
// non-GKE only
ginkgo.By("Find smallest node group and manually scale it to a single node")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1)))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
framework.ExpectEqual(len(ngNodes) == 1, true)
node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
framework.ExpectNoError(err)
// this part is identical
drainNode(f, node)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < nodeCount-minSize+1 }, scaleDownTimeout))
// non-GKE only
newSize, err := framework.GroupSize(minMig)
framework.ExpectNoError(err)
framework.ExpectEqual(newSize, 0)
}
ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
if framework.ProviderIs("gke") { // In GKE, we can just add a node pool
gkeScaleToZero()
} else if len(originalSizes) >= 2 {
gceScaleToZero()
} else {
e2eskipper.Skipf("At least 2 node groups are needed for scale-to-0 tests")
}
})
ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
e2eskipper.SkipUnlessSSHKeyPresent()
clusterSize := nodeCount
for clusterSize < unhealthyClusterThreshold+1 {
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
}
// If new nodes are disconnected too soon, they'll be considered not started
// instead of unready, and the cluster won't be considered unhealthy.
//
// More precisely, Cluster Autoscaler compares the last transition time of
// several readiness conditions to the node creation time. If it's within
// 2 minutes, it'll assume the node is just starting and is not unhealthy.
//
// Nodes become ready in less than 1 minute after being created,
// so waiting an extra 2 minutes before breaking them (which triggers
// a readiness condition transition) should be sufficient, while
// making no assumptions about minimal node startup time.
time.Sleep(2 * time.Minute)
ginkgo.By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
framework.ExpectEqual(nodesToBreakCount <= len(nodes.Items), true)
nodesToBreak := nodes.Items[:nodesToBreakCount]
// TestUnderTemporaryNetworkFailure only removes connectivity to a single node,
// and accepts a func() callback. The loop is expanded into a recursive call
// to avoid duplicating TestUnderTemporaryNetworkFailure.
var testFunction func()
testFunction = func() {
if len(nodesToBreak) > 0 {
ntb := &nodesToBreak[0]
nodesToBreak = nodesToBreak[1:]
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
} else {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
time.Sleep(scaleUpTimeout)
currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
status, err := getClusterwideStatus(c)
framework.Logf("Clusterwide status: %v", status)
framework.ExpectNoError(err)
framework.ExpectEqual(status, "Unhealthy")
}
}
testFunction()
// Give nodes time to recover from network failure
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount+1 pods, each allocating 0.7 of a node's allocatable memory. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
defer cleanupFunc()
ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
})
ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount+1 pods, each allocating 0.7 of a node's allocatable memory. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
// Verify that cluster size has increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size > nodeCount }, time.Second))
})
ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount pods, each allocating 0.7 of a node's allocatable memory, on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
defer cleanupFunc1()
// Create nodeCount pods, each allocating 0.7 of a node's allocatable memory - one pod per node. Pods created here should preempt the pods created above.
cleanupFunc2 := ReserveMemoryWithPriority(f, "memory-reservation2", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, highPriorityClassName)
defer cleanupFunc2()
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
})
ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
defer cleanupFunc()
ginkgo.By("Waiting for scale down")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
})
ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
ginkgo.By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
time.Sleep(scaleDownTimeout)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == increasedSize }, time.Second))
})
})
func installNvidiaDriversDaemonSet(namespace string) {
ginkgo.By("Add daemonset which installs nvidia drivers")
// The link differs from the one in the GKE documentation; as discussed with @mindprince, this one should be used.
framework.RunKubectlOrDie(namespace, "apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
}
func execCmd(args ...string) *exec.Cmd {
klog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
}
func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) {
increasedSize := manuallyIncreaseClusterSize(f, migSizes)
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
numPods := len(nodes.Items) * podsPerNode
testID := string(uuid.NewUUID()) // So that we can label and find pods
labelMap := map[string]string{"test_id": testID}
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
ginkgo.By("Create a PodDisruptionBudget")
minAvailable := intstr.FromInt(numPods - pdbSize)
pdb := &policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pdb",
Namespace: namespace,
},
Spec: policyv1beta1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
MinAvailable: &minAvailable,
},
}
_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
defer func() {
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
}()
framework.ExpectNoError(err)
verifyFunction(increasedSize)
}
func getGkeAPIEndpoint() string {
gkeAPIEndpoint := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER")
if gkeAPIEndpoint == "" {
gkeAPIEndpoint = "https://test-container.sandbox.googleapis.com"
}
if strings.HasSuffix(gkeAPIEndpoint, "/") {
gkeAPIEndpoint = gkeAPIEndpoint[:len(gkeAPIEndpoint)-1]
}
return gkeAPIEndpoint
}
func getGKEURL(apiVersion string, suffix string) string {
out, err := execCmd("gcloud", "auth", "print-access-token").Output()
framework.ExpectNoError(err)
token := strings.Replace(string(out), "\n", "", -1)
return fmt.Sprintf("%s/%s/%s?access_token=%s",
getGkeAPIEndpoint(),
apiVersion,
suffix,
token)
}
func getGKEClusterURL(apiVersion string) string {
if isRegionalCluster() {
// TODO(bskiba): Use locations API for all clusters once it's graduated to v1.
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/locations/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Region,
framework.TestContext.CloudConfig.Cluster))
}
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster))
}
func getCluster(apiVersion string) (string, error) {
resp, err := http.Get(getGKEClusterURL(apiVersion))
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("error: %s %s", resp.Status, body)
}
return string(body), nil
}
func isAutoscalerEnabled(expectedMaxNodeCountInTargetPool int) (bool, error) {
apiVersion := "v1"
if isRegionalCluster() {
apiVersion = "v1beta1"
}
strBody, err := getCluster(apiVersion)
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"maxNodeCount\": "+strconv.Itoa(expectedMaxNodeCountInTargetPool)) {
return true, nil
}
return false, nil
}
func getClusterLocation() string {
if isRegionalCluster() {
return "--region=" + framework.TestContext.CloudConfig.Region
}
return "--zone=" + framework.TestContext.CloudConfig.Zone
}
func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
command := []string{"gcloud"}
if commandTrack == "beta" || commandTrack == "alpha" {
command = append(command, commandTrack)
}
command = append(command, args...)
command = append(command, getClusterLocation())
command = append(command, "--project="+framework.TestContext.CloudConfig.ProjectID)
return command
}
func getGcloudCommand(args []string) []string {
track := ""
if isRegionalCluster() {
track = "beta"
}
return getGcloudCommandFromTrack(track, args)
}
func isRegionalCluster() bool {
// TODO(bskiba): Use an appropriate indicator that the cluster is regional.
return framework.TestContext.CloudConfig.MultiZone
}
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
klog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--enable-autoscaling",
"--min-nodes=" + strconv.Itoa(minCount),
"--max-nodes=" + strconv.Itoa(maxCount),
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to enable autoscaling: %v", err)
}
klog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
val, err := isAutoscalerEnabled(maxCount)
if err == nil && val {
return nil
}
finalErr = err
}
return fmt.Errorf("autoscaler not enabled, last error: %v", finalErr)
}
func disableAutoscaler(nodePool string, minCount, maxCount int) error {
klog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--no-enable-autoscaling",
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to disable autoscaling: %v", err)
}
klog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
val, err := isAutoscalerEnabled(maxCount)
if err == nil && !val {
return nil
}
finalErr = err
}
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
}
func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
args := []string{"beta", "container", "node-pools", "create", name, "--quiet",
"--accelerator", "type=" + gpuType + ",count=" + strconv.Itoa(gpuCount),
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
func deleteNodePool(name string) {
klog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
err := wait.ExponentialBackoff(
wait.Backoff{Duration: 1 * time.Minute, Factor: float64(3), Steps: 3},
func() (bool, error) {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
klog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
return false, nil
}
klog.Infof("Node-pool deletion output: %s", output)
return true, nil
})
framework.ExpectNoError(err)
}
func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
nodes := make([]*v1.Node, 0, 1)
nodeList, err := e2enode.GetReadyNodesIncludingTainted(f.ClientSet)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}
// TODO: write a wrapper for ExpectNoErrorWithOffset()
framework.ExpectNoErrorWithOffset(0, err)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
nodes = append(nodes, &node)
}
}
return nodes
}
// getPoolInitialSize returns the initial size of the node pool, taking into
// account that it may span multiple zones. In that case, the node pool consists of
// multiple MIGs, each containing initialNodeCount nodes.
func getPoolInitialSize(poolName string) int {
// get initial node count
args := []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(initialNodeCount)"}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
klog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
framework.ExpectEqual(len(fields), 1)
size, err := strconv.ParseInt(fields[0], 10, 64)
framework.ExpectNoError(err)
// get the number of instance groups (MIGs) backing the node pool
args = []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(instanceGroupUrls)"}
output, err = execCmd(getGcloudCommand(args)...).CombinedOutput()
framework.ExpectNoError(err, string(output))
nodeGroupCount := len(strings.Split(string(output), ";"))
return int(size) * nodeGroupCount
}
func getPoolSize(f *framework.Framework, poolName string) int {
size := 0
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
size++
}
}
return size
}
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
ginkgo.By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
err := e2erc.RunRC(*config)
if err != nil && strings.Contains(err.Error(), "Error creating replication controller") {
klog.Warningf("Failed to create memory reservation: %v", err)
continue
}
if expectRunning {
framework.ExpectNoError(err)
}
return func() error {
return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}
framework.Failf("Failed to reserve memory within timeout")
return nil
}
// ReserveMemoryWithPriority creates a replication controller whose pods have the given priority class and, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}
// ReserveMemoryWithSelectorAndTolerations creates a replication controller whose pods use the given node selector and tolerations and, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}
// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
return WaitForClusterSizeFuncWithUnready(c, sizeFunc, timeout, 0)
}
// WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes.
func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
klog.Warningf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
e2enode.Filter(nodes, func(node v1.Node) bool {
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == numReady+expectedUnready && sizeFunc(numNodes) {
klog.Infof("Cluster has reached the desired size")
return nil
}
klog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
notready = make([]string, 0)
for _, pod := range pods.Items {
ready := false
for _, c := range pod.Status.Conditions {
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
ready = true
}
}
// Failed pods in this context generally mean that they have been
// double scheduled onto a node, but then failed a constraint check.
if pod.Status.Phase == v1.PodFailed {
klog.Warningf("Pod has failed: %v", pod)
}
if !ready && pod.Status.Phase != v1.PodFailed {
notready = append(notready, pod.Name)
}
}
if len(notready) <= tolerateUnreadyCount {
klog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
return nil
}
klog.Infof("Too many pods are not ready yet: %v", notready)
}
klog.Info("Timeout on waiting for pods being ready")
klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces"))
klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
// Some pods are still not running.
return fmt.Errorf("Too many pods are still not running: %v", notready)
}
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
return waitForCaPodsReadyInNamespace(f, c, 0)
}
func getAnyNode(c clientset.Interface) *v1.Node {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
klog.Errorf("Failed to get node list: %v", err)
return nil
}
if len(nodes.Items) == 0 {
klog.Errorf("No nodes")
return nil
}
return &nodes.Items[0]
}
func setMigSizes(sizes map[string]int) bool {
madeChanges := false
for mig, desiredSize := range sizes {
currentSize, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
if desiredSize != currentSize {
ginkgo.By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
err = framework.ResizeGroup(mig, int32(desiredSize))
framework.ExpectNoError(err)
madeChanges = true
}
}
return madeChanges
}
func drainNode(f *framework.Framework, node *v1.Node) {
ginkgo.By("Make the single node unschedulable")
makeNodeUnschedulable(f.ClientSet, node)
ginkgo.By("Manually drain the single node")
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
}
func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
ginkgo.By(fmt.Sprintf("Taint node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
for _, taint := range freshNode.Spec.Taints {
if taint.Key == disabledTaint {
return nil
}
}
freshNode.Spec.Taints = append(freshNode.Spec.Taints, v1.Taint{
Key: disabledTaint,
Value: "DisabledForTest",
Effect: v1.TaintEffectNoSchedule,
})
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
if !apierrors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to taint node in allowed number of retries")
}
// CriticalAddonsOnlyError implements the `error` interface, and signifies the
// presence of the `CriticalAddonsOnly` taint on the node.
type CriticalAddonsOnlyError struct{}
func (CriticalAddonsOnlyError) Error() string {
return fmt.Sprintf("CriticalAddonsOnly taint found on node")
}
func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
ginkgo.By(fmt.Sprintf("Remove taint from node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
var newTaints []v1.Taint
for _, taint := range freshNode.Spec.Taints {
if failOnCriticalAddonsOnly && taint.Key == criticalAddonsOnlyTaint {
return CriticalAddonsOnlyError{}
}
if taint.Key != disabledTaint {
newTaints = append(newTaints, taint)
}
}
if len(newTaints) == len(freshNode.Spec.Taints) {
return nil
}
freshNode.Spec.Taints = newTaints
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
if !apierrors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
}
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
return ScheduleGpuPod(f, id, "", 1)
}
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
Namespace: f.Namespace.Name,
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: gpuLimit,
Labels: map[string]string{"requires-gpu": "yes"},
}
if gpuType != "" {
config.NodeSelector = map[string]string{gpuLabel: gpuType}
}
err := e2erc.RunRC(*config)
if err != nil {
return err
}
return nil
}
// Create an RC running a given number of pods with anti-affinity
func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels),
Client: f.ClientSet,
Name: id,
Namespace: namespace,
Timeout: scaleUpTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: pods,
Labels: podLabels,
}
err := e2erc.RunRC(*config)
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}
func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error {
config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels),
Volumes: volumes,
Client: f.ClientSet,
Name: id,
Namespace: namespace,
Timeout: scaleUpTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: pods,
Labels: podLabels,
}
err := e2erc.RunRC(*config)
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}
var emptyDirVolumes = []v1.Volume{
{
Name: "empty-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
}
func buildVolumes(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []v1.Volume {
return []v1.Volume{
{
Name: pv.Name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
},
}
}
func buildAntiAffinity(labels map[string]string) *v1.Affinity {
return &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
}
}
// Create an RC running a given number of pods on each node without adding any constraint forcing
// such pod distribution. This is meant to create a bunch of underutilized (but not unused) nodes
// with pods that can be rescheduled on different nodes.
// This is achieved using the following method:
// 1. disable scheduling on each node
// 2. create an empty RC
// 3. for each node:
// 3a. enable scheduling on that node
// 3b. increase number of replicas in RC by podsPerNode
func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error {
ginkgo.By("Run a pod on each node")
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
if err != nil {
return err
}
}
config := &testutils.RCConfig{
Client: f.ClientSet,
Name: id,
Namespace: namespace,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: 0,
Labels: labels,
MemRequest: memRequest,
}
err := e2erc.RunRC(*config)
if err != nil {
return err
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
for i, node := range nodes {
err = makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
// Update replicas count, to create new pods that will be allocated on node
// (we retry 409 errors in case rc reference got out of sync)
for j := 0; j < 3; j++ {
*rc.Spec.Replicas = int32((i + 1) * podsPerNode)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc)
if err == nil {
break
}
if !apierrors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
}
err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to coerce RC into spawning a pod on node %s within timeout", node.Name)
}
err = makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
// Increase cluster size by newNodesForScaledownTests to create some unused nodes
// that can be later removed by cluster autoscaler.
func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int {
ginkgo.By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + newNodesForScaledownTests
increasedSize += val + newNodesForScaledownTests
}
setMigSizes(newSizes)
checkClusterSize := func(size int) bool {
if size >= increasedSize {
return true
}
resized := setMigSizes(newSizes)
if resized {
klog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
}
return false
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout))
return increasedSize
}
// Try to get clusterwide health from CA status configmap.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getClusterwideStatus(c clientset.Interface) (string, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return "", err
}
status, ok := configMap.Data["status"]
if !ok {
return "", fmt.Errorf("Status information not found in configmap")
}
matcher, err := regexp.Compile("Cluster-wide:\\s*\n\\s*Health:\\s*([A-Za-z]+)")
if err != nil {
return "", err
}
result := matcher.FindStringSubmatch(status)
if len(result) < 2 {
return "", fmt.Errorf("Failed to parse CA status configmap, raw status: %v", status)
}
return result[1], nil
}
type scaleUpStatus struct {
status string
ready int
target int
timestamp time.Time
}
// Try to get timestamp from status.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getStatusTimestamp(status string) (time.Time, error) {
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+)")
if err != nil {
return time.Time{}, err
}
timestampMatch := timestampMatcher.FindStringSubmatch(status)
if len(timestampMatch) < 2 {
return time.Time{}, fmt.Errorf("Failed to parse CA status timestamp, raw status: %v", status)
}
timestamp, err := time.Parse(timestampFormat, timestampMatch[1])
if err != nil {
return time.Time{}, err
}
return timestamp, nil
}
// Try to get scaleup statuses of all node groups.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return nil, err
}
status, ok := configMap.Data["status"]
if !ok {
return nil, fmt.Errorf("Status information not found in configmap")
}
timestamp, err := getStatusTimestamp(status)
if err != nil {
return nil, err
}
matcher, err := regexp.Compile("s*ScaleUp:\\s*([A-Za-z]+)\\s*\\(ready=([0-9]+)\\s*cloudProviderTarget=([0-9]+)\\s*\\)")
if err != nil {
return nil, err
}
matches := matcher.FindAllStringSubmatch(status, -1)
if len(matches) < 1 {
return nil, fmt.Errorf("Failed to parse CA status configmap, raw status: %v", status)
}
result := scaleUpStatus{
status: caNoScaleUpStatus,
ready: 0,
target: 0,
timestamp: timestamp,
}
for _, match := range matches {
if match[1] == caOngoingScaleUpStatus {
result.status = caOngoingScaleUpStatus
}
newReady, err := strconv.Atoi(match[2])
if err != nil {
return nil, err
}
result.ready += newReady
newTarget, err := strconv.Atoi(match[3])
if err != nil {
return nil, err
}
result.target += newTarget
}
klog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
return &result, nil
}
func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) {
var finalErr error
var status *scaleUpStatus
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
status, finalErr = getScaleUpStatus(c)
if finalErr != nil {
return false, nil
}
if status.timestamp.Add(freshStatusLimit).Before(time.Now()) {
// stale status
finalErr = fmt.Errorf("Status too old")
return false, nil
}
return cond(status), nil
})
if err != nil {
err = fmt.Errorf("Failed to find expected scale up status: %v, last status: %v, final err: %v", err, status, finalErr)
}
return status, err
}
// This is a temporary fix to allow CA to migrate some kube-system pods
// TODO: Remove this when the PDB is added for some of those components
func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
var newPdbs []string
cleanup := func() {
var finalErr error
for _, newPdbName := range newPdbs {
ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
finalErr = err
}
}
if finalErr != nil {
framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
}
}
type pdbInfo struct {
label string
minAvailable int
}
pdbsToAdd := []pdbInfo{
{label: "kube-dns", minAvailable: 1},
{label: "kube-dns-autoscaler", minAvailable: 0},
{label: "metrics-server", minAvailable: 0},
{label: "kubernetes-dashboard", minAvailable: 0},
{label: "glbc", minAvailable: 0},
}
for _, pdbData := range pdbsToAdd {
ginkgo.By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
labelMap := map[string]string{"k8s-app": pdbData.label}
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
minAvailable := intstr.FromInt(pdbData.minAvailable)
pdb := &policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: pdbName,
Namespace: "kube-system",
},
Spec: policyv1beta1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
MinAvailable: &minAvailable,
},
}
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb)
newPdbs = append(newPdbs, pdbName)
if err != nil {
return cleanup, err
}
}
return cleanup, nil
}
func createPriorityClasses(f *framework.Framework) func() {
priorityClasses := map[string]int32{
expendablePriorityClassName: -15,
highPriorityClassName: 1000,
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
klog.Errorf("Error creating priority class: %v", err)
}
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
}
return func() {
for className := range priorityClasses {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(className, nil)
if err != nil {
klog.Errorf("Error deleting priority class: %v", err)
}
}
}
}
| [
"\"TESTED_GPU_TYPE\"",
"\"CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER\""
]
| []
| [
"CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER",
"TESTED_GPU_TYPE"
]
| [] | ["CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER", "TESTED_GPU_TYPE"] | go | 2 | 0 | |
backend/datasets/scrape.py | import os
import requests
import csv
import datetime
import time
import json
from tqdm import tqdm
from dotenv import load_dotenv
load_dotenv()
token = os.environ.get("COIN_CAP_API_KEY")
start_date = datetime.date(2018, 1, 1)
end_date = datetime.date(2021, 11, 1)
delta = datetime.timedelta(days=1)
date_range = end_date - start_date
count = date_range.days + 1
class BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Bearer " + self.token
return r
def make_req(start, end):
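# Fetch one window of daily (d1) BTC/USDT candles from CoinCap's Binance data;
# start and end are epoch timestamps in milliseconds.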
url = "http://api.coincap.io/v2/candles?exchange=binance&interval=d1&baseId=bitcoin"eId=tether&start={}&end={}".format(
start, end)
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload, auth=BearerAuth(token))
return response
with open('data.csv', 'a') as csvfile:
fieldnames = ['open', 'high', 'low', 'close', 'volume', 'period']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
pbar = tqdm(total = count)
while start_date <= end_date:
start = int(time.mktime(start_date.timetuple()) * 1000)
start_date += delta
end = int(time.mktime(start_date.timetuple()) * 1000)
response = make_req(start, end)
data = json.loads(response.text)
if len(data['data']) > 0:
for i in data['data']:
writer.writerow(i)
pbar.update(1)
else:
pbar.update(1)
print("No data for this date")
continue
pbar.close() | []
| []
| [
"COIN_CAP_API_KEY"
]
| [] | ["COIN_CAP_API_KEY"] | python | 1 | 0 | |
nkdsu/wsgi.py | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nkdsu.settings")
from django.core.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
Lib/test/test_support.py | import errno
import importlib
import io
import os
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import textwrap
import time
import unittest
import warnings
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import script_helper
from test.support import socket_helper
from test.support import warnings_helper
TESTFN = os_helper.TESTFN
class TestSupport(unittest.TestCase):
@classmethod
def setUpClass(cls):
orig_filter_len = len(warnings.filters)
cls._warnings_helper_token = support.ignore_deprecations_from(
"test.support.warnings_helper", like=".*used in test_support.*"
)
cls._test_support_token = support.ignore_deprecations_from(
"test.test_support", like=".*You should NOT be seeing this.*"
)
assert len(warnings.filters) == orig_filter_len + 2
@classmethod
def tearDownClass(cls):
orig_filter_len = len(warnings.filters)
support.clear_ignored_deprecations(
cls._warnings_helper_token,
cls._test_support_token,
)
assert len(warnings.filters) == orig_filter_len - 2
def test_ignored_deprecations_are_silent(self):
"""Test support.ignore_deprecations_from() silences warnings"""
with warnings.catch_warnings(record=True) as warning_objs:
warnings_helper._warn_about_deprecation()
warnings.warn("You should NOT be seeing this.", DeprecationWarning)
messages = [str(w.message) for w in warning_objs]
self.assertEqual(len(messages), 0, messages)
def test_import_module(self):
import_helper.import_module("ftplib")
self.assertRaises(unittest.SkipTest,
import_helper.import_module, "foo")
def test_import_fresh_module(self):
import_helper.import_fresh_module("ftplib")
def test_get_attribute(self):
self.assertEqual(support.get_attribute(self, "test_get_attribute"),
self.test_get_attribute)
self.assertRaises(unittest.SkipTest, support.get_attribute, self, "foo")
@unittest.skip("failing buildbots")
def test_get_original_stdout(self):
self.assertEqual(support.get_original_stdout(), sys.stdout)
def test_unload(self):
import sched
self.assertIn("sched", sys.modules)
import_helper.unload("sched")
self.assertNotIn("sched", sys.modules)
def test_unlink(self):
with open(TESTFN, "w", encoding="utf-8") as f:
pass
os_helper.unlink(TESTFN)
self.assertFalse(os.path.exists(TESTFN))
os_helper.unlink(TESTFN)
def test_rmtree(self):
dirpath = os_helper.TESTFN + 'd'
subdirpath = os.path.join(dirpath, 'subdir')
os.mkdir(dirpath)
os.mkdir(subdirpath)
os_helper.rmtree(dirpath)
self.assertFalse(os.path.exists(dirpath))
with support.swap_attr(support, 'verbose', 0):
os_helper.rmtree(dirpath)
os.mkdir(dirpath)
os.mkdir(subdirpath)
os.chmod(dirpath, stat.S_IRUSR|stat.S_IXUSR)
with support.swap_attr(support, 'verbose', 0):
os_helper.rmtree(dirpath)
self.assertFalse(os.path.exists(dirpath))
os.mkdir(dirpath)
os.mkdir(subdirpath)
os.chmod(dirpath, 0)
with support.swap_attr(support, 'verbose', 0):
os_helper.rmtree(dirpath)
self.assertFalse(os.path.exists(dirpath))
def test_forget(self):
mod_filename = TESTFN + '.py'
with open(mod_filename, 'w', encoding="utf-8") as f:
print('foo = 1', file=f)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
import_helper.forget(TESTFN)
self.assertNotIn(TESTFN, sys.modules)
finally:
del sys.path[0]
os_helper.unlink(mod_filename)
os_helper.rmtree('__pycache__')
def test_HOST(self):
s = socket.create_server((socket_helper.HOST, 0))
s.close()
def test_find_unused_port(self):
port = socket_helper.find_unused_port()
s = socket.create_server((socket_helper.HOST, port))
s.close()
def test_bind_port(self):
s = socket.socket()
socket_helper.bind_port(s)
s.listen()
s.close()
# Tests for temp_dir()
def test_temp_dir(self):
"""Test that temp_dir() creates and destroys its directory."""
parent_dir = tempfile.mkdtemp()
parent_dir = os.path.realpath(parent_dir)
try:
path = os.path.join(parent_dir, 'temp')
self.assertFalse(os.path.isdir(path))
with os_helper.temp_dir(path) as temp_path:
self.assertEqual(temp_path, path)
self.assertTrue(os.path.isdir(path))
self.assertFalse(os.path.isdir(path))
finally:
os_helper.rmtree(parent_dir)
def test_temp_dir__path_none(self):
"""Test passing no path."""
with os_helper.temp_dir() as temp_path:
self.assertTrue(os.path.isdir(temp_path))
self.assertFalse(os.path.isdir(temp_path))
def test_temp_dir__existing_dir__quiet_default(self):
"""Test passing a directory that already exists."""
def call_temp_dir(path):
with os_helper.temp_dir(path) as temp_path:
raise Exception("should not get here")
path = tempfile.mkdtemp()
path = os.path.realpath(path)
try:
self.assertTrue(os.path.isdir(path))
self.assertRaises(FileExistsError, call_temp_dir, path)
# Make sure temp_dir did not delete the original directory.
self.assertTrue(os.path.isdir(path))
finally:
shutil.rmtree(path)
def test_temp_dir__existing_dir__quiet_true(self):
"""Test passing a directory that already exists with quiet=True."""
path = tempfile.mkdtemp()
path = os.path.realpath(path)
try:
with warnings_helper.check_warnings() as recorder:
with os_helper.temp_dir(path, quiet=True) as temp_path:
self.assertEqual(path, temp_path)
warnings = [str(w.message) for w in recorder.warnings]
# Make sure temp_dir did not delete the original directory.
self.assertTrue(os.path.isdir(path))
finally:
shutil.rmtree(path)
self.assertEqual(len(warnings), 1, warnings)
warn = warnings[0]
self.assertTrue(warn.startswith(f'tests may fail, unable to create '
f'temporary directory {path!r}: '),
warn)
@unittest.skipUnless(hasattr(os, "fork"), "test requires os.fork")
def test_temp_dir__forked_child(self):
"""Test that a forked child process does not remove the directory."""
# See bpo-30028 for details.
# Run the test as an external script, because it uses fork.
script_helper.assert_python_ok("-c", textwrap.dedent("""
import os
from test import support
from test.support import os_helper
with os_helper.temp_cwd() as temp_path:
pid = os.fork()
if pid != 0:
# parent process
# wait for the child to terminate
support.wait_process(pid, exitcode=0)
# Make sure that temp_path is still present. When the child
# process leaves the 'temp_cwd'-context, the __exit__()-
# method of the context must not remove the temporary
# directory.
if not os.path.isdir(temp_path):
raise AssertionError("Child removed temp_path.")
"""))
# Tests for change_cwd()
def test_change_cwd(self):
original_cwd = os.getcwd()
with os_helper.temp_dir() as temp_path:
with os_helper.change_cwd(temp_path) as new_cwd:
self.assertEqual(new_cwd, temp_path)
self.assertEqual(os.getcwd(), new_cwd)
self.assertEqual(os.getcwd(), original_cwd)
def test_change_cwd__non_existent_dir(self):
"""Test passing a non-existent directory."""
original_cwd = os.getcwd()
def call_change_cwd(path):
with os_helper.change_cwd(path) as new_cwd:
raise Exception("should not get here")
with os_helper.temp_dir() as parent_dir:
non_existent_dir = os.path.join(parent_dir, 'does_not_exist')
self.assertRaises(FileNotFoundError, call_change_cwd,
non_existent_dir)
self.assertEqual(os.getcwd(), original_cwd)
def test_change_cwd__non_existent_dir__quiet_true(self):
"""Test passing a non-existent directory with quiet=True."""
original_cwd = os.getcwd()
with os_helper.temp_dir() as parent_dir:
bad_dir = os.path.join(parent_dir, 'does_not_exist')
with warnings_helper.check_warnings() as recorder:
with os_helper.change_cwd(bad_dir, quiet=True) as new_cwd:
self.assertEqual(new_cwd, original_cwd)
self.assertEqual(os.getcwd(), new_cwd)
warnings = [str(w.message) for w in recorder.warnings]
self.assertEqual(len(warnings), 1, warnings)
warn = warnings[0]
self.assertTrue(warn.startswith(f'tests may fail, unable to change '
f'the current working directory '
f'to {bad_dir!r}: '),
warn)
# Tests for change_cwd()
def test_change_cwd__chdir_warning(self):
"""Check the warning message when os.chdir() fails."""
path = TESTFN + '_does_not_exist'
with warnings_helper.check_warnings() as recorder:
with os_helper.change_cwd(path=path, quiet=True):
pass
messages = [str(w.message) for w in recorder.warnings]
self.assertEqual(len(messages), 1, messages)
msg = messages[0]
self.assertTrue(msg.startswith(f'tests may fail, unable to change '
f'the current working directory '
f'to {path!r}: '),
msg)
# Tests for temp_cwd()
def test_temp_cwd(self):
here = os.getcwd()
with os_helper.temp_cwd(name=TESTFN):
self.assertEqual(os.path.basename(os.getcwd()), TESTFN)
self.assertFalse(os.path.exists(TESTFN))
self.assertEqual(os.getcwd(), here)
def test_temp_cwd__name_none(self):
"""Test passing None to temp_cwd()."""
original_cwd = os.getcwd()
with os_helper.temp_cwd(name=None) as new_cwd:
self.assertNotEqual(new_cwd, original_cwd)
self.assertTrue(os.path.isdir(new_cwd))
self.assertEqual(os.getcwd(), new_cwd)
self.assertEqual(os.getcwd(), original_cwd)
def test_sortdict(self):
self.assertEqual(support.sortdict({3:3, 2:2, 1:1}), "{1: 1, 2: 2, 3: 3}")
def test_make_bad_fd(self):
fd = os_helper.make_bad_fd()
with self.assertRaises(OSError) as cm:
os.write(fd, b"foo")
self.assertEqual(cm.exception.errno, errno.EBADF)
def test_check_syntax_error(self):
support.check_syntax_error(self, "def class", lineno=1, offset=5)
with self.assertRaises(AssertionError):
support.check_syntax_error(self, "x=1")
def test_CleanImport(self):
import importlib
with import_helper.CleanImport("pprint"):
importlib.import_module("pprint")
def test_DirsOnSysPath(self):
with import_helper.DirsOnSysPath('foo', 'bar'):
self.assertIn("foo", sys.path)
self.assertIn("bar", sys.path)
self.assertNotIn("foo", sys.path)
self.assertNotIn("bar", sys.path)
def test_captured_stdout(self):
with support.captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
def test_captured_stderr(self):
with support.captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
def test_captured_stdin(self):
with support.captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
def test_gc_collect(self):
support.gc_collect()
def test_python_is_optimized(self):
self.assertIsInstance(support.python_is_optimized(), bool)
def test_swap_attr(self):
class Obj:
pass
obj = Obj()
obj.x = 1
with support.swap_attr(obj, "x", 5) as x:
self.assertEqual(obj.x, 5)
self.assertEqual(x, 1)
self.assertEqual(obj.x, 1)
with support.swap_attr(obj, "y", 5) as y:
self.assertEqual(obj.y, 5)
self.assertIsNone(y)
self.assertFalse(hasattr(obj, 'y'))
with support.swap_attr(obj, "y", 5):
del obj.y
self.assertFalse(hasattr(obj, 'y'))
def test_swap_item(self):
D = {"x":1}
with support.swap_item(D, "x", 5) as x:
self.assertEqual(D["x"], 5)
self.assertEqual(x, 1)
self.assertEqual(D["x"], 1)
with support.swap_item(D, "y", 5) as y:
self.assertEqual(D["y"], 5)
self.assertIsNone(y)
self.assertNotIn("y", D)
with support.swap_item(D, "y", 5):
del D["y"]
self.assertNotIn("y", D)
class RefClass:
attribute1 = None
attribute2 = None
_hidden_attribute1 = None
__magic_1__ = None
class OtherClass:
attribute2 = None
attribute3 = None
__magic_1__ = None
__magic_2__ = None
def test_detect_api_mismatch(self):
missing_items = support.detect_api_mismatch(self.RefClass,
self.OtherClass)
self.assertEqual({'attribute1'}, missing_items)
missing_items = support.detect_api_mismatch(self.OtherClass,
self.RefClass)
self.assertEqual({'attribute3', '__magic_2__'}, missing_items)
def test_detect_api_mismatch__ignore(self):
ignore = ['attribute1', 'attribute3', '__magic_2__', 'not_in_either']
missing_items = support.detect_api_mismatch(
self.RefClass, self.OtherClass, ignore=ignore)
self.assertEqual(set(), missing_items)
missing_items = support.detect_api_mismatch(
self.OtherClass, self.RefClass, ignore=ignore)
self.assertEqual(set(), missing_items)
def test_check__all__(self):
extra = {'tempdir'}
not_exported = {'template'}
support.check__all__(self,
tempfile,
extra=extra,
not_exported=not_exported)
extra = {'TextTestResult', 'installHandler'}
not_exported = {'load_tests', "TestProgram", "BaseTestSuite"}
support.check__all__(self,
unittest,
("unittest.result", "unittest.case",
"unittest.suite", "unittest.loader",
"unittest.main", "unittest.runner",
"unittest.signals", "unittest.async_case"),
extra=extra,
not_exported=not_exported)
self.assertRaises(AssertionError, support.check__all__, self, unittest)
@unittest.skipUnless(hasattr(os, 'waitpid') and hasattr(os, 'WNOHANG'),
'need os.waitpid() and os.WNOHANG')
def test_reap_children(self):
# Make sure that there is no other pending child process
support.reap_children()
# Create a child process
pid = os.fork()
if pid == 0:
# child process: do nothing, just exit
os._exit(0)
t0 = time.monotonic()
deadline = time.monotonic() + support.SHORT_TIMEOUT
was_altered = support.environment_altered
try:
support.environment_altered = False
stderr = io.StringIO()
while True:
if time.monotonic() > deadline:
self.fail("timeout")
old_stderr = sys.__stderr__
try:
sys.__stderr__ = stderr
support.reap_children()
finally:
sys.__stderr__ = old_stderr
# Use environment_altered to check if reap_children() found
# the child process
if support.environment_altered:
break
# loop until the child process completed
time.sleep(0.100)
msg = "Warning -- reap_children() reaped child process %s" % pid
self.assertIn(msg, stderr.getvalue())
self.assertTrue(support.environment_altered)
finally:
support.environment_altered = was_altered
# Just in case, check again that there is no other
# pending child process
support.reap_children()
def check_options(self, args, func, expected=None):
code = f'from test.support import {func}; print(repr({func}()))'
cmd = [sys.executable, *args, '-c', code]
env = {key: value for key, value in os.environ.items()
if not key.startswith('PYTHON')}
proc = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True,
env=env)
if expected is None:
expected = args
self.assertEqual(proc.stdout.rstrip(), repr(expected))
self.assertEqual(proc.returncode, 0)
def test_args_from_interpreter_flags(self):
# Test test.support.args_from_interpreter_flags()
for opts in (
# no option
[],
# single option
['-B'],
['-s'],
['-S'],
['-E'],
['-v'],
['-b'],
['-q'],
['-I'],
# same option multiple times
['-bb'],
['-vvv'],
# -W options
['-Wignore'],
# -X options
['-X', 'dev'],
['-Wignore', '-X', 'dev'],
['-X', 'faulthandler'],
['-X', 'importtime'],
['-X', 'showrefcount'],
['-X', 'tracemalloc'],
['-X', 'tracemalloc=3'],
):
with self.subTest(opts=opts):
self.check_options(opts, 'args_from_interpreter_flags')
self.check_options(['-I', '-E', '-s'], 'args_from_interpreter_flags',
['-I'])
def test_optim_args_from_interpreter_flags(self):
# Test test.support.optim_args_from_interpreter_flags()
for opts in (
# no option
[],
['-O'],
['-OO'],
['-OOOO'],
):
with self.subTest(opts=opts):
self.check_options(opts, 'optim_args_from_interpreter_flags')
def test_match_test(self):
class Test:
def __init__(self, test_id):
self.test_id = test_id
def id(self):
return self.test_id
test_access = Test('test.test_os.FileTests.test_access')
test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir')
# Test acceptance
with support.swap_attr(support, '_match_test_func', None):
# match all
support.set_match_tests([])
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# match all using None
support.set_match_tests(None, None)
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# match the full test identifier
support.set_match_tests([test_access.id()], None)
self.assertTrue(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
# match the module name
support.set_match_tests(['test_os'], None)
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# Test '*' pattern
support.set_match_tests(['test_*'], None)
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# Test case sensitivity
support.set_match_tests(['filetests'], None)
self.assertFalse(support.match_test(test_access))
support.set_match_tests(['FileTests'], None)
self.assertTrue(support.match_test(test_access))
# Test pattern containing '.' and a '*' metacharacter
support.set_match_tests(['*test_os.*.test_*'], None)
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# Multiple patterns
support.set_match_tests([test_access.id(), test_chdir.id()], None)
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
support.set_match_tests(['test_access', 'DONTMATCH'], None)
self.assertTrue(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
# Test rejection
with support.swap_attr(support, '_match_test_func', None):
# match all
support.set_match_tests(ignore_patterns=[])
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# match all using None
support.set_match_tests(None, None)
self.assertTrue(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# match the full test identifier
support.set_match_tests(None, [test_access.id()])
self.assertFalse(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
# match the module name
support.set_match_tests(None, ['test_os'])
self.assertFalse(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
# Test '*' pattern
support.set_match_tests(None, ['test_*'])
self.assertFalse(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
# Test case sensitivity
support.set_match_tests(None, ['filetests'])
self.assertTrue(support.match_test(test_access))
support.set_match_tests(None, ['FileTests'])
self.assertFalse(support.match_test(test_access))
# Test pattern containing '.' and a '*' metacharacter
support.set_match_tests(None, ['*test_os.*.test_*'])
self.assertFalse(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
# Multiple patterns
support.set_match_tests(None, [test_access.id(), test_chdir.id()])
self.assertFalse(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
support.set_match_tests(None, ['test_access', 'DONTMATCH'])
self.assertFalse(support.match_test(test_access))
self.assertTrue(support.match_test(test_chdir))
def test_fd_count(self):
# We cannot test the absolute value of fd_count(): on old Linux
# kernel or glibc versions, os.urandom() keeps a FD open on
# /dev/urandom device and Python has 4 FD opens instead of 3.
start = os_helper.fd_count()
fd = os.open(__file__, os.O_RDONLY)
try:
more = os_helper.fd_count()
finally:
os.close(fd)
self.assertEqual(more - start, 1)
def check_print_warning(self, msg, expected):
stderr = io.StringIO()
old_stderr = sys.__stderr__
try:
sys.__stderr__ = stderr
support.print_warning(msg)
finally:
sys.__stderr__ = old_stderr
self.assertEqual(stderr.getvalue(), expected)
def test_print_warning(self):
self.check_print_warning("msg",
"Warning -- msg\n")
self.check_print_warning("a\nb",
'Warning -- a\nWarning -- b\n')
# XXX - the following is a list of untested APIs
# make_legacy_pyc
# is_resource_enabled
# requires
# fcmp
# umask
# findfile
# check_warnings
# EnvironmentVarGuard
# transient_internet
# run_with_locale
# set_memlimit
# bigmemtest
# precisionbigmemtest
# bigaddrspacetest
# requires_resource
# run_doctest
# threading_cleanup
# reap_threads
# can_symlink
# skip_unless_symlink
# SuppressCrashReport
if __name__ == '__main__':
unittest.main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
lib/ReadMapping/ReadMappingImpl.py | # -*- coding: utf-8 -*-
#BEGIN_HEADER
import logging
import os
from installed_clients.KBaseReportClient import KBaseReport
#END_HEADER
class ReadMapping:
'''
Module Name:
ReadMapping
Module Description:
A KBase module: ReadMapping
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = ""
GIT_COMMIT_HASH = ""
#BEGIN_CLASS_HEADER
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.shared_folder = config['scratch']
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
#END_CONSTRUCTOR
pass
def hisat2(self, ctx, params):
"""
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN hisat2
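# Placeholder body (assumption): the real HISAT2 mapping logic and KBaseReport
# creation belong between the BEGIN/END markers. Without an assignment here,
# `output` below would be undefined and the method would raise NameError.
output = {'report_name': '', 'report_ref': ''}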
#END hisat2
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method hisat2 return value ' +
'output is not type dict as required.')
# return the results
return [output]
def bwa(self, ctx, params):
"""
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN bwa
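# Placeholder body (assumption): the real BWA mapping logic and KBaseReport
# creation go between the BEGIN/END markers; `output` must be set here.
output = {'report_name': '', 'report_ref': ''}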
#END bwa
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method bwa return value ' +
'output is not type dict as required.')
# return the results
return [output]
def minimap2(self, ctx, params):
"""
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN minimap2
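# Placeholder body (assumption): the real minimap2 mapping logic and KBaseReport
# creation go between the BEGIN/END markers; `output` must be set here.
output = {'report_name': '', 'report_ref': ''}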
#END minimap2
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method minimap2 return value ' +
'output is not type dict as required.')
# return the results
return [output]
def bowtie2(self, ctx, params):
"""
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN bowtie2
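# Placeholder body (assumption): the real Bowtie2 mapping logic and KBaseReport
# creation go between the BEGIN/END markers; `output` must be set here.
output = {'report_name': '', 'report_ref': ''}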
#END bowtie2
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method bowtie2 return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| []
| []
| [
"SDK_CALLBACK_URL"
]
| [] | ["SDK_CALLBACK_URL"] | python | 1 | 0 | |
etc/Implemented_Papers/MixMatch/main.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
import numpy as np
import shutil
import random
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
import tensorflow as tf
import torch.nn.functional as F
from ImageDataLoader import SimpleImageLoader
from models import Res18, Res50, Dense121, Res18_basic
#
# from pytorch_metric_learning import miners
# from pytorch_metric_learning import losses as lossfunc
import glob
import nsml
from nsml import DATASET_PATH, IS_ON_NSML
NUM_CLASSES = 265
if not IS_ON_NSML:
DATASET_PATH = 'fashion_demo'
def top_n_accuracy_score(y_true, y_prob, n=5, normalize=True):
num_obs, num_labels = y_prob.shape
idx = num_labels - n - 1
counter = 0
argsorted = np.argsort(y_prob, axis=1)
for i in range(num_obs):
if y_true[i] in argsorted[i, idx+1:]:
counter += 1
if normalize:
return counter * 1.0 / num_obs
else:
return counter
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(opts, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = opts.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def linear_rampup(current, rampup_length):
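# Returns a weight in [0, 1] that grows linearly with training progress; used to
# ramp up the unsupervised (consistency) loss term during MixMatch training.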
if rampup_length == 0:
return 1.0
else:
current = np.clip(current / rampup_length, 0.0, 1.0)
return float(current)
class SemiLoss(object):
def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch, final_epoch):
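# Lx: cross-entropy of the mixed-up labeled logits against their soft targets.
# Lu: mean squared error between unlabeled predictions and their guessed labels.
# The third value is the ramped weight (lambda_u * linear_rampup) that the
# caller applies to Lu.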
probs_u = torch.softmax(outputs_u, dim=1)
Lx = -torch.mean(torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))
Lu = torch.mean((probs_u - targets_u)**2)
return Lx, Lu, opts.lambda_u * linear_rampup(epoch, final_epoch)
def interleave_offsets(batch, nu):
groups = [batch // (nu + 1)] * (nu + 1)
for x in range(batch - sum(groups)):
groups[-x - 1] += 1
offsets = [0]
for g in groups:
offsets.append(offsets[-1] + g)
assert offsets[-1] == batch
return offsets
def interleave(xy, batch):
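# Interleave labeled and unlabeled samples across sub-batches so each forward
# pass sees a mix of both, keeping BatchNorm statistics consistent (a standard
# MixMatch implementation detail).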
nu = len(xy) - 1
offsets = interleave_offsets(batch, nu)
xy = [[v[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for v in xy]
for i in range(1, nu + 1):
xy[0][i], xy[i][i] = xy[i][i], xy[0][i]
return [torch.cat(v, dim=0) for v in xy]
def split_ids(path, ratio):
with open(path) as f:
ids_l = []
ids_u = []
for i, line in enumerate(f.readlines()):
if i == 0 or line == '' or line == '\n':
continue
line = line.replace('\n', '').split('\t')
if int(line[1]) >= 0:
ids_l.append(int(line[0]))
else:
ids_u.append(int(line[0]))
ids_l = np.array(ids_l)
ids_u = np.array(ids_u)
perm = np.random.permutation(np.arange(len(ids_l)))
cut = int(ratio*len(ids_l))
train_ids = ids_l[perm][cut:]
val_ids = ids_l[perm][:cut]
return train_ids, val_ids, ids_u
### NSML functions
def _infer(model, root_path, test_loader=None):
if test_loader is None:
test_loader = torch.utils.data.DataLoader(
SimpleImageLoader(root_path, 'test',
transform=transforms.Compose([
transforms.Resize(opts.imResize),
transforms.CenterCrop(opts.imsize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])), batch_size=opts.batchsize, shuffle=False, num_workers=4, pin_memory=True)
print('loaded {} test images'.format(len(test_loader.dataset)))
outputs = []
s_t = time.time()
for idx, image in enumerate(test_loader):
if torch.cuda.is_available():
image = image.cuda()
_, probs = model(image)
output = torch.argmax(probs, dim=1)
output = output.detach().cpu().numpy()
outputs.append(output)
outputs = np.concatenate(outputs)
return outputs
def bind_nsml(model):
def save(dir_name, *args, **kwargs):
os.makedirs(dir_name, exist_ok=True)
state = model.state_dict()
torch.save(state, os.path.join(dir_name, 'model.pt'))
print('saved')
def load(dir_name, *args, **kwargs):
state = torch.load(os.path.join(dir_name, 'model.pt'))
model.load_state_dict(state)
print('loaded')
def infer(root_path):
return _infer(model, root_path)
nsml.bind(save=save, load=load, infer=infer)
######################################################################
# Options
######################################################################
parser = argparse.ArgumentParser(description='Sample Product200K Training')
parser.add_argument('--start_epoch', type=int, default=250, metavar='N', help='number of start epoch (default: 250)')
parser.add_argument('--epochs', type=int, default=300, metavar='N', help='number of epochs to train (default: 300)')
# basic settings
parser.add_argument('--name',default='Res18baseMM', type=str, help='output model name')
parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
parser.add_argument('--batchsize', default=30, type=int, help='batchsize_labeled')
parser.add_argument('--batchsize2', default=75, type=int, help='batchsize_unlabeled')
parser.add_argument('--seed', type=int, default=123, help='random seed')
# basic hyper-parameters
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)')
parser.add_argument('--imResize', default=256, type=int, help='')
parser.add_argument('--imsize', default=224, type=int, help='')
parser.add_argument('--lossXent', type=float, default=1, help='lossWeight for Xent')
# arguments for logging and backup
parser.add_argument('--log_interval', type=int, default=10, metavar='N', help='logging training status')
parser.add_argument('--save_epoch', type=int, default=50, help='saving epoch interval')
# hyper-parameters for mix-match
parser.add_argument('--alpha', default=0.75, type=float)
parser.add_argument('--lambda-u', default=150, type=float)
parser.add_argument('--T', default=0.5, type=float)
### DO NOT MODIFY THIS BLOCK ###
# arguments for nsml
parser.add_argument('--pause', type=int, default=0)
parser.add_argument('--mode', type=str, default='train')
################################
def main():
global opts
opts = parser.parse_args()
opts.cuda = 0
# Set GPU
seed = opts.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
print(torch.cuda.device_count())
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_ids
use_gpu = torch.cuda.is_available()
if use_gpu:
opts.cuda = 1
print("Currently using GPU {}".format(opts.gpu_ids))
cudnn.benchmark = True
torch.cuda.manual_seed_all(seed)
else:
print("Currently using CPU (GPU is highly recommended)")
# Set model
model = Res50(NUM_CLASSES)
model.eval()
parameters = filter(lambda p: p.requires_grad, model.parameters())
n_parameters = sum([p.data.nelement() for p in model.parameters()])
print(' + Number of params: {}'.format(n_parameters))
if use_gpu:
model.cuda()
### DO NOT MODIFY THIS BLOCK ###
if IS_ON_NSML:
bind_nsml(model)
if opts.pause:
nsml.paused(scope=locals())
################################
#nsml.load(checkpoint = 'Res18baseMM_best', session = 'kaist_15/fashion_eval/4')
if opts.mode == 'train':
model.train()
# Set dataloader
train_ids, val_ids, unl_ids = split_ids(os.path.join(DATASET_PATH, 'train/train_label'), 0.2)
print('found {} train, {} validation and {} unlabeled images'.format(len(train_ids), len(val_ids), len(unl_ids)))
train_loader = torch.utils.data.DataLoader(
SimpleImageLoader(DATASET_PATH, 'train', train_ids,
transform=transforms.Compose([
transforms.Resize(opts.imResize),
transforms.RandomResizedCrop(opts.imsize),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),
batch_size=opts.batchsize, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
print('train_loader done')
unlabel_loader = torch.utils.data.DataLoader(
SimpleImageLoader(DATASET_PATH, 'unlabel', unl_ids,
transform=transforms.Compose([
transforms.Resize(opts.imResize),
transforms.RandomResizedCrop(opts.imsize),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),
batch_size=opts.batchsize2, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
print('unlabel_loader done')
validation_loader = torch.utils.data.DataLoader(
SimpleImageLoader(DATASET_PATH, 'val', val_ids,
transform=transforms.Compose([
transforms.Resize(opts.imResize),
transforms.CenterCrop(opts.imsize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),
batch_size=opts.batchsize2, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)
print('validation_loader done')
# Set optimizer
#optimizer = optim.Adam(model.parameters(), lr=opts.lr)
optimizer = optim.SGD(model.parameters(), lr=opts.lr, momentum = opts.momentum, weight_decay = 0.0004)
# INSTANTIATE LOSS CLASS
train_criterion = SemiLoss()
# INSTANTIATE STEP LEARNING SCHEDULER CLASS
#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 150], gamma=0.1)
'''
!!!!!!!!!!!!!
Record the experiment details as thoroughly as possible!
We cannot keep every version of the code, so make sure the NSML terminal log
alone is enough to tell what each experiment was, even if it is a hassle.
!!!!!!!!!!!
'''
print("Title: {}".format("MixMatch"))
print("Purpose: {}".format("Mixmatch baseline testing(Chocolatefudge) // Check transfer learning 250 -> 300 epoch"))
print("Environments")
print("Model: {}".format("Resnet 50"))
print("Hyperparameters: batchsize {}, lr {}, epoch {}, lambdau {}".format(opts.batchsize, opts.lr, opts.epochs, opts.lambda_u))
print("Optimizer: {}, Scheduler: {}".format("SGD with momentum 0.9, wd 0.0004", "MultiStepLR with 50,150 schedule"))
print("Other necessary Hyperparameters: {}".format("Batchsize for unlabeled is 75."))
print("Details: {}".format("Experiment for constructing necessary baseline for CV project."))
print("Etc: {}".format("Changes from original code: Res18_basic -> Res50, batchsize smaller, Different batchsize btw labeled and unlabeled (30, 75)."))
# Train and Validation
best_acc = -1
for epoch in range(opts.start_epoch, opts.epochs + 1):
print('start training')
loss, _, _ = train(opts, train_loader, unlabel_loader, model, train_criterion, optimizer, epoch, use_gpu)
#scheduler.step()
print('start validation')
acc_top1, acc_top5 = validation(opts, validation_loader, model, epoch, use_gpu)
is_best = acc_top1 > best_acc
best_acc = max(acc_top1, best_acc)
nsml.report(summary=True, train_loss= loss, val_acc_top1= acc_top1, val_acc_top5=acc_top5, step=epoch)
if is_best:
print('saving best checkpoint...')
if IS_ON_NSML:
nsml.save(opts.name + '_best')
else:
torch.save(model.state_dict(), os.path.join('runs', opts.name + '_best'))
if (epoch + 1) % opts.save_epoch == 0:
if IS_ON_NSML:
nsml.save(opts.name + '_e{}'.format(epoch))
else:
torch.save(model.state_dict(), os.path.join('runs', opts.name + '_e{}'.format(epoch)))
def train(opts, train_loader, unlabel_loader, model, criterion, optimizer, epoch, use_gpu):
losses = AverageMeter()
losses_x = AverageMeter()
losses_un = AverageMeter()
weight_scale = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
avg_loss = 0.0
avg_top1 = 0.0
avg_top5 = 0.0
model.train()
nCnt =0
labeled_train_iter = iter(train_loader)
unlabeled_train_iter = iter(unlabel_loader)
for batch_idx in range(len(train_loader)):
try:
data = labeled_train_iter.next()
inputs_x, targets_x = data
except:
labeled_train_iter = iter(train_loader)
data = labeled_train_iter.next()
inputs_x, targets_x = data
try:
data = unlabeled_train_iter.next()
inputs_u1, inputs_u2 = data
except:
unlabeled_train_iter = iter(unlabel_loader)
data = unlabeled_train_iter.next()
inputs_u1, inputs_u2 = data
batch_size = inputs_x.size(0)
batch_size_u = inputs_u1.size(0)
# Transform label to one-hot
classno = NUM_CLASSES
targets_org = targets_x
targets_x = torch.zeros(batch_size, classno).scatter_(1, targets_x.view(-1,1), 1)
if use_gpu :
inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda()
inputs_u1, inputs_u2 = inputs_u1.cuda(), inputs_u2.cuda()
inputs_x, targets_x = Variable(inputs_x), Variable(targets_x)
inputs_u1, inputs_u2 = Variable(inputs_u1), Variable(inputs_u2)
with torch.no_grad():
# compute guessed labels of unlabel samples
embed_u1, pred_u1 = model(inputs_u1)
embed_u2, pred_u2 = model(inputs_u2)
pred_u_all = (torch.softmax(pred_u1, dim=1) + torch.softmax(pred_u2, dim=1)) / 2
pt = pred_u_all**(1/opts.T)
targets_u = pt / pt.sum(dim=1, keepdim=True)
targets_u = targets_u.detach()
# mixup
all_inputs = torch.cat([inputs_x, inputs_u1, inputs_u2], dim=0)
all_targets = torch.cat([targets_x, targets_u, targets_u], dim=0)
lamda = np.random.beta(opts.alpha, opts.alpha)
lamda = max(lamda, 1-lamda)
newidx = torch.randperm(all_inputs.size(0))
input_a, input_b = all_inputs, all_inputs[newidx]
target_a, target_b = all_targets, all_targets[newidx]
mixed_input = lamda * input_a + (1 - lamda) * input_b
mixed_target = lamda * target_a + (1 - lamda) * target_b
# interleave labeled and unlabed samples between batches to get correct batchnorm calculation
mixed_input = list(torch.split(mixed_input, batch_size))
mixed_input = interleave(mixed_input, batch_size)
optimizer.zero_grad()
fea, logits_temp = model(mixed_input[0])
logits = [logits_temp]
for newinput in mixed_input[1:]:
fea, logits_temp = model(newinput)
logits.append(logits_temp)
# put interleaved samples back
logits = interleave(logits, batch_size)
logits_x = logits[0]
logits_u = torch.cat(logits[1:], dim=0)
loss_x, loss_un, weights_mixing = criterion(logits_x, mixed_target[:batch_size], logits_u, mixed_target[batch_size:], epoch+batch_idx/len(train_loader), opts.epochs)
loss = loss_x + weights_mixing * loss_un
losses.update(loss.item(), inputs_x.size(0))
losses_x.update(loss_x.item(), inputs_x.size(0))
losses_un.update(loss_un.item(), inputs_x.size(0))
weight_scale.update(weights_mixing, inputs_x.size(0))
# compute gradient and do SGD step
loss.backward()
optimizer.step()
with torch.no_grad():
# compute guessed labels of unlabel samples
embed_x, pred_x1 = model(inputs_x)
acc_top1b = top_n_accuracy_score(targets_org.data.cpu().numpy(), pred_x1.data.cpu().numpy(), n=1)*100
acc_top5b = top_n_accuracy_score(targets_org.data.cpu().numpy(), pred_x1.data.cpu().numpy(), n=5)*100
acc_top1.update(torch.as_tensor(acc_top1b), inputs_x.size(0))
acc_top5.update(torch.as_tensor(acc_top5b), inputs_x.size(0))
avg_loss += loss.item()
avg_top1 += acc_top1b
avg_top5 += acc_top5b
if batch_idx % opts.log_interval == 0:
print('Train Epoch:{} [{}/{}] Loss:{:.4f}({:.4f}) Top-1:{:.2f}%({:.2f}%) Top-5:{:.2f}%({:.2f}%) '.format(
epoch, batch_idx *inputs_x.size(0), len(train_loader.dataset), losses.val, losses.avg, acc_top1.val, acc_top1.avg, acc_top5.val, acc_top5.avg))
nCnt += 1
avg_loss = float(avg_loss/nCnt)
avg_top1 = float(avg_top1/nCnt)
avg_top5 = float(avg_top5/nCnt)
nsml.report(summary=True, train_acc_top1= avg_top1, train_acc_top5=avg_top5, step=epoch)
return avg_loss, avg_top1, avg_top5
def validation(opts, validation_loader, model, epoch, use_gpu):
model.eval()
avg_top1= 0.0
avg_top5 = 0.0
nCnt =0
with torch.no_grad():
for batch_idx, data in enumerate(validation_loader):
inputs, labels = data
if use_gpu :
inputs = inputs.cuda()
inputs = Variable(inputs)
nCnt +=1
embed_fea, preds = model(inputs)
acc_top1 = top_n_accuracy_score(labels.numpy(), preds.data.cpu().numpy(), n=1)*100
acc_top5 = top_n_accuracy_score(labels.numpy(), preds.data.cpu().numpy(), n=5)*100
avg_top1 += acc_top1
avg_top5 += acc_top5
avg_top1 = float(avg_top1/nCnt)
avg_top5= float(avg_top5/nCnt)
print('Test Epoch:{} Top1_acc_val:{:.2f}% Top5_acc_val:{:.2f}% '.format(epoch, avg_top1, avg_top5))
return avg_top1, avg_top5
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
providers/ibm/cos.go | // Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"fmt"
"os"
"regexp"
"strings"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
bluemix "github.com/IBM-Cloud/bluemix-go"
"github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog"
"github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2"
"github.com/IBM-Cloud/bluemix-go/session"
"github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam"
ibmaws "github.com/IBM/ibm-cos-sdk-go/aws"
cossession "github.com/IBM/ibm-cos-sdk-go/aws/session"
coss3 "github.com/IBM/ibm-cos-sdk-go/service/s3"
)
type COSGenerator struct {
IBMService
}
func (g COSGenerator) loadCOS(cosID string, cosName string) terraformutils.Resource {
resources := terraformutils.NewSimpleResource(
cosID,
normalizeResourceName(cosName, false),
"ibm_resource_instance",
"ibm",
[]string{})
return resources
}
func (g COSGenerator) loadCOSBuckets(bucketID, bucketName string, dependsOn []string) terraformutils.Resource {
resources := terraformutils.NewResource(
bucketID,
normalizeResourceName(bucketName, false),
"ibm_cos_bucket",
"ibm",
map[string]string{},
[]string{},
map[string]interface{}{
"depends_on": dependsOn,
})
return resources
}
func (g *COSGenerator) InitResources() error {
bmxConfig := &bluemix.Config{
BluemixAPIKey: os.Getenv("IC_API_KEY"),
}
sess, err := session.New(bmxConfig)
if err != nil {
return err
}
catalogClient, err := catalog.New(sess)
if err != nil {
return err
}
controllerClient, err := controllerv2.New(sess)
if err != nil {
return err
}
serviceID, err := catalogClient.ResourceCatalog().FindByName("cloud-object-storage", true)
if err != nil {
return err
}
query := controllerv2.ServiceInstanceQuery{
ServiceID: serviceID[0].ID,
}
cosInstances, err := controllerClient.ResourceServiceInstanceV2().ListInstances(query)
if err != nil {
return err
}
authEndpoint := "https://iam.cloud.ibm.com/identity/token"
for _, cs := range cosInstances {
g.Resources = append(g.Resources, g.loadCOS(cs.ID, cs.Name))
csResourceName := g.Resources[len(g.Resources)-1:][0].ResourceName
s3Conf := ibmaws.NewConfig().WithCredentials(ibmiam.NewStaticCredentials(ibmaws.NewConfig(), authEndpoint, os.Getenv("IC_API_KEY"), cs.ID)).WithS3ForcePathStyle(true).WithEndpoint("s3.us-south.cloud-object-storage.appdomain.cloud")
s3Sess := cossession.Must(cossession.NewSession())
s3Client := coss3.New(s3Sess, s3Conf)
singleSiteLocationRegex := regexp.MustCompile("^[a-z]{3}[0-9][0-9]-[a-z]{4,8}$")
regionLocationRegex := regexp.MustCompile("^[a-z]{2}-[a-z]{2,5}-[a-z]{4,8}$")
crossRegionLocationRegex := regexp.MustCompile("^[a-z]{2}-[a-z]{4,8}$")
d, _ := s3Client.ListBucketsExtended(&coss3.ListBucketsExtendedInput{})
for _, b := range d.Buckets {
var dependsOn []string
dependsOn = append(dependsOn,
"ibm_resource_instance."+csResourceName)
var apiType, location string
bLocationConstraint := *b.LocationConstraint
if singleSiteLocationRegex.MatchString(bLocationConstraint) {
apiType = "ss1"
location = strings.Split(bLocationConstraint, "-")[0]
}
if regionLocationRegex.MatchString(bLocationConstraint) {
apiType = "rl"
location = fmt.Sprintf("%s-%s", strings.Split(bLocationConstraint, "-")[0], strings.Split(bLocationConstraint, "-")[1])
}
if crossRegionLocationRegex.MatchString(bLocationConstraint) {
apiType = "crl"
location = strings.Split(bLocationConstraint, "-")[0]
}
bucketID := fmt.Sprintf("%s:%s:%s:meta:%s:%s", strings.ReplaceAll(cs.ID, "::", ""), "bucket", *b.Name, apiType, location)
g.Resources = append(g.Resources, g.loadCOSBuckets(bucketID, *b.Name, dependsOn))
}
}
return nil
}
| ["\"IC_API_KEY\"", "\"IC_API_KEY\""] | [] | ["IC_API_KEY"] | [] | ["IC_API_KEY"] | go | 1 | 0 | |
factsumm/factsumm.py | import logging
import os
from itertools import permutations
from typing import Dict, List, Set, Tuple, Union
import pysbd
from rich import print
from sumeval.metrics.rouge import RougeCalculator
from factsumm.utils.module_entity import load_ie, load_ner, load_rel
from factsumm.utils.module_question import load_qa, load_qg
from factsumm.utils.module_sentence import load_bert_score
from factsumm.utils.utils import Config, qags_score
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logging.getLogger("transformers").setLevel(logging.ERROR)
logging.getLogger("flair").setLevel(logging.ERROR)
class FactSumm:
def __init__(
self,
ner_model: str = None,
rel_model: str = None,
qg_model: str = None,
qa_model: str = None,
bert_score_model: str = None,
):
"""
FactSumm object used to calculate Factual Consistency score of Abstractive Summarization model
Args:
ner_model (str, optional): NER model to be used (Flair or HuggingFace). Defaults to None.
rel_model (str, optional): RE model to be used (HuggingFace). Defaults to None.
            qg_model (str, optional): QG model to be used (HuggingFace). Defaults to None.
            qa_model (str, optional): QA model to be used (HuggingFace). Defaults to None.
bert_score_model (str, optional): BERTScore model to be used (HuggingFace). Defaults to None.
"""
self.config = Config()
self.segmenter = pysbd.Segmenter(language="en", clean=False)
self.rouge = RougeCalculator(stopwords=True, lang="en")
# NER, RE, QG, QA models supported by HuggingFace can be used (default can be found in `config.py`)
self.ner = ner_model if ner_model is not None else self.config.NER_MODEL
self.rel = rel_model if rel_model is not None else self.config.REL_MODEL
self.qg = qg_model if qg_model is not None else self.config.QG_MODEL
self.qa = qa_model if qa_model is not None else self.config.QA_MODEL
self.bert_score = bert_score_model if bert_score_model is not None else self.config.BERT_SCORE_MODEL
self.ie = None
def build_perm(
self,
lines: List[str],
total_entities: Union[List[Dict], List[List[Dict]]],
) -> List:
"""
Build entity permutations for Relation Extraction
Args:
lines (List[str]): segmented document lines
total_entities (Union[List[Dict], List[List[Dict]]]): list of total entities
Returns:
List: list of permutations
"""
total_perms = list()
for line, line_entities in zip(lines, total_entities):
line_perms = list(permutations(line_entities, 2))
line_perms = [{
"text":
line,
"spans": [
(comb[0]["start"], comb[0]["end"]),
(comb[-1]["start"], comb[-1]["end"]),
]
} for comb in line_perms]
total_perms.append(line_perms)
return total_perms
def get_facts(self, lines: List[str], entities: List[List[Dict]]) -> Set:
"""
Get fact triples using Relation Extraction model
Args:
lines (List[str]): segmented document lines
entities (List[List[Dict]]): list of total entities
Returns:
Set: set of relation inferenced from permutations
"""
perms = self.build_perm(lines, entities)
triples = list()
for perm in perms:
triples.extend(self.rel(perm))
return set(triples)
def _segment(self, text: str) -> List[str]:
"""
Segment input text into (possibly) multiple sentences
Args:
text (str): text to be segmented
Returns:
List[str]: list of segmented lines
"""
return [line.strip() for line in self.segmenter.segment(text)]
def _print_entities(self, mode: str, total_entities: List[List[Dict]]):
# yapf:disable
print(f"{mode.upper()} Entities")
for i, line_entities in enumerate(total_entities):
print(f'{i+1}: {[(entity["word"], entity["entity"]) for entity in line_entities]}')
print()
# yapf:enable
def calculate_rouge(
self,
source: str,
summary: str,
) -> Tuple[float, float, float]:
"""
Calculate ROUGE score
Args:
source (str): original source
summary (str): generated summary
Returns:
Tuple: (ROUGE-1, ROUGE-2, ROUGE-L) tuple
"""
source_lines = self._segment(source)
rouge_1 = self.rouge.rouge_n(summary, source_lines, 1)
rouge_2 = self.rouge.rouge_n(summary, source_lines, 2)
rouge_l = self.rouge.rouge_l(summary, source_lines)
print(
f"Avg. ROUGE-1: {rouge_1}\nAvg. ROUGE-2: {rouge_2}\nAvg. ROUGE-L: {rouge_l}"
)
return rouge_1, rouge_2, rouge_l
def _print_facts(self, mode: str, facts: Set[Tuple]):
print(f"{mode.upper()} Facts")
for fact in facts:
print(fact)
print()
def _filter_out(self, sources: Set, summaries: Set) -> Tuple[Set, Set]:
"""
Filter out triples that don't share a subject and relation for comparability
Args:
sources (Set): set of triples from source
summaries (Set): set of triples from summary
Returns:
Tuple[Set, Set]: filtered sources and summaries
"""
source_tuple = {(source[0], source[1]) for source in sources}
summary_tuple = {(summary[0], summary[1]) for summary in summaries}
sources = {
source for source in sources
if (source[0], source[1]) in summary_tuple
}
summaries = {
summary for summary in summaries
if (summary[0], summary[1]) in source_tuple
}
return sources, summaries
def extract_facts(
self,
source: str,
summary: str,
verbose: bool = False,
device: str = "cpu",
):
"""
Extract (head_entity, relation, tail_entity) relation triple using NER & RE module
See also https://arxiv.org/abs/1905.13322.pdf
Args:
source (str): original source
summary (str): generated summary
verbose (bool, optional): print verbose option. Defaults to False.
device (str): device info
"""
if isinstance(self.ner, str) and isinstance(self.rel, str):
self.ner = load_ner(self.ner, device)
self.rel = load_rel(self.rel, device)
source_lines = self._segment(source)
summary_lines = self._segment(summary)
# extract per-line entities
source_ents = self.ner(source_lines)
summary_ents = self.ner(summary_lines)
# extract entity-based triple: (head, relation, tail)
source_facts = self.get_facts(source_lines, source_ents)
summary_facts = self.get_facts(summary_lines, summary_ents)
# filter out some facts
source_facts, summary_facts = self._filter_out(
source_facts,
summary_facts,
)
common_facts = summary_facts.intersection(source_facts)
diff_facts = summary_facts.difference(source_facts)
if verbose:
self._print_entities("source", source_ents)
self._print_entities("summary", summary_ents)
self._print_facts("source", source_facts)
self._print_facts("summary", summary_facts)
self._print_facts("common", common_facts)
self._print_facts("diff", diff_facts)
if not summary_facts:
fact_score = 0.0
else:
fact_score = len(common_facts) / len(summary_facts)
print(f"Fact Score: {fact_score}")
return source_ents, summary_ents, fact_score
def _print_qas(self, mode: str, questions: List[Dict]):
# yapf:disable
print(f"Answers based on {mode.upper()} (Questions are generated from Summary)")
for question in questions:
print(f"[Q] {question['question']}\t[Pred] {question['prediction']}")
print()
# yapf:enable
def extract_qas(
self,
source: str,
summary: str,
source_ents: List = None,
summary_ents: List = None,
verbose: bool = False,
device: str = "cpu",
) -> float:
"""
Extract Question & Answering Pair generated from Question Generation module
See also https://arxiv.org/abs/2004.04228
Args:
source (str): original source
summary (str): generated summary
source_ents (List, optional): named entities extracted from source. Defaults to None.
            summary_ents (List, optional): named entities extracted from summary. Defaults to None.
verbose (bool, optional): print verbose option. Defaults to False.
device (str): device info
"""
if isinstance(self.qg, str) and isinstance(self.qa, str):
self.qg = load_qg(self.qg, device)
self.qa = load_qa(self.qa, device)
if isinstance(self.ner, str):
self.ner = load_ner(self.ner, device)
source_lines = self._segment(source)
summary_lines = self._segment(summary)
if source_ents is None:
source_ents = self.ner(source_lines)
if summary_ents is None:
summary_ents = self.ner(summary_lines)
summary_qas = self.qg(summary_lines, summary_ents)
source_answers = self.qa(source, summary_qas)
summary_answers = self.qa(summary, summary_qas)
if verbose:
self._print_qas("source", source_answers)
self._print_qas("summary", summary_answers)
qa_score = qags_score(source_answers, summary_answers)
print(f"QAGS Score: {qa_score}\n")
return qa_score
def _print_triples(self, mode: str, triples: Set):
print(f"{mode.upper()} Triples")
for triple in triples:
print(triple)
print()
def extract_triples(self, source: str, summary: str, verbose: bool = False):
"""
Extract OpenIE based fact triples
Args:
source (str): original source
summary (str): generated summary
verbose (bool, optional): print verbose option. Defaults to False.
"""
if self.ie is None:
self.ie = load_ie()
source_triples = {(
triple["subject"],
triple["relation"],
triple["object"],
) for triple in self.ie(source)}
summary_triples = {(
triple["subject"],
triple["relation"],
triple["object"],
) for triple in self.ie(summary)}
source_triples, summary_triples = self._filter_out(
source_triples,
summary_triples,
)
if verbose:
self._print_triples("source", source_triples)
self._print_triples("summary", summary_triples)
common_triples = summary_triples.intersection(source_triples)
if not summary_triples:
triple_score = 0.0
else:
triple_score = len(common_triples) / len(summary_triples)
print(f"Triple Score: {triple_score}\n")
return triple_score
def calculate_bert_score(
self,
source: str,
summary: str,
device: str = "cpu",
) -> List[float]:
"""
Calculate BERTScore
See also https://arxiv.org/abs/2005.03754
Args:
source (str): original source
summary (str): generated summary
device (str): device info
Returns:
List: (Precision, Recall, F1) BERTScore list
"""
if isinstance(self.bert_score, str):
self.bert_score = load_bert_score(self.bert_score, device)
        # BUG: when len(source_lines) == 1, a bmm error is raised
source_lines = self._segment(source)
summary_lines = [summary, "dummy"]
scores = self.bert_score(summary_lines, source_lines)
filtered_scores = list()
for score in scores:
score = score.tolist()
score.pop(-1)
filtered_scores.append(sum(score) / len(score))
print(
f"BERTScore Score\nPrecision: {filtered_scores[0]}\nRecall: {filtered_scores[1]}\nF1: {filtered_scores[2]}"
)
return filtered_scores
def __call__(
self,
sources: Union[List[str], str],
summaries: Union[List[str], str],
verbose: bool = False,
device: str = "cpu",
) -> Dict:
if isinstance(sources, str) and isinstance(summaries, str):
sources = [sources]
summaries = [summaries]
if len(sources) != len(summaries):
# yapf:disable
raise ValueError("`sources` and `summaries` must have the same number of elements!")
# yapf:enable
num_pairs = len(sources)
fact_scores = 0
qags_scores = 0
triple_scores = 0
rouges = [0, 0, 0]
bert_scores = [0, 0, 0]
for source, summary in zip(sources, summaries):
source_ents, summary_ents, fact_score = self.extract_facts(
source,
summary,
verbose,
device,
)
fact_scores += fact_score
qags_score = self.extract_qas(
source,
summary,
source_ents,
summary_ents,
verbose,
device,
)
qags_scores += qags_score
triple_score = self.extract_triples(source, summary, verbose)
triple_scores += triple_score
rouge_1, rouge_2, rouge_l = self.calculate_rouge(source, summary)
rouges[0] += rouge_1
rouges[1] += rouge_2
rouges[2] += rouge_l
bert_score = self.calculate_bert_score(source, summary, device)
bert_scores[0] += bert_score[0]
bert_scores[1] += bert_score[1]
bert_scores[2] += bert_score[2]
return {
"fact_score": fact_scores / num_pairs,
"qa_score": qags_scores / num_pairs,
"triple_score": triple_scores / num_pairs,
"rouge": (
rouges[0] / num_pairs,
rouges[1] / num_pairs,
rouges[2] / num_pairs,
),
"bert_score": {
"precision": bert_scores[0],
"recall": bert_scores[1],
"f1": bert_scores[2],
},
}
| [] | [] | ["TOKENIZERS_PARALLELISM"] | [] | ["TOKENIZERS_PARALLELISM"] | python | 1 | 0 | |
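A minimal usage sketch for the FactSumm class above (the source/summary strings are illustrative; model names fall back to the defaults in Config):

from factsumm import FactSumm

factsumm = FactSumm()
source = "Lionel Messi is an Argentine footballer who plays for Paris Saint-Germain."
summary = "Messi is a French footballer."

# runs fact triples, QAGS, OpenIE triples, ROUGE and BERTScore in one call
scores = factsumm(source, summary, verbose=True, device="cpu")
print(scores["fact_score"], scores["qa_score"], scores["rouge"])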
openmdao/solvers/tests/test_solver_iprint.py | """ Unit test for the solver printing behavior. """
import os
import sys
import unittest
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import SubSellar
from openmdao.test_suite.components.sellar import SellarDerivatives
from openmdao.utils.general_utils import run_model
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class TestSolverPrint(unittest.TestCase):
def test_feature_iprint_neg1(self):
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDerivatives
prob = om.Problem()
prob.model = SellarDerivatives()
prob.setup()
newton = prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
scipy = prob.model.linear_solver = om.ScipyKrylov()
newton.options['maxiter'] = 2
# use a real bad initial guess
prob['y1'] = 10000
prob['y2'] = -26
newton.options['iprint'] = -1
scipy.options['iprint'] = -1
prob.run_model()
def test_feature_iprint_0(self):
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDerivatives
prob = om.Problem()
prob.model = SellarDerivatives()
prob.setup()
newton = prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
scipy = prob.model.linear_solver = om.ScipyKrylov()
newton.options['maxiter'] = 1
prob['y1'] = 10000
prob['y2'] = -26
newton.options['iprint'] = 0
scipy.options['iprint'] = 0
prob.run_model()
def test_feature_iprint_1(self):
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDerivatives
prob = om.Problem()
prob.model = SellarDerivatives()
prob.setup()
newton = prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
scipy = prob.model.linear_solver = om.ScipyKrylov()
newton.options['maxiter'] = 20
prob['y1'] = 10000
prob['y2'] = -26
newton.options['iprint'] = 1
scipy.options['iprint'] = 0
prob.run_model()
def test_feature_iprint_2(self):
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDerivatives
prob = om.Problem()
prob.model = SellarDerivatives()
prob.setup()
newton = prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
scipy = prob.model.linear_solver = om.ScipyKrylov()
newton.options['maxiter'] = 20
prob['y1'] = 10000
prob['y2'] = -20
newton.options['iprint'] = 2
scipy.options['iprint'] = 1
prob.run_model()
def test_hierarchy_iprint(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g1.linear_solver = om.LinearBlockGS()
g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g2.linear_solver = om.ScipyKrylov()
g2.linear_solver.precon = om.LinearBlockGS()
g2.linear_solver.precon.options['maxiter'] = 2
prob.set_solver_print(level=2)
prob.setup()
output = run_model(prob)
# TODO: check output
def test_hierarchy_iprint2(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NonlinearBlockGS()
g1.nonlinear_solver = om.NonlinearBlockGS()
g2.nonlinear_solver = om.NonlinearBlockGS()
prob.set_solver_print(level=2)
prob.setup()
output = run_model(prob)
# TODO: check output
def test_hierarchy_iprint3(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NonlinearBlockJac()
sub1.nonlinear_solver = om.NonlinearBlockJac()
sub2.nonlinear_solver = om.NonlinearBlockJac()
g1.nonlinear_solver = om.NonlinearBlockJac()
g2.nonlinear_solver = om.NonlinearBlockJac()
prob.set_solver_print(level=2)
prob.setup()
output = run_model(prob)
# TODO: check output
def test_feature_set_solver_print1(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import SubSellar
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g1.linear_solver = om.LinearBlockGS()
g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g2.linear_solver = om.ScipyKrylov()
g2.linear_solver.precon = om.LinearBlockGS()
g2.linear_solver.precon.options['maxiter'] = 2
prob.set_solver_print(level=2)
prob.setup()
prob.run_model()
def test_feature_set_solver_print2(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import SubSellar
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g1.linear_solver = om.LinearBlockGS()
g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g2.linear_solver = om.ScipyKrylov()
g2.linear_solver.precon = om.LinearBlockGS()
g2.linear_solver.precon.options['maxiter'] = 2
prob.set_solver_print(level=2)
prob.set_solver_print(level=-1, type_='LN')
prob.setup()
prob.run_model()
def test_feature_set_solver_print3(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import SubSellar
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g1.linear_solver = om.LinearBlockGS()
g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g2.linear_solver = om.ScipyKrylov()
g2.linear_solver.precon = om.LinearBlockGS()
g2.linear_solver.precon.options['maxiter'] = 2
prob.set_solver_print(level=0)
prob.set_solver_print(level=2, depth=2)
prob.setup()
prob.run_model()
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class MPITests(unittest.TestCase):
N_PROCS = 2
def test_hierarchy_iprint(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])))
sub1 = model.add_subsystem('sub1', om.Group())
sub2 = sub1.add_subsystem('sub2', om.Group())
g1 = sub2.add_subsystem('g1', SubSellar())
g2 = model.add_subsystem('g2', SubSellar())
model.connect('pz.z', 'sub1.sub2.g1.z')
model.connect('sub1.sub2.g1.y2', 'g2.x')
model.connect('g2.y2', 'sub1.sub2.g1.x')
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.LinearBlockGS()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g1.linear_solver = om.LinearBlockGS()
g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
g2.linear_solver = om.PETScKrylov()
g2.linear_solver.precon = om.LinearBlockGS()
g2.linear_solver.precon.options['maxiter'] = 2
prob.set_solver_print(level=2)
prob.setup()
# Conclude setup but don't run model.
prob.final_setup()
# if USE_PROC_FILES is not set, solver convergence messages
# should only appear on proc 0
output = run_model(prob)
if model.comm.rank == 0 or os.environ.get('USE_PROC_FILES'):
self.assertTrue(output.count('\nNL: Newton Converged') == 1)
else:
self.assertTrue(output.count('\nNL: Newton Converged') == 0)
if __name__ == "__main__":
unittest.main()
| [] | [] | ["USE_PROC_FILES"] | [] | ["USE_PROC_FILES"] | python | 1 | 0 | |
mephisto/abstractions/blueprints/mixins/screen_task_required.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import (
Optional,
Dict,
Any,
Union,
Iterable,
Callable,
Tuple,
cast,
Generator,
TYPE_CHECKING,
)
import types
from mephisto.abstractions.blueprint import BlueprintMixin
from dataclasses import dataclass, field
from omegaconf import MISSING, DictConfig
from mephisto.data_model.qualification import QUAL_NOT_EXIST
from mephisto.utils.qualifications import (
make_qualification_dict,
find_or_create_qualification,
)
if TYPE_CHECKING:
from mephisto.abstractions.blueprint import SharedTaskState
from mephisto.data_model.task_run import TaskRun
from mephisto.data_model.unit import Unit
from mephisto.data_model.packet import Packet
from mephisto.data_model.worker import Worker
from argparse import _ArgumentGroup as ArgumentGroup
@dataclass
class ScreenTaskRequiredArgs:
passed_qualification_name: str = field(
default=MISSING,
metadata={
"help": (
"Specify the name of a qualification used to designate "
"workers who have passed screening."
)
},
)
max_screening_units: int = field(
default=MISSING,
metadata={
"help": (
"The maximum number of screening units that can be launched "
"with this batch, specified to limit the number of validations "
"you may need to pay out for."
)
},
)
use_screening_task: bool = field(
default=False,
metadata={"help": ("Whether or not to use a screening task in this run.")},
)
ScreenUnitDataGenerator = Generator[Dict[str, Any], None, None]
def blank_generator():
while True:
yield {}
@dataclass
class ScreenTaskSharedState:
screening_data_factory: Tuple[bool, ScreenUnitDataGenerator] = field(
default_factory=lambda: blank_generator(),
metadata={
"help": (
"Either a generator that will create task data dicts to "
"be used as the `shared` field in InitializationData, or "
"the bool False to use real data in screening tasks."
),
"Type": "Tuple[bool, ScreenUnitDataGenerator]",
"default": "Generator that creates empty data forever",
},
)
class ScreenTaskRequired(BlueprintMixin):
"""
Compositional class for blueprints that may have a first task to
qualify workers who have never attempted the task before
"""
shared_state: "SharedTaskState"
ArgsMixin = ScreenTaskRequiredArgs
SharedStateMixin = ScreenTaskSharedState
def init_mixin_config(
self,
task_run: "TaskRun",
args: "DictConfig",
shared_state: "SharedTaskState",
) -> None:
assert isinstance(
shared_state, ScreenTaskSharedState
), "Must use ScreenTaskSharedState with ScreenTaskRequired blueprint"
return self.init_screening_config(task_run, args, shared_state)
def init_screening_config(
self,
task_run: "TaskRun",
args: "DictConfig",
shared_state: "ScreenTaskSharedState",
) -> None:
self.use_screening_task = args.blueprint.get("use_screening_task", False)
if not self.use_screening_task:
return
# Runs that are using a qualification task should be able to assign
# a specially generated unit to unqualified workers
self.passed_qualification_name = args.blueprint.passed_qualification_name
self.failed_qualification_name = args.blueprint.block_qualification
self.screening_data_factory: Tuple[
bool, ScreenUnitDataGenerator
] = shared_state.screening_data_factory
self.screening_units_launched = 0
self.screening_unit_cap = args.blueprint.max_screening_units
find_or_create_qualification(task_run.db, self.passed_qualification_name)
find_or_create_qualification(task_run.db, self.failed_qualification_name)
@classmethod
def assert_mixin_args(cls, args: "DictConfig", shared_state: "SharedTaskState"):
use_screening_task = args.blueprint.get("use_screening_task", False)
assert isinstance(
shared_state, ScreenTaskSharedState
), "Must use ScreenTaskSharedState with ScreenTaskRequired blueprint"
if not use_screening_task:
return
passed_qualification_name = args.blueprint.passed_qualification_name
failed_qualification_name = args.blueprint.block_qualification
assert args.task.allowed_concurrent == 1, (
"Can only run this task type with one allowed concurrent unit at a time per worker, to ensure "
"screening before moving into real units."
)
assert (
passed_qualification_name is not None
        ), "Must supply a passed_qualification_name in Hydra args to use a qualification task"
assert (
failed_qualification_name is not None
        ), "Must supply a block_qualification in Hydra args to use a qualification task"
assert hasattr(shared_state, "screening_data_factory"), (
"You must supply a screening_data_factory generator in your SharedTaskState to use "
"screening units, or False if you can screen on any tasks."
)
max_screening_units = args.blueprint.max_screening_units
assert max_screening_units is not None, (
"You must supply a blueprint.max_screening_units argument to set the maximum number of "
"additional units you will pay out for the purpose of screening new workers. Note that you "
"do pay for screening units, they are just like any other units."
)
screening_data_factory = shared_state.screening_data_factory
if screening_data_factory is not False:
assert isinstance(screening_data_factory, types.GeneratorType), (
"Must provide a generator function to SharedTaskState.screening_data_factory if "
"you want to generate screening tasks on the fly, or False if you can screen on any task "
)
def worker_needs_screening(self, worker: "Worker") -> bool:
"""Workers that are able to access the task (not blocked) but are not passed need qualification"""
return worker.get_granted_qualification(self.passed_qualification_name) is None
def should_generate_unit(self) -> bool:
return self.screening_data_factory is not False
def get_screening_unit_data(self) -> Optional[Dict[str, Any]]:
try:
if self.screening_units_launched >= self.screening_unit_cap:
return None # Exceeded the cap on these units
else:
data = next(
cast(
Generator[Dict[str, Any], None, None],
self.screening_data_factory,
)
)
self.screening_units_launched += 1
return data
except StopIteration:
return None # No screening units left...
@classmethod
def create_validation_function(
cls, args: "DictConfig", screen_unit: Callable[["Unit"], bool]
):
"""
Takes in a validator function to determine if validation units are
passable, and returns a `on_unit_submitted` function to be used
in the SharedTaskState
"""
passed_qualification_name = args.blueprint.passed_qualification_name
failed_qualification_name = args.blueprint.block_qualification
def _wrapped_validate(unit):
if unit.unit_index >= 0:
return # We only run validation on the validatable units
agent = unit.get_assigned_agent()
if agent is None:
return # Cannot validate a unit with no agent
validation_result = screen_unit(unit)
if validation_result is True:
agent.get_worker().grant_qualification(passed_qualification_name)
elif validation_result is False:
agent.get_worker().grant_qualification(failed_qualification_name)
return _wrapped_validate
@classmethod
def get_mixin_qualifications(
cls, args: "DictConfig", shared_state: "SharedTaskState"
):
"""Creates the relevant task qualifications for this task"""
passed_qualification_name = args.blueprint.passed_qualification_name
failed_qualification_name = args.blueprint.block_qualification
return [
make_qualification_dict(
failed_qualification_name,
QUAL_NOT_EXIST,
None,
)
]
| [] | [] | [] | [] | [] | python | null | null | null
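As a rough illustration of how a task script could hook into the screening mixin above, the sketch below feeds a generator into screening_data_factory and wraps a validator with create_validation_function; the cfg object, the shared_state instance, and the unit-data fields are assumptions, not part of this module:

def screening_unit_generator():
    while True:
        # data handed to each generated screening unit (fields are illustrative)
        yield {"is_screen": True}

def validate_screening_unit(unit):
    agent = unit.get_assigned_agent()
    if agent is None:
        return False
    # inspect the submitted work here and return True (pass) or False (fail)
    return True

shared_state.screening_data_factory = screening_unit_generator()
shared_state.on_unit_submitted = ScreenTaskRequired.create_validation_function(
    cfg.mephisto, validate_screening_unit
)
shared_state.qualifications += ScreenTaskRequired.get_mixin_qualifications(
    cfg.mephisto, shared_state
)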
src/lib/encrypt/encrypt.go | // Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
import (
"os"
"sync"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/lib/log"
)
var (
defaultKeyPath = "/etc/core/key"
)
// Encryptor encrypts or decrypts a string
type Encryptor interface {
// Encrypt encrypts plaintext
Encrypt(string) (string, error)
// Decrypt decrypts ciphertext
Decrypt(string) (string, error)
}
// AESEncryptor uses AES to encrypt or decrypt string
type AESEncryptor struct {
keyProvider KeyProvider
keyParams map[string]interface{}
}
// NewAESEncryptor returns an instance of an AESEncryptor
func NewAESEncryptor(keyProvider KeyProvider) Encryptor {
return &AESEncryptor{
keyProvider: keyProvider,
}
}
var encryptInstance Encryptor
var encryptOnce sync.Once
// Instance ... Get instance of encryptor
func Instance() Encryptor {
encryptOnce.Do(func() {
kp := os.Getenv("KEY_PATH")
if len(kp) == 0 {
kp = defaultKeyPath
}
log.Infof("the path of key used by key provider: %s", kp)
encryptInstance = NewAESEncryptor(NewFileKeyProvider(kp))
})
return encryptInstance
}
// Encrypt ...
func (a *AESEncryptor) Encrypt(plaintext string) (string, error) {
key, err := a.keyProvider.Get(a.keyParams)
if err != nil {
return "", err
}
return utils.ReversibleEncrypt(plaintext, key)
}
// Decrypt ...
func (a *AESEncryptor) Decrypt(ciphertext string) (string, error) {
key, err := a.keyProvider.Get(a.keyParams)
if err != nil {
return "", err
}
return utils.ReversibleDecrypt(ciphertext, key)
}
| ["\"KEY_PATH\""] | [] | ["KEY_PATH"] | [] | ["KEY_PATH"] | go | 1 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
#
# Setuptools documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 17 14:22:37 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import subprocess
import sys
import os
# hack to run the bootstrap script so that jaraco.packaging.sphinx
# can invoke setup.py
'READTHEDOCS' in os.environ and subprocess.check_call(
[sys.executable, 'bootstrap.py'],
cwd=os.path.join(os.path.dirname(__file__), os.path.pardir),
)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['jaraco.packaging.sphinx', 'rst.linker', 'sphinx.ext.autosectionlabel']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Setuptools.tex', 'Setuptools Documentation',
'The fellowship of the packaging', 'manual'),
]
link_files = {
'../CHANGES.rst': dict(
using=dict(
BB='https://bitbucket.org',
GH='https://github.com',
),
replace=[
dict(
pattern=r'(Issue )?#(?P<issue>\d+)',
url='{package_url}/issues/{issue}',
),
dict(
pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
),
dict(
pattern=r'Distribute #(?P<distribute>\d+)',
url='{BB}/tarek/distribute/issue/{distribute}',
),
dict(
pattern=r'Buildout #(?P<buildout>\d+)',
url='{GH}/buildout/buildout/issues/{buildout}',
),
dict(
pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
url='http://bugs.python.org/setuptools/issue{old_setuptools}',
),
dict(
pattern=r'Jython #(?P<jython>\d+)',
url='http://bugs.jython.org/issue{jython}',
),
dict(
pattern=r'Python #(?P<python>\d+)',
url='http://bugs.python.org/issue{python}',
),
dict(
pattern=r'Interop #(?P<interop>\d+)',
url='{GH}/pypa/interoperability-peps/issues/{interop}',
),
dict(
pattern=r'Pip #(?P<pip>\d+)',
url='{GH}/pypa/pip/issues/{pip}',
),
dict(
pattern=r'Packaging #(?P<packaging>\d+)',
url='{GH}/pypa/packaging/issues/{packaging}',
),
dict(
pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
),
dict(
pattern=r'PEP[- ](?P<pep_number>\d+)',
url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
),
dict(
pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
),
dict(
pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
),
],
),
}
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pytest_adaptavist/__init__.py | """This module provides a set of pytest hooks for generating Adaptavist test run results from test reports."""
from __future__ import annotations
import getpass
import logging
import os
from importlib.metadata import PackageNotFoundError, version
from typing import Any, Literal, NoReturn
import pytest
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.outcomes import Skipped, _with_exception
from _pytest.reports import TestReport
from ._atm_configuration import atm_user_is_valid
from ._helpers import get_code_base_url, get_option_ini
from ._pytest_adaptavist import PytestAdaptavist
from ._xdist import XdistHooks
from .constants import META_BLOCK_TIMEOUT
from .metablock import MetaBlock
from .types import MetaBlockFixture, MetaDataFixture
try:
__version__ = version("adaptavist")
except PackageNotFoundError:
# package is not installed - e.g. pulled and run locally
__version__ = "0.0.0"
def pytest_addoption(parser: Parser):
"""Add options to control plugin."""
group = parser.getgroup("adaptavist", "adaptavist test reporting")
def add_option_ini(option: str, dest: str, default: str | None = None, option_type: Literal['bool'] | None = None, **kwargs: Any):
group.addoption(option, dest=dest, **kwargs)
kwargs.pop("store", "")
parser.addini(dest, default=default, type=option_type, help="default value for " + option)
add_option_ini("--adaptavist", dest="adaptavist", option_type="bool", action="store_true", help="Enable adaptavist reporting (default: False).")
add_option_ini("--restrict-user", dest="restrict_user", help="Only send data to Adaptavist, if this user is executing the tests.")
add_option_ini("--restrict-branch",
dest="restrict_branch",
action="store_true",
option_type="bool",
help="Only send data to Adaptavist, if a certain branch is used.")
add_option_ini("--restrict-branch-name", dest="restrict_branch_name", default="origin/master", help="Branch to restrict to (default: origin/master)")
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config):
"""Configure reporting to Adaptavist and introduce pytest.block."""
# Register custom markers
config.addinivalue_line("markers", "testcase: mark test method as test case implementation (for internal use only)")
    config.addinivalue_line("markers", "project(project_key): mark test method to be related to given project (used to create appropriate test case key)")
config.addinivalue_line("markers", "block(reason): mark test method to be blocked")
adaptavist = PytestAdaptavist(config)
config.pluginmanager.register(adaptavist, "_adaptavist")
# Support for pytest.block
@_with_exception(Blocked)
def block(msg="") -> NoReturn:
__tracebackhide__ = True # pylint: disable=unused-variable
raise Blocked(msg=msg)
pytest.block = block # type: ignore
# Stop here, if pytest_adaptavist is not activated
if not get_option_ini(config, "adaptavist"):
return
# Configure xdist nodes, if pytest_xdist is installed
if config.pluginmanager.hasplugin("xdist"):
config.pluginmanager.register(XdistHooks(), "_xdist_adaptavist")
# Check, if user is known in Adaptavist
build_usr = getpass.getuser().lower()
if get_option_ini(config, "restrict_user") and get_option_ini(config, "restrict_user") != build_usr:
adaptavist.enabled = False
if not atm_user_is_valid(build_usr) and adaptavist.enabled:
logging.warning("Local user '%s' is not known in Jira. Test cases will be reported without an executor!", build_usr)
adaptavist.local_user = ""
# Store metadata for later usage (e.g. adaptavist traceability).
metadata = getattr(config, "_metadata", os.environ)
build_url = metadata.get("BUILD_URL")
jenkins_url = metadata.get("JENKINS_URL")
code_base = metadata.get("GIT_URL", get_code_base_url())
branch = metadata.get("GIT_BRANCH")
commit = metadata.get("GIT_COMMIT")
adaptavist.build_url = "/".join(build_url.split("/")[:5]) if build_url and jenkins_url and build_url.startswith(jenkins_url) else build_url
adaptavist.code_base = code_base.replace(":", "/").replace(".git", "").replace("git@", "https://") \
if code_base and code_base.startswith("git@") \
else code_base
# Check, if correct branch is used
if get_option_ini(config, "restrict_branch") and branch != get_option_ini(config, "restrict_branch_name"):
raise ValueError(
f'The branch "{branch}" cannot be used to report as reporting is restricted to "{get_option_ini(config, "restrict_branch_name")}" by configuration.'
)
# Print a header with useful information
if adaptavist.reporter:
adaptavist.reporter.section("ATM build meta data", bold=True)
adaptavist.reporter.line(f"build_usr: {build_usr or 'unknown'}")
adaptavist.reporter.line(f"build_url: {build_url or 'unknown'}")
adaptavist.reporter.line(
f"code_base: {code_base or 'unknown'} {(branch or 'unknown') if code_base else ''} {(commit or 'unknown') if code_base and branch else ''}")
adaptavist.reporter.line(f"reporting: {'enabled' if adaptavist.enabled else 'disabled'}")
@pytest.hookimpl(tryfirst=True)
def pytest_report_teststatus(report: TestReport) -> tuple[str, str, tuple[str, dict[str, bool]]] | None:
"""Return result-category, shortletter and verbose word for status reporting."""
if getattr(report, "blocked", False):
return "blocked", "b", ("BLOCKED", {"blue": True})
return None
class Blocked(Skipped):
"""Block exception used to abort test execution and set result status to 'Blocked'."""
@pytest.fixture
def meta_data(request: pytest.FixtureRequest) -> MetaDataFixture:
"""This can be used to store data inside of test methods."""
adaptavist: PytestAdaptavist = request.config.pluginmanager.getplugin("_adaptavist")
return adaptavist.test_result_data[request.node.nodeid]
@pytest.fixture
def meta_block(request: pytest.FixtureRequest) -> MetaBlockFixture:
"""
This fixture can be used to create reports for test blocks/steps immediately during test method call.
.. code-block:: python
with meta_block(step):
# do your thing here
pytest.assume(...)
"""
def get_meta_block(step: int | None = None, timeout: int = META_BLOCK_TIMEOUT) -> MetaBlock:
"""Return a meta block context to process single test blocks/steps."""
return MetaBlock(request, timeout=timeout, step=step)
return get_meta_block
| [] | [] | [] | [] | [] | python | null | null | null
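An illustrative test showing how the fixtures and pytest.block introduced above are meant to be used (assumes the pytest-assume plugin referenced in the meta_block docstring is installed; the test name and flag are made up):

import pytest

backend_available = False  # illustrative flag

def test_TC_1(meta_block):
    with meta_block(1):
        pytest.assume(1 + 1 == 2)  # reported as step 1 of the test case

    with meta_block(2, timeout=60):
        if not backend_available:
            pytest.block("backend not reachable in this environment")  # marks the case 'Blocked'
        pytest.assume(backend_available)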
pkg/cloud/airship/services/drydock/service.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drydock
import (
"os"
"github.com/go-openapi/strfmt"
"github.com/keleustes/cluster-api-provider-airship/pkg/cloud/airship/actuators"
httptransport "github.com/go-openapi/runtime/client"
apiclient "github.com/kubekit99/airship-go-api/drydock/client"
)
// Service holds a collection of interfaces.
// The interfaces are broken down like this to group functions together.
// One alternative is to have a large list of functions from the underlying API client.
type Service struct {
scope *actuators.Scope
airshipclient *apiclient.Drydock
}
// NewService returns a new service given the api clients.
func NewService(scope *actuators.Scope) *Service {
// create the transport
transport := httptransport.New(os.Getenv("TODOLIST_HOST"), "", nil)
// create the API client, with the transport
client := apiclient.New(transport, strfmt.Default)
return &Service{
scope: scope,
airshipclient: client,
}
}
| ["\"TODOLIST_HOST\""] | [] | ["TODOLIST_HOST"] | [] | ["TODOLIST_HOST"] | go | 1 | 0 | |
reporter.go | package iopipe
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
)
func getBaseURL(region string) string {
// array of supported regions so we can easily look up
// whether a region has its own collector
// using empty structs takes up no space versus using, say, a bool
supportedRegions := map[string]struct{}{
"ap-northeast-1": struct{}{},
"ap-southeast-2": struct{}{},
"eu-west-1": struct{}{},
"us-east-2": struct{}{},
"us-west-1": struct{}{},
"us-west-2": struct{}{},
}
url := "https://metrics-api.iopipe.com/"
if _, exists := supportedRegions[region]; exists {
url = fmt.Sprintf("https://metrics-api.%s.iopipe.com/", region)
}
return url
}
func sendReport(report *Report) error {
var (
err error
networkTimeout = 1 * time.Second
)
tr := &http.Transport{
DisableKeepAlives: false,
MaxIdleConns: 1, // is this equivalent to the maxCachedSessions in the js implementation
}
httpsClient := http.Client{Transport: tr, Timeout: networkTimeout}
reportJSONBytes, _ := json.Marshal(report) //.MarshalIndent(report, "", " ")
reqURL := getBaseURL(os.Getenv("AWS_REGION")) + "v0/event"
logger.Debug(string(reportJSONBytes))
req, err := http.NewRequest("POST", reqURL, bytes.NewReader(reportJSONBytes))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
res, err := httpsClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
resbody, err := ioutil.ReadAll(res.Body)
logger.Debug("body read from IOPIPE", string(resbody))
if err != nil {
return err
}
return nil
}
| ["\"AWS_REGION\""] | [] | ["AWS_REGION"] | [] | ["AWS_REGION"] | go | 1 | 0 | |
python/yb/download_and_extract_archive.py | #!/usr/bin/env python
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
"""
Downloads and extracts an archive with pre-built third-party dependencies.
"""
# This script should not use any non-standard modules and should run with Python 2 and Python 3.
# It could be run before the main Python interpreter we'll be using for most of our scripts is
# even installed.
import os
import sys
import re
import logging
import socket
import random
import atexit
import subprocess
import argparse
import tempfile
import time
import getpass
import platform
import fcntl
import errno
g_verbose = False
EXPECTED_ARCHIVE_EXTENSION = '.tar.gz'
CHECKSUM_EXTENSION = '.sha256'
def remove_ignore_errors(file_path):
file_path = os.path.abspath(file_path)
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
logging.warning("Error removing %s: %s, ignoring", file_path, e)
def run_cmd(args):
if g_verbose:
logging.info("Running command: %s", args)
try:
subprocess.check_call(args)
except: # noqa
logging.error("Error trying to run command: %s", args)
raise
def validate_sha256sum(checksum_str):
if not re.match(r'^[0-9a-f]{64}$', checksum_str):
        raise ValueError("Invalid SHA256 checksum: '%s', expected 64 hex characters" % checksum_str)
def read_file_and_strip(file_path):
with open(file_path) as f:
return f.read().strip()
def compute_sha256sum(file_path):
cmd_line = None
if sys.platform.startswith('linux'):
cmd_line = ['sha256sum', file_path]
elif sys.platform.startswith('darwin'):
cmd_line = ['shasum', '--algorithm', '256', file_path]
else:
raise ValueError("Don't know how to compute SHA256 checksum on platform %s" % sys.platform)
checksum_str = subprocess.check_output(cmd_line).strip().split()[0].decode('utf-8')
validate_sha256sum(checksum_str)
return checksum_str
def verify_sha256sum(checksum_file_path, data_file_path):
if not os.path.exists(checksum_file_path):
raise IOError("Checksum file does not exist: %s" % checksum_file_path)
if not os.path.exists(data_file_path):
        raise IOError("Data file does not exist: %s" % data_file_path)
if not checksum_file_path.endswith(CHECKSUM_EXTENSION):
raise ValueError("Checksum file path must end with '%s', got: %s" % (
CHECKSUM_EXTENSION, checksum_file_path))
# Guard against someone passing in the actual data file instead of the checksum file.
checksum_file_size = os.stat(checksum_file_path).st_size
if checksum_file_size > 4096:
raise IOError("Checksum file size is too big: %d bytes (file path: %s)" % (
checksum_file_size, checksum_file_path))
expected_checksum = read_file_and_strip(checksum_file_path).split()[0]
actual_checksum = compute_sha256sum(data_file_path)
if actual_checksum == expected_checksum:
return True
err_msg = "Invalid checksum for file %s: got %s, expected %s" % (
data_file_path, actual_checksum, expected_checksum)
logging.warning(err_msg)
return False
def download_url(url, dest_path):
start_time_sec = time.time()
logging.info("Downloading %s to %s", url, dest_path)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
raise IOError("Destination directory %s does not exist" % dest_dir)
run_cmd(['curl', '-LsS', url, '-o', dest_path])
if not os.path.exists(dest_path):
raise IOError("Failed to download %s: file %s does not exist" % (url, dest_path))
elapsed_sec = time.time() - start_time_sec
logging.info("Downloaded %s to %s in %.1fs" % (url, dest_path, elapsed_sec))
def move_file(src_path, dest_path):
if g_verbose:
logging.info("Trying to move file %s to %s", src_path, dest_path)
if not os.path.exists(src_path):
raise IOError("Does not exist: %s" % src_path)
if not os.path.isfile(src_path):
raise IOError("Not a file: %s" % src_path)
if os.path.isdir(dest_path):
raise IOError("Destination path can't be a directory: %s" % dest_path)
if os.path.exists(dest_path):
logging.warning("Destination path already exists: %s, moving %s there anyway" % (
dest_path, src_path))
dest_parent_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_parent_dir):
raise IOError("Destination directory %s does not exist" % dest_parent_dir)
os.rename(src_path, dest_path)
def check_dir_exists_and_is_writable(dir_path, description):
if not os.path.isdir(dir_path):
raise IOError("%s directory %s does not exist" % (description, dir_path))
if not os.access(dir_path, os.W_OK):
raise IOError("%s directory %s is not writable by current user (%s)" % (
description, dir_path, getpass.getuser()))
# From https://github.com/ianlini/mkdir-p/blob/master/mkdir_p/mkdir_p.py
def mkdir_p(path, mode=0o777):
try:
os.makedirs(path, mode=mode)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def exists_or_is_link(dest):
"""
A file could be a link to a non-existent directory, or to a directory owned by a different
user in a directory with sticky bit set. In such cases os.path.exists might return false, but
islink will return true.
"""
return os.path.exists(dest) or os.path.islink(dest)
def download_and_extract(url, dest_dir_parent, local_cache_dir, nfs_cache_dir):
tar_gz_name = os.path.basename(url)
checksum_file_name = tar_gz_name + CHECKSUM_EXTENSION
install_dir_name = tar_gz_name[:-len(EXPECTED_ARCHIVE_EXTENSION)]
dest_dir = os.path.join(dest_dir_parent, install_dir_name)
if os.path.isdir(dest_dir):
logging.info("Directory %s already exists, no need to install." % dest_dir)
return
if not os.path.isdir(local_cache_dir):
logging.info("Directory %s does not exist, trying to create", local_cache_dir)
try:
mkdir_p(local_cache_dir)
except Exception as ex:
logging.info("Failed creating directory '%s': %s", local_cache_dir, ex)
check_dir_exists_and_is_writable(local_cache_dir, "Local cache")
if not url.endswith(EXPECTED_ARCHIVE_EXTENSION):
        raise ValueError("Archive download URL is expected to end with %s, got: %s" % (
            EXPECTED_ARCHIVE_EXTENSION, url))
if os.path.isdir(dest_dir):
logging.info("Directory %s already exists, someone must have created it concurrently.",
dest_dir)
return
start_time_sec = time.time()
logging.info("Installing %s into directory %s", url, dest_dir)
tmp_dir_prefix = os.path.abspath(os.path.join(dest_dir_parent, install_dir_name + '.tmp.'))
mkdir_p(dest_dir_parent)
tmp_dir = tempfile.mkdtemp(prefix=tmp_dir_prefix)
def cleanup():
if os.path.isdir(tmp_dir):
run_cmd(['rm', '-rf', tmp_dir])
atexit.register(cleanup)
for cache_dir in [local_cache_dir, nfs_cache_dir]:
cached_tar_gz_path = os.path.join(cache_dir, tar_gz_name)
cached_checksum_path = cached_tar_gz_path + CHECKSUM_EXTENSION
tar_gz_path = None
if os.path.exists(cached_tar_gz_path) and os.path.exists(cached_checksum_path):
logging.info("Verifying the checksum of %s", cached_tar_gz_path)
if verify_sha256sum(cached_checksum_path, cached_tar_gz_path):
tar_gz_path = os.path.join(cache_dir, tar_gz_name)
break
else:
remove_ignore_errors(cached_tar_gz_path)
remove_ignore_errors(cached_checksum_path)
if tar_gz_path is None:
tmp_tar_gz_path = os.path.join(tmp_dir, tar_gz_name)
tmp_checksum_path = os.path.join(tmp_dir, checksum_file_name)
download_url(url + CHECKSUM_EXTENSION, tmp_checksum_path)
download_url(url, tmp_tar_gz_path)
if not verify_sha256sum(tmp_checksum_path, tmp_tar_gz_path):
raise ValueError("Checksum verification failed for the download of %s" % url)
file_names = [tar_gz_name, checksum_file_name]
for file_name in file_names:
move_file(os.path.join(tmp_dir, file_name),
os.path.join(local_cache_dir, file_name))
tar_gz_path = os.path.join(local_cache_dir, tar_gz_name)
nfs_tar_gz_path = os.path.join(nfs_cache_dir, tar_gz_name)
nfs_checksum_file_path = os.path.join(nfs_cache_dir, checksum_file_name)
if (os.path.isdir(nfs_cache_dir) and
os.access(nfs_cache_dir, os.W_OK) and
(not os.path.exists(nfs_tar_gz_path) or
not os.path.exists(nfs_checksum_file_path))):
for file_name in file_names:
run_cmd(['cp',
os.path.join(local_cache_dir, file_name),
os.path.join(nfs_cache_dir, file_name)])
logging.info("Extracting %s in %s", tar_gz_path, tmp_dir)
run_cmd(['tar', 'xf', tar_gz_path, '-C', tmp_dir])
tmp_extracted_dir = os.path.join(tmp_dir, install_dir_name)
if not os.path.exists(tmp_extracted_dir):
raise IOError(
"Extracted '%s' in '%s' but a directory named '%s' did not appear" % (
tar_gz_path, os.getcwd(), tmp_extracted_dir))
if exists_or_is_link(dest_dir):
logging.info("Looks like %s was created concurrently", dest_dir)
return
if install_dir_name.startswith('linuxbrew'):
orig_brew_home_file = os.path.join(tmp_extracted_dir, 'ORIG_BREW_HOME')
if not os.path.exists(orig_brew_home_file):
raise IOError("File '%s' not found after extracting '%s'" % (
orig_brew_home_file, tar_gz_name))
orig_brew_home = read_file_and_strip(orig_brew_home_file)
if not orig_brew_home.startswith(dest_dir):
raise ValueError(
"Original Homebrew/Linuxbrew install home directory is '%s'"
" but we are trying to install it in '%s', and that is not a prefix of"
" the former." % (orig_brew_home, dest_dir))
already_installed_msg = (
"'%s' already exists, cannot move '%s' to it. Someone else must have "
"installed it concurrently. This is OK." % (
orig_brew_home, dest_dir))
def create_brew_symlink_if_needed():
brew_link_src = os.path.basename(orig_brew_home)
# dest_dir will now be a symlink pointing to brew_link_src. We are NOT creating a
# symlink inside dest_dir.
if not exists_or_is_link(dest_dir):
logging.info("Creating a symlink '%s' -> '%s'", dest_dir, brew_link_src)
try:
os.symlink(brew_link_src, dest_dir)
except OSError as os_error:
if os_error.errno == errno.EEXIST:
if exists_or_is_link(dest_dir):
logging.info(
"Symlink '%s' was created concurrently. This is probably OK.",
dest_dir)
else:
err_msg = (
"Failed creating symlink '%s' -> '%s' with error: %s, but the "
"symlink does not actually exist!" % (
dest_dir, brew_link_src, os_error))
logging.error(err_msg)
raise IOError(err_msg)
else:
logging.error("Unexpected error when creating symlink '%s' -> '%s': %s",
dest_dir, brew_link_src, os_error)
raise os_error
assert exists_or_is_link(dest_dir)
if not os.path.islink(dest_dir):
# A defensive sanity check.
err_msg = "%s exists but is not a symbolic link" % dest_dir
logging.error(err_msg)
raise IOError(err_msg)
else:
actual_link_src = os.readlink(dest_dir)
if actual_link_src != brew_link_src:
err_msg = "Symlink %s is not pointing to %s but instead points to %s" % (
dest_dir, brew_link_src, actual_link_src)
logging.error(err_msg)
raise IOError(err_msg)
if os.path.exists(orig_brew_home):
logging.info(already_installed_msg)
create_brew_symlink_if_needed()
return
logging.info("Moving '%s' to '%s'" % (tmp_extracted_dir, orig_brew_home))
try:
os.rename(tmp_extracted_dir, orig_brew_home)
except IOError as io_error:
# A defensive sanity check in case locking is not working properly.
            if io_error.errno == errno.ENOTEMPTY:
# For whatever reason, this is what we get when the destination directory
# already exists.
logging.info(already_installed_msg)
create_brew_symlink_if_needed()
return
create_brew_symlink_if_needed()
else:
if g_verbose:
logging.info("Moving %s to %s", tmp_extracted_dir, dest_dir)
os.rename(tmp_extracted_dir, dest_dir)
logging.info("Installation of %s took %.1f sec", dest_dir, time.time() - start_time_sec)
def main():
# Created files/directories should be writable by the group.
os.umask(2)
logging.basicConfig(
level=logging.INFO,
format="%(filename)s:%(lineno)d " + socket.gethostname() + " pid " + str(os.getpid()) +
" %(asctime)s %(levelname)s: %(message)s")
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--url', help='URL to download. Must end with .tar.gz.', required=True)
parser.add_argument(
'--dest-dir-parent', help='Parent directory in which to extract the archive',
required=True)
parser.add_argument(
'--local-cache-dir',
default='/opt/yb-build/download_cache',
help='Download cache on the local disk')
parser.add_argument(
'--nfs-cache-dir',
default='/Volumes/n/jenkins/download_cache',
help='Download cache on NFS')
parser.add_argument('--verbose', action='store_true', help='Verbose logging')
args = parser.parse_args()
if args.verbose or os.getenv('YB_VERBOSE') == '1':
global g_verbose
g_verbose = True
download_and_extract(
url=args.url,
dest_dir_parent=args.dest_dir_parent,
local_cache_dir=args.local_cache_dir,
nfs_cache_dir=args.nfs_cache_dir)
if __name__ == '__main__':
main()
| []
| []
| [
"YB_VERBOSE"
]
| [] | ["YB_VERBOSE"] | python | 1 | 0 | |
app.py | # encoding: utf-8
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
app = Flask(__name__)
line_bot_api = LineBotApi('TLJcaOv30g5m7NuIwyQL9VZFO/yMvRhLE3x9dOKJEuU+AQ6/58UgRREjmkmQk18A6Rw7n/YmZuTgkM28QfQZLV5lE96CG64u5i78jDlfQwywTCS1+mpjgrQIAT59yG7zTpXbEwD+HPYf2gWohsfVaQdB04t89/1O/w1cDnyilFU=') #Your Channel Access Token
handler = WebhookHandler('1e00ec81e2dcd53cbce5fe50f4343485') #Your Channel Secret
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
text = event.message.text #message from user
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=text)) #reply the same message from user
import os
if __name__ == "__main__":
app.run(host='0.0.0.0',port=os.environ['PORT'])
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
src/webserver/storage/storage.go | package storage
import (
"fmt"
"log"
"os"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
func ConnectPsql() *gorm.DB {
connStr := fmt.Sprintf("postgres://%v:%v@%v:%v/%v?sslmode=disable",
os.Getenv("POSTGRES_USER"),
os.Getenv("POSTGRES_PASSWORD"),
"minitwit_db",
5432,
os.Getenv("POSTGRES_DB"))
db, err := gorm.Open("postgres", connStr)
if err != nil {
log.Fatalf("psql.go/ConnectPsql(): Failed to connect to PSQL: %s", err)
}
err = db.DB().Ping()
if err != nil {
log.Fatalf("Failed to ping DB: %s", err)
}
return db
}
func Migrate(db *gorm.DB) {
db.AutoMigrate(&User{}, &Message{})
}
| [
"\"POSTGRES_USER\"",
"\"POSTGRES_PASSWORD\"",
"\"POSTGRES_DB\""
]
| []
| [
"POSTGRES_PASSWORD",
"POSTGRES_USER",
"POSTGRES_DB"
]
| [] | ["POSTGRES_PASSWORD", "POSTGRES_USER", "POSTGRES_DB"] | go | 3 | 0 | |
impressiveUploader/impressiveUploader/wsgi.py | """
WSGI config for impressiveUploader project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "impressiveUploader.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
#
# Django Grappelli documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 5 19:11:46 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Grappelli'
copyright = u'2015, Patrick Kranzlmueller'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.12.2'
# The full version, including alpha/beta/rc tags.
release = '2.12.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinx_grappelli'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['.']
#html_theme_path = ["_themes",]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoGrappellidoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoGrappelli.tex', u'Django Grappelli Documentation',
u'Patrick Kranzlmueller', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangograppelli', u'Django Grappelli Documentation',
[u'Patrick Kranzlmueller'], 1)
]
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
soracom/generated/cmd/vpg_list_packet_capture_sessions.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"net/url"
"os"
"github.com/spf13/cobra"
)
// VpgListPacketCaptureSessionsCmdLastEvaluatedKey holds value of 'last_evaluated_key' option
var VpgListPacketCaptureSessionsCmdLastEvaluatedKey string
// VpgListPacketCaptureSessionsCmdVpgId holds value of 'vpg_id' option
var VpgListPacketCaptureSessionsCmdVpgId string
// VpgListPacketCaptureSessionsCmdLimit holds value of 'limit' option
var VpgListPacketCaptureSessionsCmdLimit int64
func init() {
VpgListPacketCaptureSessionsCmd.Flags().StringVar(&VpgListPacketCaptureSessionsCmdLastEvaluatedKey, "last-evaluated-key", "null", TRAPI("ID of the last group in the previous page"))
VpgListPacketCaptureSessionsCmd.Flags().StringVar(&VpgListPacketCaptureSessionsCmdVpgId, "vpg-id", "", TRAPI("VPG ID"))
VpgListPacketCaptureSessionsCmd.Flags().Int64Var(&VpgListPacketCaptureSessionsCmdLimit, "limit", 10, TRAPI("Max number of results in a response"))
VpgCmd.AddCommand(VpgListPacketCaptureSessionsCmd)
}
// VpgListPacketCaptureSessionsCmd defines 'list-packet-capture-sessions' subcommand
var VpgListPacketCaptureSessionsCmd = &cobra.Command{
Use: "list-packet-capture-sessions",
Short: TRAPI("/virtual_private_gateways/{vpg_id}/packet_capture_sessions:get:summary"),
Long: TRAPI(`/virtual_private_gateways/{vpg_id}/packet_capture_sessions:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectVpgListPacketCaptureSessionsCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectVpgListPacketCaptureSessionsCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("vpg_id", "vpg-id", "path", parsedBody, VpgListPacketCaptureSessionsCmdVpgId)
if err != nil {
return nil, err
}
return &apiParams{
method: "GET",
path: buildPathForVpgListPacketCaptureSessionsCmd("/virtual_private_gateways/{vpg_id}/packet_capture_sessions"),
query: buildQueryForVpgListPacketCaptureSessionsCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForVpgListPacketCaptureSessionsCmd(path string) string {
escapedVpgId := url.PathEscape(VpgListPacketCaptureSessionsCmdVpgId)
path = strReplace(path, "{"+"vpg_id"+"}", escapedVpgId, -1)
return path
}
func buildQueryForVpgListPacketCaptureSessionsCmd() url.Values {
result := url.Values{}
if VpgListPacketCaptureSessionsCmdLastEvaluatedKey != "null" {
result.Add("last_evaluated_key", VpgListPacketCaptureSessionsCmdLastEvaluatedKey)
}
if VpgListPacketCaptureSessionsCmdLimit != 10 {
result.Add("limit", sprintf("%d", VpgListPacketCaptureSessionsCmdLimit))
}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
uber_swagger.py | import re
from collections import defaultdict
from pram_flask.REST_UBER.swagger_ui import ApiSpec, Operation, OperationResponses, OperationParameters
from werkzeug.routing import parse_rule
import os
import logging
PROJECT_ROOT = os.environ['PROJECT_ROOT']
def swagger(app):
output = {
"swagger": "2.0",
"info": {
"title": u"\u00FCtool API Documentation",
"description": "Welcome to the EPA's utool interactive RESTful API documentation.",
# "termsOfService": "",
#"contact": {
# "name": u"\u00FCbertool Development Team",
# # "url": "",
# "email": "[email protected]",
#},
# "license": {
# "name": "",
# "url": ""
# },
"version": "0.0.1"
},
"paths": defaultdict(dict),
"definitions": defaultdict(dict),
"tags": []
}
paths = output['paths']
definitions = output['definitions']
tags = output['tags']
# TODO: Are these needed (from 'flask_swagger')
ignore_http_methods = {"HEAD", "OPTIONS"}
# technically only responses is non-optional
optional_fields = ['tags', 'consumes', 'produces', 'schemes', 'security',
'deprecated', 'operationId', 'externalDocs']
# Loop over the Flask-RESTful endpoints being served (called "rules"...e.g. /terrplant/)
for rule in app.url_map.iter_rules():
endpoint = app.view_functions[rule.endpoint]
print(endpoint)
try:
class_name = endpoint.view_class()
except AttributeError:
continue # skip to next iteration in for-loop ("rule" does not contain an ubertool REST endpoint)
try:
inputs = class_name.get_model_inputs().__dict__
outputs = class_name.get_model_outputs().__dict__
        except AttributeError as err:
            # This endpoint does not have get_model_inputs() or get_model_outputs()
            logging.exception(err)
continue # skip to next iteration, as this is not an ubertool endpoint
# TODO: Logic for UBERTOOL API ENDPOINTS - Move to separate function for code clarity???
methods = {}
for http_method in rule.methods.difference(ignore_http_methods):
if hasattr(endpoint, 'methods') and http_method in endpoint.methods:
http_method = http_method.lower()
methods[http_method] = endpoint.view_class.__dict__.get(http_method)
else:
methods[http_method.lower()] = endpoint
# Extract the Rule argument from URL endpoint (e.g. /<jobId>)
rule_param = None
for converter, arguments, variable in parse_rule(str(rule)): # rule must already be converted to a string
if converter:
rule_param = variable
# Get model name
model_name = class_name.name
# Instantiate ApiSpec() class for current endpoint and parse YAML for initial class instance properties
api_spec = ApiSpec(model_name)
# This has to be at the end of the for-loop because it converts the 'rule' object to a string
# Rule = endpoint URL relative to hostname; needs to have special characters escaped to be defaultdict key
rule = str(rule)
for arg in re.findall('(<(.*?\:)?(.*?)>)', rule):
rule = rule.replace(arg[0], '{{{0!s}}}'.format(arg[2]))
# For each Rule (endpoint) iterate over its HTTP methods (e.g. POST, GET, PUT, etc...)
for http_method, handler_method in methods.items():
if http_method == 'post':
# Instantiate new Operation class
operation = Operation()
# Create Operations object from YAML
operation.yaml_operation_parse(
os.path.join(PROJECT_ROOT, 'REST_UBER', model_name + '_rest', 'apidoc.yaml',),
model_name
)
api_spec.paths.add_operation(operation)
# Append Rule parameter name to parameters list if needed
if rule_param:
param = {
'in': "path",
'name': rule_param,
'description': "Job ID for model run",
'required': True,
"type": "string"
}
# api_spec.parameters = [param] + api_spec.parameters
operation.parameters.insert(0, param)
# api_spec.parameters.append(param)
# Update the 'path' key in the Swagger JSON with the 'operation'
paths[rule].update({'post': operation.__dict__})
# Append the 'tag' (top-level) JSON for each rule/endpoint
tag = api_spec.tags.create_tag(model_name, model_name.capitalize() + ' Model')
tags.append(tag)
# TODO: Definitions JSON; move to separate class
definition_template_inputs = {
'type': "object",
'properties': {
'inputs': {
"type": "object",
"properties": {}
},
'run_type': {
"type": 'string',
"example": "single"
}
}
}
definition_template_outputs = {
'type': "object",
'properties': {
'user_id': {
'type': 'string',
},
'inputs': {
# inputs_json
'type': 'object',
'properties': {}
},
'outputs': {
# outputs_json
'type': 'object',
'properties': {}
},
'exp_out': {
# exp_out_json
'type': 'object',
'properties': {}
},
'_id': {
'type': 'string',
},
'run_type': {
'type': 'string',
}
}
}
model_def = {
model_name.capitalize() + "Inputs": definition_template_inputs,
model_name.capitalize() + "Outputs": definition_template_outputs
}
for k, v in inputs.items():
# Set the inputs to the input and output definition template
model_def[model_name.capitalize() + "Inputs"]['properties']['inputs']['properties'][k] = \
model_def[model_name.capitalize() + "Outputs"]['properties']['inputs']['properties'][k] = {
"type": "object",
"properties": {
"0": {
# 'type' is JSON data type (e.g. 'number' is a float; 'string' is a string or binary)
"type": 'string' if str(v.dtype) == 'object' else 'number',
# 'format' is an optional modifier for primitives
"format": 'string' if str(v.dtype) == 'object' else 'float'
}
}
}
for k, v in outputs.items():
# Set the outputs to the output definition template
model_def[model_name.capitalize() + "Outputs"]['properties']['outputs']['properties'][k] = {
"type": "object",
"properties": {
"0": {
"type": 'string' if str(v.dtype) == 'object' else 'number',
"format": 'string' if str(v.dtype) == 'object' else 'float'
}
}
}
definitions.update(model_def)
if http_method == 'get':
# Instantiate new Operation class
operation = Operation(
tags=[model_name],
summary="Returns " + model_name.capitalize() + " JSON schema",
description="Returns the JSON schema needed by the POST method to run " + model_name.capitalize() +
" model",
parameters=[],
produces=['application/json'],
responses=OperationResponses(
200,
"Returns model input schema required for POST method",
schema={
"allOf": [
{
"$ref": "#/definitions/" + model_name.capitalize() + "Outputs"
},
{
"type": "object",
"properties": {
"notes": {
"type": "object",
"properties": {
"info": {'type': 'string'},
"POST": {'type': 'string'},
"GET": {'type': 'string'},
"www": {'type': 'string'}
}
},
}
}
]
}
).get_json()
)
paths[rule].update({'get': operation.__dict__})
return output
| []
| []
| [
"PROJECT_ROOT"
]
| [] | ["PROJECT_ROOT"] | python | 1 | 0 | |
providers/linkedin/linkedin_test.go | package linkedin_test
import (
"fmt"
"os"
"testing"
"github.com/watercraft/goth"
"github.com/watercraft/goth/providers/linkedin"
"github.com/stretchr/testify/assert"
)
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := linkedinProvider()
a.Equal(provider.ClientKey, os.Getenv("LINKEDIN_KEY"))
a.Equal(provider.Secret, os.Getenv("LINKEDIN_SECRET"))
a.Equal(provider.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), linkedinProvider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := linkedinProvider()
session, err := provider.BeginAuth("test_state")
s := session.(*linkedin.Session)
a.NoError(err)
a.Contains(s.AuthURL, "linkedin.com/oauth/v2/authorization")
a.Contains(s.AuthURL, fmt.Sprintf("client_id=%s", os.Getenv("LINKEDIN_KEY")))
a.Contains(s.AuthURL, "state=test_state")
a.Contains(s.AuthURL, "scope=r_liteprofile+r_emailaddress&state")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := linkedinProvider()
s, err := provider.UnmarshalSession(`{"AuthURL":"http://linkedin.com/auth_url","AccessToken":"1234567890"}`)
a.NoError(err)
session := s.(*linkedin.Session)
a.Equal(session.AuthURL, "http://linkedin.com/auth_url")
a.Equal(session.AccessToken, "1234567890")
}
func linkedinProvider() *linkedin.Provider {
return linkedin.New(os.Getenv("LINKEDIN_KEY"), os.Getenv("LINKEDIN_SECRET"), "/foo", "r_liteprofile", "r_emailaddress")
}
| [
"\"LINKEDIN_KEY\"",
"\"LINKEDIN_SECRET\"",
"\"LINKEDIN_KEY\"",
"\"LINKEDIN_KEY\"",
"\"LINKEDIN_SECRET\""
]
| []
| [
"LINKEDIN_SECRET",
"LINKEDIN_KEY"
]
| [] | ["LINKEDIN_SECRET", "LINKEDIN_KEY"] | go | 2 | 0 | |
opsdroid/__main__.py | """Starts opsdroid."""
import os
import subprocess
import sys
import logging
import gettext
import time
import contextlib
import click
from opsdroid import __version__
from opsdroid.core import OpsDroid
from opsdroid.web import Web
from opsdroid.const import DEFAULT_LOG_FILENAME, LOCALE_DIR, \
EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, DEFAULT_CONFIG_PATH
gettext.install('opsdroid')
_LOGGER = logging.getLogger("opsdroid")
def configure_lang(config):
"""Configure app language based on user config.
Args:
config: Language Configuration and it uses ISO 639-1 code.
for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes
"""
lang_code = config.get("lang", DEFAULT_LANGUAGE)
if lang_code != DEFAULT_LANGUAGE:
lang = gettext.translation(
'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)
lang.install()
def configure_logging(config):
"""Configure the root logger based on user config."""
rootlogger = logging.getLogger()
while rootlogger.handlers:
rootlogger.handlers.pop()
try:
if config["logging"]["path"]:
logfile_path = os.path.expanduser(config["logging"]["path"])
else:
logfile_path = config["logging"]["path"]
except KeyError:
logfile_path = DEFAULT_LOG_FILENAME
try:
log_level = get_logging_level(
config["logging"]["level"])
except KeyError:
log_level = logging.INFO
rootlogger.setLevel(log_level)
formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
console_handler.setFormatter(formatter)
rootlogger.addHandler(console_handler)
with contextlib.suppress(KeyError):
if not config["logging"]["console"]:
console_handler.setLevel(logging.CRITICAL)
if logfile_path:
logdir = os.path.dirname(os.path.realpath(logfile_path))
if not os.path.isdir(logdir):
os.makedirs(logdir)
file_handler = logging.FileHandler(logfile_path)
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
rootlogger.addHandler(file_handler)
_LOGGER.info("="*40)
_LOGGER.info(_("Started application"))
def get_logging_level(logging_level):
"""Get the logger level based on the user configuration."""
if logging_level == 'critical':
return logging.CRITICAL
if logging_level == 'error':
return logging.ERROR
if logging_level == 'warning':
return logging.WARNING
if logging_level == 'debug':
return logging.DEBUG
return logging.INFO
def check_dependencies():
"""Check for system dependencies required by opsdroid."""
    if sys.version_info < (3, 5):
logging.critical(_("Whoops! opsdroid requires python 3.5 or above."))
sys.exit(1)
def print_version(ctx, param, value):
"""Print out the version of opsdroid that is installed."""
if not value or ctx.resilient_parsing:
return
click.echo('opsdroid v{version}'.format(version=__version__))
ctx.exit(0)
def print_example_config(ctx, param, value):
"""Print out the example config."""
if not value or ctx.resilient_parsing:
return
with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
click.echo(conf.read())
ctx.exit(0)
def edit_files(ctx, param, value):
"""Open config/log file with favourite editor."""
if value == 'config':
file = DEFAULT_CONFIG_PATH
elif value == 'log':
file = DEFAULT_LOG_FILENAME
else:
return
editor = os.environ.get('EDITOR', 'vi')
if editor == 'vi':
click.echo('You are about to edit a file in vim. \n'
'Read the tutorial on vim at: https://bit.ly/2HRvvrB')
time.sleep(3)
subprocess.run([editor, file])
ctx.exit(0)
def welcome_message(config):
"""Add welcome message if set to true in configuration."""
try:
if config['welcome-message']:
_LOGGER.info("=" * 40)
_LOGGER.info(_("You can customise your opsdroid by modifying "
"your configuration.yaml"))
_LOGGER.info(_("Read more at: "
"http://opsdroid.readthedocs.io/#configuration"))
_LOGGER.info(_("Watch the Get Started Videos at: "
"http://bit.ly/2fnC0Fh"))
_LOGGER.info(_("Install Opsdroid Desktop at: \n"
"https://github.com/opsdroid/opsdroid-desktop/"
"releases"))
_LOGGER.info("=" * 40)
except KeyError:
_LOGGER.warning(_("'welcome-message: true/false' is missing in "
"configuration.yaml"))
@click.command()
@click.option('--gen-config', is_flag=True, callback=print_example_config,
expose_value=False, default=False,
help='Print an example config and exit.')
@click.option('--version', '-v', is_flag=True, callback=print_version,
expose_value=False, default=False, is_eager=True,
help='Print the version and exit.')
@click.option('--edit-config', '-e', is_flag=True, callback=edit_files,
default=False, flag_value='config', expose_value=False,
help='Opens configuration.yaml with your favorite editor'
' and exits.')
@click.option('--view-log', '-l', is_flag=True, callback=edit_files,
default=False, flag_value='log', expose_value=False,
help='Opens opsdroid logs with your favorite editor'
' and exits.')
def main():
"""Opsdroid is a chat bot framework written in Python.
It is designed to be extendable, scalable and simple.
See https://opsdroid.github.io/ for more information.
"""
check_dependencies()
with OpsDroid() as opsdroid:
opsdroid.load()
configure_lang(opsdroid.config)
configure_logging(opsdroid.config)
welcome_message(opsdroid.config)
opsdroid.web_server = Web(opsdroid)
opsdroid.start_loop()
def init():
"""Enter the application."""
if __name__ == "__main__":
main()
init()
| []
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | python | 1 | 0 | |
Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py | import demistomock as demisto
from CommonServerPython import *
from typing import List, Dict, Optional
from ldap3 import Server, Connection, NTLM, SUBTREE, ALL_ATTRIBUTES, Tls, Entry, Reader, ObjectDef
from ldap3.extend import microsoft
import ssl
from datetime import datetime
import traceback
import os
from ldap3.utils.log import (set_library_log_detail_level, get_library_log_detail_level,
set_library_log_hide_sensitive_data, EXTENDED)
# global connection
conn: Optional[Connection] = None
''' GLOBAL VARS '''
# userAccountControl is a bitmask used to store a number of settings.
# find more at:
# https://support.microsoft.com/en-gb/help/305144/how-to-use-the-useraccountcontrol-flags-to-manipulate-user-account-pro
DEFAULT_OUTGOING_MAPPER = "User Profile - Active Directory (Outgoing)"
DEFAULT_INCOMING_MAPPER = "User Profile - Active Directory (Incoming)"
COOMON_ACCOUNT_CONTROL_FLAGS = {
512: "Enabled Account",
514: "Disabled account",
544: "Password Not Required",
4096: "Workstation/server",
66048: "Enabled, password never expires",
66050: "Disabled, password never expires",
66080: "Enables, password never expires, password not required.",
532480: "Domain controller"
}
NORMAL_ACCOUNT = 512
DISABLED_ACCOUNT = 514
INACTIVE_LIST_OPTIONS = [514, 546, 66050, 66082, 262658, 262690, 328226]
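# Illustrative note (added for clarity; see the Microsoft documentation linked above): these
# values are sums of individual userAccountControl bits, e.g.
#   512   = NORMAL_ACCOUNT
#   514   = 512 + 2         (NORMAL_ACCOUNT | ACCOUNTDISABLE)
#   66048 = 512 + 65536     (NORMAL_ACCOUNT | DONT_EXPIRE_PASSWORD)
#   66050 = 512 + 2 + 65536 (a disabled account whose password never expires)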
DEFAULT_LIMIT = 20
# common attributes for specific AD objects
DEFAULT_PERSON_ATTRIBUTES = [
'name',
'displayName',
'memberOf',
'mail',
'sAMAccountName',
'manager',
'userAccountControl'
]
DEFAULT_COMPUTER_ATTRIBUTES = [
'name',
'memberOf'
]
FIELDS_THAT_CANT_BE_MODIFIED = [
"dn", "samaccountname", "cn", "ou"
]
''' HELPER FUNCTIONS '''
def initialize_server(host, port, secure_connection, unsecure):
"""
uses the instance configuration to initialize the LDAP server
:param host: host or ip
:type host: string
:param port: port or None
:type port: number
:param secure_connection: SSL or None
:type secure_connection: string
:param unsecure: trust any cert
:type unsecure: boolean
:return: ldap3 Server
:rtype: Server
"""
if secure_connection == "SSL":
# intialize server with ssl
# port is configured by default as 389 or as 636 for LDAPS if not specified in configuration
demisto.debug("initializing sever with ssl (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
if not unsecure:
demisto.debug("will require server certificate.")
tls = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=os.environ.get('SSL_CERT_FILE'))
if port:
return Server(host, port=port, use_ssl=True, tls=tls)
return Server(host, use_ssl=True, tls=tls)
if port:
return Server(host, port=port, use_ssl=True)
return Server(host, use_ssl=True)
demisto.debug("initializing server without secure connection. port: {}". format(port or 'default(389)'))
if port:
return Server(host, port=port)
return Server(host)
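# Illustrative sketch (hypothetical host/port values, not executed anywhere in this module):
# the three connection modes handled above roughly map to the following calls:
#   initialize_server('dc.example.com', 636, 'SSL', unsecure=True)   # SSL without certificate validation
#   initialize_server('dc.example.com', 636, 'SSL', unsecure=False)  # SSL, server certificate required
#   initialize_server('dc.example.com', 389, '', unsecure=False)     # plain (non-SSL) connection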
def account_entry(person_object, custome_attributes):
    # create an account entry from a person object
account = {
'Type': 'AD',
'ID': person_object.get('dn'),
'Email': person_object.get('mail'),
'Username': person_object.get('sAMAccountName'),
'DisplayName': person_object.get('displayName'),
'Managr': person_object.get('manager'),
'Manager': person_object.get('manager'),
'Groups': person_object.get('memberOf')
}
lower_cased_person_object_keys = {
person_object_key.lower(): person_object_key for person_object_key in person_object.keys()
}
for attr in custome_attributes:
try:
account[attr] = person_object[attr]
except KeyError as e:
lower_cased_custom_attr = attr.lower()
if lower_cased_custom_attr in lower_cased_person_object_keys:
cased_custom_attr = lower_cased_person_object_keys.get(lower_cased_custom_attr, '')
account[cased_custom_attr] = person_object[cased_custom_attr]
else:
demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
return account
def endpoint_entry(computer_object, custome_attributes):
# create an endpoint entry from a computer object
endpoint = {
'Type': 'AD',
'ID': computer_object.get('dn'),
'Hostname': computer_object.get('name'),
'Groups': computer_object.get('memberOf')
}
lower_cased_person_object_keys = {
person_object_key.lower(): person_object_key for person_object_key in computer_object.keys()
}
for attr in custome_attributes:
if attr == '*':
continue
try:
endpoint[attr] = computer_object[attr]
except KeyError as e:
lower_cased_custom_attr = attr.lower()
if lower_cased_custom_attr in lower_cased_person_object_keys:
cased_custom_attr = lower_cased_person_object_keys.get(lower_cased_custom_attr, '')
endpoint[cased_custom_attr] = computer_object[cased_custom_attr]
else:
demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
return endpoint
def base_dn_verified(base_dn):
    # search AD with a simple query to test that the base DN is configured correctly
try:
search(
"(objectClass=user)",
base_dn,
size_limit=1
)
except Exception as e:
demisto.info(str(e))
return False
return True
def generate_dn_and_remove_from_user_profile(user):
"""Generates a user dn, in case user dn is included in the user, will return it, otherwise
will generate one using the cn and ou values
:param user: The user dict including his values
:return: The user's dn.
"""
user_dn = user.get("dn")
if user_dn:
user.pop("dn")
return user_dn
cn = user.get("cn")
if not cn:
raise Exception("User must have cn, please provide a valid value")
ou = user.get("ou")
if not ou:
raise Exception("User must have ou, please provide a valid value")
return 'CN=' + str(cn) + ',' + str(ou)
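# Illustrative example (hypothetical values): for a user profile dict such as
#   {"cn": "John Doe", "ou": "OU=Users,DC=example,DC=com"}
# generate_dn_and_remove_from_user_profile() returns "CN=John Doe,OU=Users,DC=example,DC=com".
# If the dict had contained an explicit "dn" key instead, that value would be popped from the
# dict and returned as-is.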
def check_if_user_exists_by_samaccountname(default_base_dn, samaccountname):
"""Check if user exists base on his samaccountname
:param default_base_dn: The location in the DIT where the search will start
:param samaccountname: The user's unique samaccountname
:return: True if the user exists, False otherwise.
"""
query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))'
entries = search_with_paging(
query,
default_base_dn,
attributes=["samaccountname"],
size_limit=1,
page_size=1
)
if entries.get('flat'):
return True
return False
def get_user_activity_by_samaccountname(default_base_dn, samaccountname):
"""Get if user is active or not by samaccountname
:param default_base_dn: The location in the DIT where the search will start
:param samaccountname: The user's unique samaccountname
    :return: True if the user is active, False otherwise.
"""
active = False
query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))'
entries = search_with_paging(
query,
default_base_dn,
attributes=["userAccountControl"],
size_limit=1,
page_size=1
)
if entries.get('flat'):
user = entries.get('flat')[0]
activity = user.get('userAccountControl')[0]
active = activity not in INACTIVE_LIST_OPTIONS
return active
def modify_user_ou(dn, new_ou):
assert conn is not None
cn = dn.split(',', 1)[0]
success = conn.modify_dn(dn, cn, new_superior=new_ou)
return success
def get_all_attributes(search_base):
obj_inetorgperson = ObjectDef('user', conn)
r = Reader(conn, obj_inetorgperson, search_base)
r.search()
if not r:
return []
if not r[0]:
return []
attributes = r[0].entry_attributes
return attributes
''' COMMANDS '''
''' SEARCH '''
def search(search_filter, search_base, attributes=None, size_limit=0, time_limit=0):
"""
find entries in the DIT
Args:
search_base: the location in the DIT where the search will start
        search_filter: LDAP query string
attributes: the attributes to specify for each entry found in the DIT
"""
assert conn is not None
success = conn.search(
search_base=search_base,
search_filter=search_filter,
attributes=attributes,
size_limit=size_limit,
time_limit=time_limit
)
if not success:
raise Exception("Search failed")
return conn.entries
def search_with_paging(search_filter, search_base, attributes=None, page_size=100, size_limit=0, time_limit=0):
"""
find entries in the DIT
Args:
search_base: the location in the DIT where the search will start
search_filter: LDAP query string
        attributes: the attributes to specify for each entry found in the DIT
"""
assert conn is not None
total_entries = 0
cookie = None
start = datetime.now()
entries: List[Entry] = []
entries_left_to_fetch = size_limit
while True:
if 0 < entries_left_to_fetch < page_size:
page_size = entries_left_to_fetch
conn.search(
search_base,
search_filter,
search_scope=SUBTREE,
attributes=attributes,
paged_size=page_size,
paged_cookie=cookie
)
entries_left_to_fetch -= len(conn.entries)
total_entries += len(conn.entries)
cookie = conn.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']
        time_diff = (datetime.now() - start).seconds
entries.extend(conn.entries)
# stop when: 1.reached size limit 2.reached time limit 3. no cookie
if (size_limit and size_limit <= total_entries) or (time_limit and time_diff >= time_limit) or (not cookie):
break
# keep the raw entry for raw content (backward compatibility)
raw = []
# flatten the entries
flat = []
for entry in entries:
entry = json.loads(entry.entry_to_json())
flat_entry = {
'dn': entry['dn']
}
for attr in entry.get('attributes', {}):
flat_entry[attr] = entry['attributes'][attr]
raw.append(entry)
flat.append(flat_entry)
return {
"raw": raw,
"flat": flat
}
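# Illustrative sketch (hypothetical arguments): a typical call and the shape of the result:
#   res = search_with_paging('(objectClass=user)', 'DC=example,DC=com',
#                            attributes=['sAMAccountName'], page_size=100, size_limit=500)
#   res['raw']   # ldap3 JSON entries, e.g. [{'dn': '...', 'attributes': {'sAMAccountName': ['jdoe']}}, ...]
#   res['flat']  # flattened entries,  e.g. [{'dn': '...', 'sAMAccountName': ['jdoe']}, ...]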
def user_dn(sam_account_name, search_base):
search_filter = '(&(objectClass=user)(sAMAccountName={}))'.format(sam_account_name)
entries = search(
search_filter,
search_base
)
if not entries:
raise Exception("Could not get full DN for user with sAMAccountName '{}'".format(sam_account_name))
entry = json.loads(entries[0].entry_to_json())
return entry['dn']
def computer_dn(computer_name, search_base):
    search_filter = '(&(objectClass=user)(objectCategory=computer)(name={}))'.format(computer_name)
    entries = search(
        search_filter,
        search_base
    )
    if not entries:
        raise Exception("Could not get full DN for computer with name '{}'".format(computer_name))
    entry = json.loads(entries[0].entry_to_json())
    return entry['dn']
def group_dn(group_name, search_base):
search_filter = '(&(objectClass=group)(cn={}))'.format(group_name)
entries = search(
search_filter,
search_base
)
if not entries:
raise Exception("Could not get full DN for group with name '{}'".format(group_name))
entry = json.loads(entries[0].entry_to_json())
return entry['dn']
def convert_special_chars_to_unicode(search_filter):
# We allow users to use special chars without explicitly typing their unicode values
chars_to_replace = {
'\\(': '\\28',
'\\)': '\\29',
'\\*': '\\2a',
'\\/': '\\2f',
'\\\\': '\\5c'
}
for i, j in chars_to_replace.items():
search_filter = search_filter.replace(i, j)
return search_filter
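# Illustrative example (hypothetical filter): each escaped special character is rewritten to its
# LDAP hex escape before the query runs, e.g. the filter
#   (&(cn=Smith \(Admin\)))
# is rewritten to
#   (&(cn=Smith \28Admin\29))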
def free_search(default_base_dn, page_size):
args = demisto.args()
search_filter = args.get('filter')
size_limit = int(args.get('size-limit', '0'))
time_limit = int(args.get('time-limit', '0'))
search_base = args.get('base-dn') or default_base_dn
attributes = args.get('attributes')
context_output = args.get('context-output')
search_filter = convert_special_chars_to_unicode(search_filter)
# if ALL was specified - get all the object's attributes, else expect a string of comma separated values
if attributes:
attributes = ALL_ATTRIBUTES if attributes == 'ALL' else attributes.split(',')
entries = search_with_paging(
search_filter,
search_base,
attributes=attributes,
size_limit=size_limit,
time_limit=time_limit,
page_size=page_size
)
ec = {} if context_output == 'no' else {'ActiveDirectory.Search(obj.dn == val.dn)': entries['flat']}
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory Search", entries['flat']),
'EntryContext': ec
}
demisto.results(demisto_entry)
def search_users(default_base_dn, page_size):
# this command is equivalent to script ADGetUser
    # will perform a custom search to find users by a specific (one) attribute specified by the user
args = demisto.args()
attributes: List[str] = []
custom_attributes: List[str] = []
# zero is actually no limitation, default is 20
limit = int(args.get('limit', '20'))
if limit <= 0:
limit = 20
# default query - list all users
query = "(&(objectClass=User)(objectCategory=person))"
# query by user DN
if args.get('dn'):
query = "(&(objectClass=User)(objectCategory=person)(distinguishedName={}))".format(args['dn'])
# query by name
if args.get('name'):
query = "(&(objectClass=User)(objectCategory=person)(cn={}))".format(args['name'])
# query by email
if args.get('email'):
query = "(&(objectClass=User)(objectCategory=person)(mail={}))".format(args['email'])
# query by sAMAccountName
if args.get('username'):
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(args['username'])
# query by custom object attribute
if args.get('custom-field-type'):
if not args.get('custom-field-data'):
raise Exception('Please specify "custom-field-data" as well when quering by "custom-field-type"')
query = "(&(objectClass=User)(objectCategory=person)({}={}))".format(
args['custom-field-type'], args['custom-field-data'])
if args.get('attributes'):
custom_attributes = args['attributes'].split(",")
attributes = list(set(custom_attributes + DEFAULT_PERSON_ATTRIBUTES))
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
size_limit=limit,
page_size=page_size
)
accounts = [account_entry(entry, custom_attributes) for entry in entries['flat']]
if args.get('user-account-control-out', '') == 'true':
# display a literal translation of the numeric account control flag
for i, user in enumerate(entries['flat']):
flag_no = user.get('userAccountControl')[0]
entries['flat'][i]['userAccountControl'] = COOMON_ACCOUNT_CONTROL_FLAGS.get(flag_no) or flag_no
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Users", entries['flat']),
'EntryContext': {
'ActiveDirectory.Users(obj.dn == val.dn)': entries['flat'],
            # 'backward compatibility' with ADGetUser script
'Account(obj.ID == val.ID)': accounts
}
}
demisto.results(demisto_entry)
def get_user_iam(default_base_dn, args, mapper_in, mapper_out):
"""Gets an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param args: Demisto args.
:param mapper_in: Mapping AD user to User Profiles
:param mapper_out: Mapping User Profiles to AD users.
:return: User Profile of the AD user
"""
try:
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
default_attribute = "samaccountname"
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
value = ad_user.get(default_attribute)
# removing keys with no values
user = {k: v for k, v in ad_user.items() if v}
attributes = list(user.keys())
query = f'(&(objectClass=User)(objectCategory=person)({default_attribute}={value}))'
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
size_limit=1,
page_size=1
)
if not entries.get('flat'):
iam_user_profile.set_result(success=False,
error_message="No user was found",
action=IAMActions.GET_USER
)
else:
user_account_control = get_user_activity_by_samaccountname(default_base_dn, value)
ad_user["userAccountControl"] = user_account_control
iam_user_profile.update_with_app_data(ad_user, mapper_in)
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
action=IAMActions.GET_USER,
details=ad_user,
active=user_account_control)
return iam_user_profile
except Exception as e:
iam_user_profile.set_result(success=False,
error_message=str(e),
action=IAMActions.GET_USER
)
return iam_user_profile
def search_computers(default_base_dn, page_size):
# this command is equivalent to ADGetComputer script
args = demisto.args()
attributes: List[str] = []
custome_attributes: List[str] = []
# default query - list all users (computer category)
query = "(&(objectClass=user)(objectCategory=computer))"
# query by user DN
if args.get('dn'):
query = "(&(objectClass=user)(objectCategory=computer)(distinguishedName={}))".format(args['dn'])
# query by name
if args.get('name'):
query = "(&(objectClass=user)(objectCategory=computer)(name={}))".format(args['name'])
# query by custom object attribute
if args.get('custom-field-type'):
if not args.get('custom-field-data'):
            raise Exception('Please specify "custom-field-data" as well when querying by "custom-field-type"')
query = "(&(objectClass=user)(objectCategory=computer)({}={}))".format(
args['custom-field-type'], args['custom-field-data'])
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = list(set(custome_attributes + DEFAULT_COMPUTER_ATTRIBUTES))
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size
)
endpoints = [endpoint_entry(entry, custome_attributes) for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Computers", entries['flat']),
'EntryContext': {
'ActiveDirectory.Computers(obj.dn == val.dn)': entries['flat'],
            # 'backward compatibility' with ADGetComputer script
'Endpoint(obj.ID == val.ID)': endpoints
}
}
demisto.results(demisto_entry)
def search_group_members(default_base_dn, page_size):
# this command is equivalent to ADGetGroupMembers script
args = demisto.args()
member_type = args.get('member-type')
group_dn = args.get('group-dn')
nested_search = '' if args.get('disable-nested-search') == 'true' else ':1.2.840.113556.1.4.1941:'
time_limit = int(args.get('time_limit', 180))
custome_attributes: List[str] = []
default_attributes = DEFAULT_PERSON_ATTRIBUTES if member_type == 'person' else DEFAULT_COMPUTER_ATTRIBUTES
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = list(set(custome_attributes + default_attributes))
query = "(&(objectCategory={})(objectClass=user)(memberOf{}={}))".format(member_type, nested_search, group_dn)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size,
time_limit=time_limit
)
members = [{'dn': entry['dn'], 'category': member_type} for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Group Members", entries['flat']),
'EntryContext': {
'ActiveDirectory.Groups(obj.dn ==' + group_dn + ')': {
'dn': group_dn,
'members': members
}
}
}
if member_type == 'person':
demisto_entry['EntryContext']['ActiveDirectory.Users(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Account'] = [account_entry(
entry, custome_attributes) for entry in entries['flat']]
else:
demisto_entry['EntryContext']['ActiveDirectory.Computers(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Endpoint'] = [endpoint_entry(
entry, custome_attributes) for entry in entries['flat']]
demisto.results(demisto_entry)
''' DATABASE OPERATIONS '''
''' CREATE OBJECT'''
def create_user():
assert conn is not None
args = demisto.args()
object_classes = ["top", "person", "organizationalPerson", "user"]
user_dn = args.get('user-dn')
username = args.get("username")
password = args.get("password")
custome_attributes = args.get('custom-attributes')
attributes = {
"sAMAccountName": username
}
# set common user attributes
if args.get('display-name'):
attributes['displayName'] = args['display-name']
if args.get('description'):
attributes['description'] = args['description']
if args.get('email'):
attributes['mail'] = args['email']
if args.get('telephone-number'):
attributes['telephoneNumber'] = args['telephone-number']
if args.get('title'):
attributes['title'] = args['title']
    # set user custom attributes
if custome_attributes:
try:
custome_attributes = json.loads(custome_attributes)
except Exception as e:
demisto.info(str(e))
raise Exception(
"Failed to parse custom attributes argument. Please see an example of this argument in the description."
)
for attribute_name, attribute_value in custome_attributes.items():
            # can override default attribute settings
attributes[attribute_name] = attribute_value
# add user
success = conn.add(user_dn, object_classes, attributes)
if not success:
raise Exception("Failed to create user")
# set user password
success = conn.extend.microsoft.modify_password(user_dn, password)
if not success:
raise Exception("Failed to reset user password")
# enable user and expire password
modification = {
# enable user
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)],
# set to 0, to force password change on next login
"pwdLastSet": [('MODIFY_REPLACE', "0")]
}
modify_object(user_dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created user with DN: {}".format(user_dn)
}
demisto.results(demisto_entry)
def create_user_iam(default_base_dn, args, mapper_out, disabled_users_group_cn):
"""Creates an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param args: Demisto args.
:param mapper_out: Mapping User Profiles to AD users.
:param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
:return: The user that was created
"""
assert conn is not None
try:
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
sam_account_name = ad_user.get("samaccountname")
if not sam_account_name:
raise DemistoException("User must have SAMAccountName")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
if user_exists:
iam_user_profile = update_user_iam(default_base_dn, args, False, mapper_out, disabled_users_group_cn)
else:
user_dn = generate_dn_and_remove_from_user_profile(ad_user)
object_classes = ["top", "person", "organizationalPerson", "user"]
success = conn.add(user_dn, object_classes, ad_user)
if success:
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
action=IAMActions.CREATE_USER,
active=True)
else:
iam_user_profile.set_result(success=False,
error_message="Failed to create user",
action=IAMActions.CREATE_USER
)
return iam_user_profile
except Exception as e:
iam_user_profile.set_result(success=False,
error_message=str(e),
action=IAMActions.CREATE_USER,
)
return iam_user_profile
def update_user_iam(default_base_dn, args, create_if_not_exists, mapper_out, disabled_users_group_cn):
"""Update an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param args: Demisto args.
:param create_if_not_exists: Created the user if it does not exists.
:param mapper_out: Mapping User Profiles to AD users.
:param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
:return: Updated User
"""
assert conn is not None
try:
user_profile = args.get("user-profile")
allow_enable = args.get('allow-enable') == 'true'
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
        # check if the user exists and, if it doesn't, create it
sam_account_name = ad_user.get("samaccountname")
if not sam_account_name:
raise DemistoException("User must have SAMAccountName")
new_ou = ad_user.get("ou")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
if not user_exists and create_if_not_exists:
iam_user_profile = create_user_iam(default_base_dn, args, mapper_out, disabled_users_group_cn)
elif user_exists:
dn = user_dn(sam_account_name, default_base_dn)
if allow_enable:
enable_user_iam(default_base_dn, dn, disabled_users_group_cn)
# fields that can't be modified
            # notice that we are changing the ou and that affects the dn and cn
for field in FIELDS_THAT_CANT_BE_MODIFIED:
if ad_user.get(field):
ad_user.pop(field)
fail_to_modify = []
for key in ad_user:
modification = {key: [('MODIFY_REPLACE', ad_user.get(key))]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append(key)
ou_modified_succeed = modify_user_ou(dn, new_ou)
if not ou_modified_succeed:
fail_to_modify.append("ou")
if fail_to_modify:
error_list = '\n'.join(fail_to_modify)
error_message = f"Fail to modify the following attributes: {error_list}"
iam_user_profile.set_result(success=False,
error_message=error_message,
action=IAMActions.UPDATE_USER,
)
else:
active = get_user_activity_by_samaccountname(default_base_dn, sam_account_name)
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
action=IAMActions.UPDATE_USER,
details=ad_user,
active=active)
return iam_user_profile
except Exception as e:
iam_user_profile.set_result(success=False,
error_message=str(e),
action=IAMActions.UPDATE_USER
)
return iam_user_profile
def create_contact():
assert conn is not None
args = demisto.args()
object_classes = ["top", "person", "organizationalPerson", "contact"]
contact_dn = args.get('contact-dn')
# set contact attributes
attributes: Dict = {}
if args.get('custom-attributes'):
try:
attributes = json.loads(args['custom-attributes'])
except Exception as e:
demisto.info(str(e))
raise Exception(
                'Failed to parse custom attributes argument. Please see an example of this argument in the argument description.'
)
# set common user attributes
if args.get('display-name'):
attributes['displayName'] = args['display-name']
if args.get('description'):
attributes['description'] = args['description']
if args.get('email'):
attributes['mail'] = args['email']
if args.get('telephone-number'):
attributes['telephoneNumber'] = args['telephone-number']
if args.get('title'):
attributes['title'] = args['title']
# add contact
success = conn.add(contact_dn, object_classes, attributes)
if not success:
raise Exception("Failed to create contact")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created contact with DN: {}".format(contact_dn)
}
demisto.results(demisto_entry)
def create_group():
assert conn is not None
args = demisto.args()
object_classes = ["top", "group"]
dn = args.get('dn')
group_name = args.get('name')
group_type_map = {"security": "2147483650", "distribution": "2"}
group_type = group_type_map[args.get("group-type")]
if args.get('members'):
members = args.get('members')
attributes = {
"samAccountName": group_name,
"groupType": group_type,
"member": members
}
else:
attributes = {
"samAccountName": group_name,
"groupType": group_type
}
# create group
success = conn.add(dn, object_classes, attributes)
if not success:
raise Exception("Failed to create group")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created group with DN: {}".format(dn)
}
demisto.results(demisto_entry)
''' UPDATE OBJECT '''
def modify_object(dn, modification):
"""
modifies object in the DIT
"""
assert conn is not None
success = conn.modify(dn, modification)
if not success:
raise Exception("Failed to update object {} with the following modification: {}".format(
dn, json.dumps(modification)))
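# Illustrative usage sketch for modify_object (not executed; the DN and attribute below are hypothetical
# examples, but the dict shape matches the ldap3 MODIFY_REPLACE modifications used throughout this file):
#   modification = {'displayName': [('MODIFY_REPLACE', 'Jane Doe')]}
#   modify_object('CN=jdoe,OU=Users,DC=example,DC=com', modification)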
def update_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
attribute_name = args.get('attribute-name')
attribute_value = args.get('attribute-value')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
modification = {}
modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)]
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Updated user's {} to {} ".format(attribute_name, attribute_value)
}
demisto.results(demisto_entry)
def update_contact():
args = demisto.args()
contact_dn = args.get('contact-dn')
modification = {}
modification[args.get('attribute-name')] = [('MODIFY_REPLACE', args.get('attribute-value'))]
# modify
modify_object(contact_dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Updated contact's {} to: {} ".format(args.get('attribute-name'), args.get('attribute-value'))
}
demisto.results(demisto_entry)
def modify_computer_ou(default_base_dn):
assert conn is not None
args = demisto.args()
computer_name = args.get('computer-name')
dn = computer_dn(computer_name, args.get('base-dn') or default_base_dn)
success = conn.modify_dn(dn, "CN={}".format(computer_name), new_superior=args.get('full-superior-dn'))
if not success:
raise Exception("Failed to modify computer OU")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Moved computer {} to {}".format(computer_name, args.get('full-superior-dn'))
}
demisto.results(demisto_entry)
def expire_user_password(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
modification = {
# set to 0, to force password change on next login
"pwdLastSet": [('MODIFY_REPLACE', "0")]
}
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Expired password successfully"
}
demisto.results(demisto_entry)
def set_user_password(default_base_dn):
assert conn is not None
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
password = args.get('password')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# set user password
success = conn.extend.microsoft.modify_password(dn, password)
if not success:
raise Exception("Failed to reset user password")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User password successfully set"
}
demisto.results(demisto_entry)
def enable_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)]
}
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User {} was enabled".format(sam_account_name)
}
demisto.results(demisto_entry)
def disable_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', DISABLED_ACCOUNT)]
}
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User {} was disabled".format(sam_account_name)
}
demisto.results(demisto_entry)
def enable_user_iam(default_base_dn, dn, disabled_users_group_cn):
"""Enables an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param dn: The users unique dn
:param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
"""
modification = {
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)]
}
modify_object(dn, modification)
if disabled_users_group_cn:
grp_dn = group_dn(disabled_users_group_cn, default_base_dn)
success = microsoft.removeMembersFromGroups.ad_remove_members_from_groups(conn, [dn], [grp_dn], True)
if not success:
raise Exception('Failed to remove user from {} group'.format(disabled_users_group_cn))
def disable_user_iam(default_base_dn, disabled_users_group_cn, args, mapper_out):
"""Disables an AD user by User Profile.
:param default_base_dn: The location in the DIT where the search will start
:param disabled_users_group_cn: The disabled group cn, the user will be added to this group when disabled
:param args: Demisto args.
:param mapper_out: Mapping User Profiles to AD users.
:return: The disabled user
"""
try:
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
sam_account_name = ad_user.get("samaccountname")
if not sam_account_name:
raise DemistoException("User must have SAMAccountName")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
if not user_exists:
iam_user_profile.set_result(success=True, action=IAMActions.DISABLE_USER,
skip=True, skip_reason="User doesn't exist")
return iam_user_profile
dn = user_dn(sam_account_name, default_base_dn)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', DISABLED_ACCOUNT)]
}
command_failed = False
modify_object(dn, modification)
if disabled_users_group_cn:
grp_dn = group_dn(disabled_users_group_cn, default_base_dn)
success = microsoft.addMembersToGroups.ad_add_members_to_groups(conn, [dn], [grp_dn])
if not success:
command_failed = True
e = 'Failed to add user to {} group'.format(disabled_users_group_cn)
iam_user_profile.set_result(success=False,
error_message=e,
action=IAMActions.DISABLE_USER,
)
if not command_failed:
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
action=IAMActions.DISABLE_USER,
details=ad_user,
active=False)
return iam_user_profile
except Exception as e:
iam_user_profile.set_result(success=False,
error_message=str(e),
action=IAMActions.DISABLE_USER
)
return iam_user_profile
def add_member_to_group(default_base_dn):
args = demisto.args()
search_base = args.get('base-dn') or default_base_dn
# get the dn of the member - either user or computer
args_err = "Pleade provide either username or computer-name"
member_dn = ''
if args.get('username') and args.get('computer-name'):
# both arguments passed
raise Exception(args_err)
if args.get('username'):
member_dn = user_dn(args['username'], search_base)
elif args.get('computer-name'):
member_dn = computer_dn(args['computer-name'], search_base)
else:
# none of the arguments passed
raise Exception(args_err)
grp_dn = group_dn(args.get('group-cn'), search_base)
success = microsoft.addMembersToGroups.ad_add_members_to_groups(conn, [member_dn], [grp_dn])
if not success:
raise Exception("Failed to add {} to group {}".format(
args.get('username') or args.get('computer-name'),
args.get('group-cn')
))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Object with dn {} was added to group {}".format(member_dn, args.get('group-cn'))
}
demisto.results(demisto_entry)
def remove_member_from_group(default_base_dn):
args = demisto.args()
search_base = args.get('base-dn') or default_base_dn
# get the dn of the member - either user or computer
args_err = "Pleade provide either username or computer-name"
member_dn = ''
if args.get('username') and args.get('computer-name'):
# both arguments passed
raise Exception(args_err)
if args.get('username'):
member_dn = user_dn(args['username'], search_base)
elif args.get('computer-name'):
member_dn = computer_dn(args['computer-name'], search_base)
else:
# none of the arguments passed
raise Exception(args_err)
grp_dn = group_dn(args.get('group-cn'), search_base)
success = microsoft.removeMembersFromGroups.ad_remove_members_from_groups(conn, [member_dn], [grp_dn], True)
if not success:
raise Exception("Failed to remove {} from group {}".format(
args.get('username') or args.get('computer-name'),
args.get('group-cn')
))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Object with dn {} removed from group {}".format(member_dn, args.get('group-cn'))
}
demisto.results(demisto_entry)
def unlock_account(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
success = microsoft.unlockAccount.ad_unlock_account(conn, dn)
if not success:
raise Exception("Failed to unlock user {}".format(sam_account_name))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Unlocked user {}".format(sam_account_name)
}
demisto.results(demisto_entry)
''' DELETE OBJECT '''
def delete_user():
# can actually delete any object...
assert conn is not None
success = conn.delete(demisto.args().get('user-dn'))
if not success:
raise Exception('Failed to delete user')
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Deleted object with dn {}".format(demisto.args().get('user-dn'))
}
demisto.results(demisto_entry)
def delete_group():
assert conn is not None
args = demisto.args()
dn = args.get('dn')
# delete group
success = conn.delete(dn)
if not success:
raise Exception("Failed to delete group")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Deleted group with DN: {}".format(dn)
}
demisto.results(demisto_entry)
def get_mapping_fields_command(search_base):
ad_attributes = get_all_attributes(search_base)
# add keys that are not attributes but can be used in mapping
ad_attributes.extend(("dn", "samaccountname"))
incident_type_scheme = SchemeTypeMapping(type_name=IAMUserProfile.INDICATOR_TYPE)
for field in ad_attributes:
incident_type_scheme.add_field(field, "Field")
return GetMappingFieldsResponse([incident_type_scheme])
'''
TEST CONFIGURATION
authenticate user credentials while initializing connection with AD server
verify base DN is configured correctly
'''
def main():
''' INSTANCE CONFIGURATION '''
params = demisto.params()
SERVER_IP = params.get('server_ip')
USERNAME = params.get('credentials')['identifier']
PASSWORD = params.get('credentials')['password']
DEFAULT_BASE_DN = params.get('base_dn')
SECURE_CONNECTION = params.get('secure_connection')
DEFAULT_PAGE_SIZE = int(params.get('page_size'))
NTLM_AUTH = params.get('ntlm')
UNSECURE = params.get('unsecure', False)
PORT = params.get('port')
disabled_users_group_cn = params.get('group-cn')
create_if_not_exists = params.get('create-if-not-exists')
mapper_in = params.get('mapper-in', DEFAULT_INCOMING_MAPPER)
mapper_out = params.get('mapper-out', DEFAULT_OUTGOING_MAPPER)
if PORT:
# port was configured, cast to int
PORT = int(PORT)
last_log_detail_level = None
try:
try:
set_library_log_hide_sensitive_data(True)
if is_debug_mode():
demisto.info('debug-mode: setting library log detail to EXTENDED')
last_log_detail_level = get_library_log_detail_level()
set_library_log_detail_level(EXTENDED)
server = initialize_server(SERVER_IP, PORT, SECURE_CONNECTION, UNSECURE)
except Exception as e:
return_error(str(e))
return
global conn
if NTLM_AUTH:
# initialize connection to LDAP server with NTLM authentication
# user example: domain\user
domain_user = SERVER_IP + '\\' + USERNAME if '\\' not in USERNAME else USERNAME
conn = Connection(server, user=domain_user, password=PASSWORD, authentication=NTLM)
else:
# here username should be the user dn
conn = Connection(server, user=USERNAME, password=PASSWORD)
# bind operation is the “authenticate” operation.
try:
# open socket and bind to server
if not conn.bind():
message = "Failed to bind to server. Please validate the credentials configured correctly.\n{}".format(
json.dumps(conn.result))
return_error(message)
return
except Exception as e:
exc_msg = str(e)
demisto.info("Failed bind to: {}:{}. {}: {}".format(SERVER_IP, PORT, type(e), exc_msg
+ "\nTrace:\n{}".format(traceback.format_exc())))
message = "Failed to access LDAP server. Please validate the server host and port are configured correctly"
if 'ssl wrapping error' in exc_msg:
message = "Failed to access LDAP server. SSL error."
if not UNSECURE:
message += ' Try using: "Trust any certificate" option.'
return_error(message)
return
demisto.info('Established connection with AD LDAP server')
if not base_dn_verified(DEFAULT_BASE_DN):
message = "Failed to verify the base DN configured for the instance.\n" \
"Last connection result: {}\n" \
"Last error from LDAP server: {}".format(json.dumps(conn.result), json.dumps(conn.last_error))
return_error(message)
return
demisto.info('Verified base DN "{}"'.format(DEFAULT_BASE_DN))
''' COMMAND EXECUTION '''
if demisto.command() == 'test-module':
if conn.user == '':
# Empty response means you have no authentication status on the server, so you are an anonymous user.
raise Exception("Failed to authenticate user")
demisto.results('ok')
args = demisto.args()
if demisto.command() == 'ad-search':
free_search(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-expire-password':
expire_user_password(DEFAULT_BASE_DN)
if demisto.command() == 'ad-set-new-password':
set_user_password(DEFAULT_BASE_DN)
if demisto.command() == 'ad-unlock-account':
unlock_account(DEFAULT_BASE_DN)
if demisto.command() == 'ad-disable-account':
disable_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-enable-account':
enable_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-remove-from-group':
remove_member_from_group(DEFAULT_BASE_DN)
if demisto.command() == 'ad-add-to-group':
add_member_to_group(DEFAULT_BASE_DN)
if demisto.command() == 'ad-create-user':
create_user()
if demisto.command() == 'ad-delete-user':
delete_user()
if demisto.command() == 'ad-update-user':
update_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-modify-computer-ou':
modify_computer_ou(DEFAULT_BASE_DN)
if demisto.command() == 'ad-create-contact':
create_contact()
if demisto.command() == 'ad-update-contact':
update_contact()
if demisto.command() == 'ad-get-user':
search_users(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-get-computer':
search_computers(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-get-group-members':
search_group_members(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-create-group':
create_group()
if demisto.command() == 'ad-delete-group':
delete_group()
# IAM commands
if demisto.command() == 'iam-get-user':
user_profile = get_user_iam(DEFAULT_BASE_DN, args, mapper_in, mapper_out)
return return_results(user_profile)
if demisto.command() == 'iam-create-user':
user_profile = create_user_iam(DEFAULT_BASE_DN, args, mapper_out, disabled_users_group_cn)
return return_results(user_profile)
if demisto.command() == 'iam-update-user':
user_profile = update_user_iam(DEFAULT_BASE_DN, args, create_if_not_exists, mapper_out,
disabled_users_group_cn)
return return_results(user_profile)
if demisto.command() == 'iam-disable-user':
user_profile = disable_user_iam(DEFAULT_BASE_DN, disabled_users_group_cn, args, mapper_out)
return return_results(user_profile)
elif demisto.command() == 'get-mapping-fields':
mapping_fields = get_mapping_fields_command(DEFAULT_BASE_DN)
return return_results(mapping_fields)
except Exception as e:
message = str(e)
if conn:
message += "\nLast connection result: {}\nLast error from LDAP server: {}".format(
json.dumps(conn.result), conn.last_error)
return_error(message)
return
finally:
# disconnect and close the connection
if conn:
conn.unbind()
if last_log_detail_level:
set_library_log_detail_level(last_log_detail_level)
from IAMApiModule import * # noqa: E402
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins" or __name__ == "__main__":
main()
| []
| []
| [
"SSL_CERT_FILE"
]
| [] | ["SSL_CERT_FILE"] | python | 1 | 0 | |
build.go | // Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var (
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
noupgrade bool
)
const minGoVersion = 1.3
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
if os.Getenv("GOPATH") == "" {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
log.Println("GOPATH is", gopath)
os.Setenv("GOPATH", gopath)
}
os.Setenv("PATH", fmt.Sprintf("%s%cbin%c%s", os.Getenv("GOPATH"), os.PathSeparator, os.PathListSeparator, os.Getenv("PATH")))
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.BoolVar(&noupgrade, "no-upgrade", false, "Disable upgrade functionality")
flag.Parse()
switch goarch {
case "386", "amd64", "armv5", "armv6", "armv7":
break
case "arm":
log.Println("Invalid goarch \"arm\". Use one of \"armv5\", \"armv6\", \"armv7\".")
log.Fatalln("Note that producing a correct \"armv5\" binary requires a rebuilt stdlib.")
default:
log.Printf("Unknown goarch %q; proceed with caution!", goarch)
}
checkRequiredGoVersion()
if check() != nil {
setup()
}
if flag.NArg() == 0 {
install("./cmd/...")
return
}
switch flag.Arg(0) {
case "install":
pkg := "./cmd/..."
if flag.NArg() > 1 {
pkg = flag.Arg(1)
}
install(pkg)
case "build":
pkg := "./cmd/syncthing"
if flag.NArg() > 1 {
pkg = flag.Arg(1)
}
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
build(pkg, tags)
case "test":
pkg := "./..."
if flag.NArg() > 1 {
pkg = flag.Arg(1)
}
test(pkg)
case "assets":
assets()
case "xdr":
xdr()
case "translate":
translate()
case "transifex":
transifex()
case "deps":
deps()
case "tar":
buildTar()
case "zip":
buildZip()
case "clean":
clean()
default:
log.Fatalf("Unknown command %q", flag.Arg(0))
}
}
func check() error {
_, err := exec.LookPath("godep")
return err
}
func checkRequiredGoVersion() {
ver := run("go", "version")
re := regexp.MustCompile(`go version go(\d+\.\d+)`)
if m := re.FindSubmatch(ver); len(m) == 2 {
vs := string(m[1])
// This is a standard go build. Verify that it's new enough.
f, err := strconv.ParseFloat(vs, 64)
if err != nil {
log.Printf("*** Could parse Go version out of %q.\n*** This isn't known to work, proceed on your own risk.", vs)
return
}
if f < minGoVersion {
log.Fatalf("*** Go version %.01f is less than required %.01f.\n*** This is known not to work, not proceeding.", f, minGoVersion)
}
} else {
log.Printf("*** Unknown Go version %q.\n*** This isn't known to work, proceed on your own risk.", ver)
}
}
func setup() {
runPrint("go", "get", "-v", "code.google.com/p/go.tools/cmd/cover")
runPrint("go", "get", "-v", "code.google.com/p/go.tools/cmd/vet")
runPrint("go", "get", "-v", "code.google.com/p/go.net/html")
runPrint("go", "get", "-v", "github.com/tools/godep")
}
func test(pkg string) {
runPrint("godep", "go", "test", "-short", "-timeout", "10s", pkg)
}
func install(pkg string) {
os.Setenv("GOBIN", "./bin")
setBuildEnv()
runPrint("godep", "go", "install", "-ldflags", ldflags(), pkg)
}
func build(pkg string, tags []string) {
rmr("syncthing", "syncthing.exe")
args := []string{"go", "build", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
args = append(args, pkg)
setBuildEnv()
runPrint("godep", args...)
}
func buildTar() {
name := archiveName()
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build("./cmd/syncthing", tags)
filename := name + ".tar.gz"
tarGz(filename, []archiveFile{
{"README.md", name + "/README.txt"},
{"LICENSE", name + "/LICENSE.txt"},
{"CONTRIBUTORS", name + "/CONTRIBUTORS.txt"},
{"syncthing", name + "/syncthing"},
})
log.Println(filename)
}
func buildZip() {
name := archiveName()
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build("./cmd/syncthing", tags)
filename := name + ".zip"
zipFile(filename, []archiveFile{
{"README.md", name + "/README.txt"},
{"LICENSE", name + "/LICENSE.txt"},
{"CONTRIBUTORS", name + "/CONTRIBUTORS.txt"},
{"syncthing.exe", name + "/syncthing.exe"},
})
log.Println(filename)
}
func setBuildEnv() {
os.Setenv("GOOS", goos)
if strings.HasPrefix(goarch, "arm") {
os.Setenv("GOARCH", "arm")
os.Setenv("GOARM", goarch[4:])
} else {
os.Setenv("GOARCH", goarch)
}
if goarch == "386" {
os.Setenv("GO386", "387")
}
}
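// Illustrative mapping, derived from setBuildEnv above (flag values are examples only):
//   -goarch armv6 => GOARCH=arm, GOARM=6
//   -goarch 386   => GOARCH=386, GO386=387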
func assets() {
runPipe("auto/gui.files.go", "godep", "go", "run", "cmd/genassets/main.go", "gui")
}
func xdr() {
for _, f := range []string{"discover/packets", "files/leveldb", "protocol/message"} {
runPipe(f+"_xdr.go", "go", "run", "./Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go", "--", f+".go")
}
}
func translate() {
os.Chdir("gui/lang")
runPipe("lang-en-new.json", "go", "run", "../../cmd/translate/main.go", "lang-en.json", "../index.html")
os.Remove("lang-en.json")
err := os.Rename("lang-en-new.json", "lang-en.json")
if err != nil {
log.Fatal(err)
}
os.Chdir("../..")
}
func transifex() {
os.Chdir("gui/lang")
runPrint("go", "run", "../../cmd/transifexdl/main.go")
os.Chdir("../..")
assets()
}
func deps() {
rmr("Godeps")
runPrint("godep", "save", "./cmd/...")
}
func clean() {
rmr("bin", "Godeps/_workspace/pkg", "Godeps/_workspace/bin")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/syncthing", goos, goarch)))
}
func ldflags() string {
var b bytes.Buffer
b.WriteString("-w")
b.WriteString(fmt.Sprintf(" -X main.Version %s", version()))
b.WriteString(fmt.Sprintf(" -X main.BuildStamp %d", buildStamp()))
b.WriteString(fmt.Sprintf(" -X main.BuildUser %s", buildUser()))
b.WriteString(fmt.Sprintf(" -X main.BuildHost %s", buildHost()))
b.WriteString(fmt.Sprintf(" -X main.BuildEnv %s", buildEnvironment()))
if strings.HasPrefix(goarch, "arm") {
b.WriteString(fmt.Sprintf(" -X main.GoArchExtra %s", goarch[3:]))
}
return b.String()
}
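// Example of the flags ldflags() can produce (all values below are hypothetical):
//   -w -X main.Version v0.9.0+5-g1a2b3c -X main.BuildStamp 1400000000 -X main.BuildUser jb -X main.BuildHost build01 -X main.BuildEnv default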
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
os.RemoveAll(path)
}
}
func version() string {
v := run("git", "describe", "--always", "--dirty")
v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
s[0] = '+'
return s
})
return string(v)
}
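// Example (hypothetical "git describe" output): "v0.9.2-23-ga1b2c3d" becomes "v0.9.2+23-ga1b2c3d".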
func buildStamp() int64 {
bs := run("git", "show", "-s", "--format=%ct")
s, _ := strconv.ParseInt(string(bs), 10, 64)
return s
}
func buildUser() string {
u, err := user.Current()
if err != nil {
return "unknown-user"
}
return strings.Replace(u.Username, " ", "-", -1)
}
func buildHost() string {
h, err := os.Hostname()
if err != nil {
return "unknown-host"
}
return h
}
func buildEnvironment() string {
if v := os.Getenv("ENVIRONMENT"); len(v) > 0 {
return v
}
return "default"
}
func buildArch() string {
os := goos
if os == "darwin" {
os = "macosx"
}
return fmt.Sprintf("%s-%s", os, goarch)
}
func archiveName() string {
return fmt.Sprintf("syncthing-%s-%s", buildArch(), version())
}
func run(cmd string, args ...string) []byte {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
if err != nil {
log.Println(cmd, strings.Join(args, " "))
log.Println(string(bs))
log.Fatal(err)
}
return bytes.TrimSpace(bs)
}
func runPrint(cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "))
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = os.Stdout
ecmd.Stderr = os.Stderr
err := ecmd.Run()
if err != nil {
log.Fatal(err)
}
}
func runPipe(file, cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "), ">", file)
fd, err := os.Create(file)
if err != nil {
log.Fatal(err)
}
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = fd
ecmd.Stderr = os.Stderr
err = ecmd.Run()
if err != nil {
log.Fatal(err)
}
fd.Close()
}
type archiveFile struct {
src string
dst string
}
func tarGz(out string, files []archiveFile) {
fd, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
gw := gzip.NewWriter(fd)
tw := tar.NewWriter(gw)
for _, f := range files {
sf, err := os.Open(f.src)
if err != nil {
log.Fatal(err)
}
info, err := sf.Stat()
if err != nil {
log.Fatal(err)
}
h := &tar.Header{
Name: f.dst,
Size: info.Size(),
Mode: int64(info.Mode()),
ModTime: info.ModTime(),
}
err = tw.WriteHeader(h)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(tw, sf)
if err != nil {
log.Fatal(err)
}
sf.Close()
}
err = tw.Close()
if err != nil {
log.Fatal(err)
}
err = gw.Close()
if err != nil {
log.Fatal(err)
}
err = fd.Close()
if err != nil {
log.Fatal(err)
}
}
func zipFile(out string, files []archiveFile) {
fd, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
zw := zip.NewWriter(fd)
for _, f := range files {
sf, err := os.Open(f.src)
if err != nil {
log.Fatal(err)
}
info, err := sf.Stat()
if err != nil {
log.Fatal(err)
}
fh, err := zip.FileInfoHeader(info)
if err != nil {
log.Fatal(err)
}
fh.Name = f.dst
fh.Method = zip.Deflate
if strings.HasSuffix(f.dst, ".txt") {
// Text file. Read it and convert line endings.
bs, err := ioutil.ReadAll(sf)
if err != nil {
log.Fatal(err)
}
bs = bytes.Replace(bs, []byte{'\n'}, []byte{'\r', '\n'}, -1)
fh.UncompressedSize = uint32(len(bs))
fh.UncompressedSize64 = uint64(len(bs))
of, err := zw.CreateHeader(fh)
if err != nil {
log.Fatal(err)
}
of.Write(bs)
} else {
// Binary file. Copy verbatim.
of, err := zw.CreateHeader(fh)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(of, sf)
if err != nil {
log.Fatal(err)
}
}
}
err = zw.Close()
if err != nil {
log.Fatal(err)
}
err = fd.Close()
if err != nil {
log.Fatal(err)
}
}
| [
"\"GOPATH\"",
"\"GOPATH\"",
"\"PATH\"",
"\"GOPATH\"",
"\"ENVIRONMENT\""
]
| []
| [
"GOPATH",
"ENVIRONMENT",
"PATH"
]
| [] | ["GOPATH", "ENVIRONMENT", "PATH"] | go | 3 | 0 | |
.github/scripts/k8s-ci.py | import time
import base64
import os
import pathlib
import random
import string
import subprocess
from kubernetes import client, watch, config
KUBE_SYSTEM = "kube-system"
META_URL = os.getenv("JUICEFS_META_URL") or ""
ACCESS_KEY = os.getenv("JUICEFS_ACCESS_KEY") or ""
SECRET_KEY = os.getenv("JUICEFS_SECRET_KEY") or ""
STORAGE = os.getenv("JUICEFS_STORAGE") or ""
BUCKET = os.getenv("JUICEFS_BUCKET") or ""
TOKEN = os.getenv("JUICEFS_TOKEN")
IS_CE = os.getenv("IS_CE") == "True"
RESOURCE_PREFIX = "ce-" if IS_CE else "ee-"
SECRET_NAME = os.getenv("JUICEFS_NAME") or "ce-juicefs-secret"
STORAGECLASS_NAME = "ce-juicefs-sc" if IS_CE else "ee-juicefs-sc"
SECRETs = []
STORAGECLASSs = []
DEPLOYMENTs = []
PVCs = []
PVs = []
class Secret:
def __init__(self, *, secret_name):
self.secret_name = secret_name
self.namespace = KUBE_SYSTEM
self.meta_url = META_URL
self.access_key = ACCESS_KEY
self.secret_key = SECRET_KEY
self.storage_name = STORAGE
self.bucket = BUCKET
self.token = TOKEN
def create(self):
if IS_CE:
data = {
"name": base64.b64encode(self.secret_name.encode('utf-8')).decode("utf-8"),
"metaurl": base64.b64encode(self.meta_url.encode('utf-8')).decode("utf-8"),
"access-key": base64.b64encode(self.access_key.encode('utf-8')).decode("utf-8"),
"secret-key": base64.b64encode(self.secret_key.encode('utf-8')).decode("utf-8"),
"storage": base64.b64encode(self.storage_name.encode('utf-8')).decode("utf-8"),
"bucket": base64.b64encode(self.bucket.encode('utf-8')).decode("utf-8"),
}
else:
data = {
"name": base64.b64encode(self.secret_name.encode('utf-8')).decode("utf-8"),
"token": base64.b64encode(self.token.encode('utf-8')).decode("utf-8"),
"accesskey": base64.b64encode(self.access_key.encode('utf-8')).decode("utf-8"),
"secretkey": base64.b64encode(self.secret_key.encode('utf-8')).decode("utf-8"),
"storage": base64.b64encode(self.storage_name.encode('utf-8')).decode("utf-8"),
"bucket": base64.b64encode(self.bucket.encode('utf-8')).decode("utf-8"),
}
sec = client.V1Secret(
api_version="v1",
kind="Secret",
metadata=client.V1ObjectMeta(name=self.secret_name),
data=data
)
client.CoreV1Api().create_namespaced_secret(namespace=self.namespace, body=sec)
SECRETs.append(self)
def delete(self):
client.CoreV1Api().delete_namespaced_secret(name=self.secret_name, namespace=self.namespace)
SECRETs.remove(self)
class StorageClass:
def __init__(self, *, name, secret_name):
self.name = name
self.secret_name = secret_name
self.secret_namespace = KUBE_SYSTEM
def create(self):
sc = client.V1StorageClass(
api_version="storage.k8s.io/v1",
kind="StorageClass",
metadata=client.V1ObjectMeta(name=self.name),
provisioner="csi.juicefs.com",
reclaim_policy="Delete",
volume_binding_mode="Immediate",
parameters={
"csi.storage.k8s.io/node-publish-secret-name": self.secret_name,
"csi.storage.k8s.io/node-publish-secret-namespace": self.secret_namespace,
"csi.storage.k8s.io/provisioner-secret-name": self.secret_name,
"csi.storage.k8s.io/provisioner-secret-namespace": self.secret_namespace,
}
)
client.StorageV1Api().create_storage_class(body=sc)
STORAGECLASSs.append(self)
def delete(self):
client.StorageV1Api().delete_storage_class(name=self.name)
STORAGECLASSs.remove(self)
class PVC:
def __init__(self, *, name, access_mode, storage_name, pv):
self.name = RESOURCE_PREFIX + name
self.namespace = "default"
self.access_mode = access_mode
self.storage_class = storage_name
self.pv = pv
def create(self):
spec = client.V1PersistentVolumeClaimSpec(
access_modes=[self.access_mode],
resources=client.V1ResourceRequirements(
requests={"storage": "1Gi"}
)
)
if self.pv != "":
spec.selector = client.V1LabelSelector(match_labels={"pv": self.pv})
spec.storage_class_name = self.storage_class
pvc = client.V1PersistentVolumeClaim(
api_version="v1",
kind="PersistentVolumeClaim",
metadata=client.V1ObjectMeta(name=self.name),
spec=spec
)
client.CoreV1Api().create_namespaced_persistent_volume_claim(namespace=self.namespace, body=pvc)
PVCs.append(self)
def delete(self):
client.CoreV1Api().delete_namespaced_persistent_volume_claim(name=self.name, namespace=self.namespace)
PVCs.remove(self)
def check_is_deleted(self):
try:
client.CoreV1Api().read_namespaced_persistent_volume_claim(name=self.name, namespace=self.namespace)
except client.exceptions.ApiException as e:
if e.status == 404:
return True
raise e
return False
def get_volume_id(self):
p = client.CoreV1Api().read_namespaced_persistent_volume_claim(name=self.name, namespace=self.namespace)
pv_name = p.spec.volume_name
pv = client.CoreV1Api().read_persistent_volume(name=pv_name)
return pv.spec.csi.volume_handle
class PV:
def __init__(self, *, name, access_mode, volume_handle, secret_name):
self.name = RESOURCE_PREFIX + name
self.access_mode = access_mode
self.volume_handle = volume_handle
self.secret_name = secret_name
self.secret_namespace = KUBE_SYSTEM
def create(self):
spec = client.V1PersistentVolumeSpec(
access_modes=[self.access_mode],
capacity={"storage": "10Pi"},
volume_mode="Filesystem",
persistent_volume_reclaim_policy="Delete",
csi=client.V1CSIPersistentVolumeSource(
driver="csi.juicefs.com",
fs_type="juicefs",
volume_handle=self.volume_handle,
node_publish_secret_ref=client.V1SecretReference(
name=self.secret_name,
namespace=self.secret_namespace
),
)
)
pv = client.V1PersistentVolume(
api_version="v1",
kind="PersistentVolume",
metadata=client.V1ObjectMeta(name=self.name, labels={"pv": self.name}),
spec=spec
)
client.CoreV1Api().create_persistent_volume(body=pv)
PVs.append(self)
def delete(self):
client.CoreV1Api().delete_persistent_volume(name=self.name)
PVs.remove(self)
def get_volume_id(self):
p = client.CoreV1Api().read_persistent_volume(name=self.name)
return p.spec.csi.volume_handle
class Deployment:
def __init__(self, *, name, pvc, replicas, out_put=""):
self.name = RESOURCE_PREFIX + name
self.namespace = "default"
self.image = "centos"
self.pvc = pvc
self.replicas = replicas
self.out_put = out_put
def create(self):
cmd = "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"
if self.out_put != "":
cmd = "while true; do echo $(date -u) >> /data/{}; sleep 5; done".format(self.out_put)
container = client.V1Container(
name="app",
image="centos",
command=["/bin/sh"],
args=["-c", cmd],
volume_mounts=[client.V1VolumeMount(
name="juicefs-pv",
mount_path="/data"
)]
)
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"deployment": self.name}),
spec=client.V1PodSpec(
containers=[container],
volumes=[client.V1Volume(
name="juicefs-pv",
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=self.pvc)
)]),
)
deploySpec = client.V1DeploymentSpec(
replicas=self.replicas,
template=template,
selector={"matchLabels": {"deployment": self.name}}
)
deploy = client.V1Deployment(
api_version="apps/v1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=self.name),
spec=deploySpec,
)
client.AppsV1Api().create_namespaced_deployment(namespace=self.namespace, body=deploy)
DEPLOYMENTs.append(self)
def update_replicas(self, replicas):
deployment = client.AppsV1Api().read_namespaced_deployment(name=self.name, namespace=self.namespace)
deployment.spec.replicas = replicas
client.AppsV1Api().patch_namespaced_deployment(name=self.name, namespace=self.namespace, body=deployment)
def delete(self):
client.AppsV1Api().delete_namespaced_deployment(name=self.name, namespace=self.namespace)
DEPLOYMENTs.remove(self)
def refresh(self):
deploy = client.AppsV1Api().read_namespaced_deployment(name=self.name, namespace=self.namespace)
self.replicas = deploy.spec.replicas
return self
class Pod:
def __init__(self, name, deployment_name, replicas, namespace="default"):
self.name = name
self.namespace = namespace
self.deployment = deployment_name
self.pods = []
self.replicas = replicas
def watch_for_success(self):
v1 = client.CoreV1Api()
w = watch.Watch()
for event in w.stream(v1.list_pod_for_all_namespaces, timeout_seconds=5 * 60):
resource = event['object']
if resource.metadata.namespace != "default":
continue
if self.name == "" and resource.metadata.labels.get("deployment") != self.deployment:
continue
if self.name != "" and resource.metadata.name != self.name:
continue
print("Event: %s %s" % (event['type'], event['object'].metadata.name))
if self.__is_pod_ready(resource):
if self.name == "":
self.pods.append(resource)
if len(self.pods) == self.replicas:
self.pods = []
return True
else:
return True
return False
@staticmethod
def __is_pod_ready(resource):
if resource.status.phase.lower() != "running":
print("Pod {} status phase: {}".format(resource.metadata.name, resource.status.phase))
return False
conditions = resource.status.conditions
for c in conditions:
if c.status != "True":
return False
print("Pod {} status is ready.".format(resource.metadata.name))
return True
def watch_for_delete(self, num):
v1 = client.CoreV1Api()
w = watch.Watch()
for event in w.stream(v1.list_pod_for_all_namespaces, timeout_seconds=5 * 60):
resource = event['object']
message_type = event['type']
if resource.metadata.namespace != "default":
continue
if self.name == "" and resource.metadata.labels.get("deployment") != self.deployment:
continue
if self.name != "" and resource.metadata.name != self.name:
continue
print("Event: %s %s" % (event['type'], event['object'].metadata.name))
if message_type == "DELETED":
if self.name == "":
self.pods.append(resource)
if len(self.pods) == num:
self.pods = []
return True
else:
return True
return False
def is_deleted(self):
try:
po = client.CoreV1Api().read_namespaced_pod(self.name, self.namespace)
except client.exceptions.ApiException as e:
if e.status == 404:
return True
raise e
return po.metadata.deletion_timestamp != ""
def get_log(self, container_name):
return client.CoreV1Api().read_namespaced_pod_log(self.name, self.namespace, container=container_name)
def mount_on_host(mount_path):
print(f"Mount {mount_path}")
try:
if IS_CE:
subprocess.check_output(
["sudo", "/usr/local/bin/juicefs", "format", f"--storage={STORAGE}", f"--access-key={ACCESS_KEY}",
f"--secret-key={SECRET_KEY}", f"--bucket={BUCKET}", META_URL, SECRET_NAME])
subprocess.check_output(["sudo", "/usr/local/bin/juicefs", "mount", "-d", META_URL, mount_path])
else:
subprocess.check_output(
["sudo", "/usr/bin/juicefs", "auth", f"--token={TOKEN}", f"--accesskey={ACCESS_KEY}",
f"--secretkey={SECRET_KEY}", f"--bucket={BUCKET}", SECRET_NAME])
subprocess.check_output(["sudo", "/usr/bin/juicefs", "mount", "-d", SECRET_NAME, mount_path])
print("Mount success.")
except Exception as e:
print("Error in juicefs mount: {}".format(e))
raise e
def check_mount_point(mount_path, check_path):
mount_on_host(mount_path)
for i in range(0, 60):
try:
print("Open file {}".format(check_path))
f = open(check_path)
content = f.read(1)
if content is not None and content != "":
f.close()
print(f"Umount {mount_path}.")
subprocess.run(["sudo", "umount", mount_path])
return True
time.sleep(5)
f.close()
except FileNotFoundError:
print(os.listdir(mount_path))
print("Can't find file: {}".format(check_path))
time.sleep(5)
continue
except Exception as e:
print(e)
log = open("/var/log/juicefs.log", "rt")
print(log.read())
raise e
print(f"Umount {mount_path}.")
subprocess.run(["sudo", "umount", mount_path])
return False
def get_mount_pod_name(volume_id):
nodes = client.CoreV1Api().list_node()
node_name = nodes.items[0].metadata.name
return "juicefs-{}-{}".format(node_name, volume_id)
def check_mount_pod_refs(pod_name, replicas):
pod = client.CoreV1Api().read_namespaced_pod(name=pod_name, namespace=KUBE_SYSTEM)
annotations = pod.metadata.annotations
if annotations is None:
if replicas == 0:
return True
else:
return False
num = 0
for k in annotations.keys():
if k.startswith("juicefs-"):
num += 1
return num == replicas
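# Illustrative only: the annotations counted above are assumed to look like
#   {"juicefs-<app-pod-uid>": "<target path>", ...}
# Only the "juicefs-" key prefix is relied on here; the value format is an assumption.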
def deploy_secret_and_sc():
print("Deploy secret & storageClass..")
secret = Secret(secret_name=SECRET_NAME)
secret.create()
print("Deploy secret {}".format(secret.secret_name))
sc = StorageClass(name=STORAGECLASS_NAME, secret_name=secret.secret_name)
sc.create()
print("Deploy storageClass {}".format(sc.name))
def tear_down():
print("Tear down all resources begin..")
try:
for deploy in DEPLOYMENTs:
print("Delete deployment {}".format(deploy.name))
deploy = deploy.refresh()
deploy.delete()
pod = Pod(name="", deployment_name=deploy.name, replicas=deploy.replicas)
print("Watch for pods of deployment {} for delete.".format(deploy.name))
result = pod.watch_for_delete(deploy.replicas)
if not result:
raise Exception("Pods of deployment {} are not delete within 5 min.".format(deploy.name))
for pvc in PVCs:
print("Delete pvc {}".format(pvc.name))
pvc.delete()
for sc in STORAGECLASSs:
print("Delete storageclass {}".format(sc.name))
sc.delete()
for pv in PVs:
print("Delete pv {}".format(pv.name))
pv.delete()
for secret in SECRETs:
print("Delete secret {}".format(secret.secret_name))
secret.delete()
print("Delete all volumes in file system.")
clean_juicefs_volume("/mnt/jfs")
except Exception as e:
print("Error in tear down: {}".format(e))
print("Tear down success.")
def clean_juicefs_volume(mount_path):
mount_on_host(mount_path)
subprocess.run(["sudo", "rm", "-rf", mount_path + "/*"])
subprocess.run(["sudo", "umount", mount_path])
def die(e):
# csi_node_name = os.getenv("JUICEFS_CSI_NODE_POD")
# po = Pod(name=csi_node_name, deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
# print("Get csi node log:")
# print(po.get_log("juicefs-plugin"))
print("Get csi controller log:")
controller_po = Pod(name="juicefs-csi-controller-0", deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
print(controller_po.get_log("juicefs-plugin"))
print("Get event: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "event", "--all-namespaces"], check=True)
print("Get pvc: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "pvc", "--all-namespaces"], check=True)
print("Get pv: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "pv"], check=True)
print("Get sc: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "sc"], check=True)
raise Exception(e)
def gen_random_string(slen=10):
return ''.join(random.sample(string.ascii_letters + string.digits, slen))
###### test case in ci ######
def test_deployment_using_storage_rw():
print("[test case] Deployment using storageClass with rwm begin..")
# deploy pvc
pvc = PVC(name="pvc-dynamic-rw", access_mode="ReadWriteMany", storage_name=STORAGECLASS_NAME, pv="")
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
deployment = Deployment(name="app-dynamic-rw", pvc=pvc.name, replicas=1)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
# check mount point
print("Check mount point..")
volume_id = pvc.get_volume_id()
print("Get volume_id {}".format(volume_id))
mount_path = "/mnt/jfs"
check_path = mount_path + "/" + volume_id + "/out.txt"
result = check_mount_point(mount_path, check_path)
if not result:
die("mount Point of /jfs/{}/out.txt are not ready within 5 min.".format(volume_id))
print("Test pass.")
return
def test_deployment_using_storage_ro():
print("[test case] Deployment using storageClass with rom begin..")
# deploy pvc
pvc = PVC(name="pvc-dynamic-ro", access_mode="ReadOnlyMany", storage_name=STORAGECLASS_NAME, pv="")
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
deployment = Deployment(name="app-dynamic-ro", pvc=pvc.name, replicas=1)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
print("Test pass.")
return
def test_deployment_use_pv_rw():
print("[test case] Deployment using pv with rwm begin..")
# deploy pv
pv = PV(name="pv-rw", access_mode="ReadWriteMany", volume_handle="pv-rw", secret_name=SECRET_NAME)
print("Deploy pv {}".format(pv.name))
pv.create()
# deploy pvc
pvc = PVC(name="pvc-static-rw", access_mode="ReadWriteMany", storage_name="", pv=pv.name)
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
out_put = gen_random_string(6) + ".txt"
deployment = Deployment(name="app-static-rw", pvc=pvc.name, replicas=1, out_put=out_put)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
# check mount point
print("Check mount point..")
volume_id = pv.get_volume_id()
print("Get volume_id {}".format(volume_id))
mount_path = "/mnt/jfs"
check_path = mount_path + "/" + out_put
result = check_mount_point(mount_path, check_path)
if not result:
print("Get pvc: ")
subprocess.run(["sudo", "microk8s.kubectl", "-n", "default", "get", "pvc", pvc.name, "-oyaml"], check=True)
print("Get pv: ")
subprocess.run(["sudo", "microk8s.kubectl", "get", "pv", pv.name, "-oyaml"], check=True)
print("Get deployment: ")
subprocess.run(["sudo", "microk8s.kubectl", "-n", "default", "get", "deployment", deployment.name, "-oyaml"],
check=True)
try:
mount_pod_name = get_mount_pod_name(volume_id)
print("Get mount pod log:")
mount_pod = Pod(name=mount_pod_name, deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
print(mount_pod.get_log("jfs-mount"))
except client.ApiException as e:
print("Get log error: {}".format(e))
die("Mount point of /mnt/jfs/{} are not ready within 5 min.".format(out_put))
print("Test pass.")
return
def test_deployment_use_pv_ro():
print("[test case] Deployment using pv with rwo begin..")
# deploy pv
pv = PV(name="pv-ro", access_mode="ReadOnlyMany", volume_handle="pv-ro", secret_name=SECRET_NAME)
print("Deploy pv {}".format(pv.name))
pv.create()
# deploy pvc
pvc = PVC(name="pvc-static-ro", access_mode="ReadOnlyMany", storage_name="", pv=pv.name)
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
out_put = gen_random_string(6) + ".txt"
deployment = Deployment(name="app-static-ro", pvc=pvc.name, replicas=1, out_put=out_put)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
print("Test pass.")
return
def test_delete_one():
print("[test case] Deployment with 3 replicas begin..")
# deploy pvc
pvc = PVC(name="pvc-replicas", access_mode="ReadWriteMany", storage_name=STORAGECLASS_NAME, pv="")
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
deployment = Deployment(name="app-replicas", pvc=pvc.name, replicas=3)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
volume_id = pvc.get_volume_id()
print("Get volume_id {}".format(volume_id))
# check mount pod refs
mount_pod_name = get_mount_pod_name(volume_id)
print("Check mount pod {} refs.".format(mount_pod_name))
result = check_mount_pod_refs(mount_pod_name, 3)
if not result:
die("Mount pod {} does not have {} juicefs- refs.".format(mount_pod_name, 3))
# update replicas = 1
print("Set deployment {} replicas to 1".format(deployment.name))
deployment.update_replicas(1)
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of deployment {} for delete.".format(deployment.name))
result = pod.watch_for_delete(2)
if not result:
die("Pods of deployment {} are not delete within 5 min.".format(deployment.name))
# check mount pod refs
result = check_mount_pod_refs(mount_pod_name, 1)
print("Check mount pod {} refs.".format(mount_pod_name))
if not result:
raise Exception("Mount pod {} does not have {} juicefs- refs.".format(mount_pod_name, 1))
print("Test pass.")
return
def test_delete_all():
print("[test case] Deployment and delete it begin..")
# deploy pvc
pvc = PVC(name="pvc-delete-deploy", access_mode="ReadWriteMany", storage_name=STORAGECLASS_NAME, pv="")
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
deployment = Deployment(name="app-delete-deploy", pvc=pvc.name, replicas=3)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
volume_id = pvc.get_volume_id()
print("Get volume_id {}".format(volume_id))
# check mount pod refs
mount_pod_name = get_mount_pod_name(volume_id)
print("Check mount pod {} refs.".format(mount_pod_name))
result = check_mount_pod_refs(mount_pod_name, 3)
if not result:
die("Mount pod {} does not have {} juicefs- refs.".format(mount_pod_name, 3))
# delete deploy
print("Delete deployment {}".format(deployment.name))
deployment.delete()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of deployment {} for delete.".format(deployment.name))
result = pod.watch_for_delete(3)
if not result:
die("Pods of deployment {} are not delete within 5 min.".format(deployment.name))
# check mount pod is delete or not
print("Check mount pod {} is deleted or not.".format(mount_pod_name))
pod = Pod(name=mount_pod_name, deployment_name="", replicas=1)
result = pod.is_deleted()
if not result:
die("Mount pod {} does not been deleted within 5 min.".format(mount_pod_name))
print("Test pass.")
return
def test_delete_pvc():
print("[test case] Deployment and delete pvc begin..")
# deploy pvc
pvc = PVC(name="pvc-delete", access_mode="ReadWriteMany", storage_name=STORAGECLASS_NAME, pv="")
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# deploy pod
deployment = Deployment(name="app-delete-pvc", pvc=pvc.name, replicas=1)
print("Deploy deployment {}".format(deployment.name))
deployment.create()
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of {} for success.".format(deployment.name))
result = pod.watch_for_success()
if not result:
die("Pods of deployment {} are not ready within 5 min.".format(deployment.name))
# check mount point
print("Check mount point..")
volume_id = pvc.get_volume_id()
print("Get volume_id {}".format(volume_id))
mount_path = "/mnt/jfs"
check_path = mount_path + "/" + volume_id + "/out.txt"
result = check_mount_point(mount_path, check_path)
if not result:
die("mount Point of /jfs/{}/out.txt are not ready within 5 min.".format(volume_id))
print("Development delete..")
deployment.delete()
print("Watch deployment deleteed..")
pod = Pod(name="", deployment_name=deployment.name, replicas=deployment.replicas)
print("Watch for pods of deployment {} for delete.".format(deployment.name))
result = pod.watch_for_delete(1)
if not result:
die("Pods of deployment {} are not delete within 5 min.".format(deployment.name))
print("PVC delete..")
pvc.delete()
for i in range(0, 60):
if pvc.check_is_deleted():
print("PVC is deleted.")
break
time.sleep(5)
print("Check dir is deleted or not..")
mount_on_host("/mnt/jfs")
file_exist = True
for i in range(0, 60):
f = pathlib.Path("/mnt/jfs/" + volume_id)
if f.exists() is False:
file_exist = False
break
time.sleep(5)
if file_exist:
die("SubPath of volume_id {} still exists.".format(volume_id))
print("Umount /mnt/jfs.")
subprocess.run(["sudo", "umount", "/mnt/jfs"])
print("Test pass.")
if __name__ == "__main__":
config.load_kube_config()
# clear juicefs volume first.
print("clean juicefs volume first.")
clean_juicefs_volume("/mnt/jfs")
try:
deploy_secret_and_sc()
test_deployment_using_storage_rw()
test_deployment_using_storage_ro()
test_deployment_use_pv_rw()
test_deployment_use_pv_ro()
test_delete_one()
test_delete_all()
test_delete_pvc()
finally:
tear_down()
| []
| []
| [
"JUICEFS_NAME",
"JUICEFS_BUCKET",
"JUICEFS_SECRET_KEY",
"JUICEFS_ACCESS_KEY",
"JUICEFS_META_URL",
"JUICEFS_TOKEN",
"JUICEFS_STORAGE",
"IS_CE",
"JUICEFS_CSI_NODE_POD"
]
| [] | ["JUICEFS_NAME", "JUICEFS_BUCKET", "JUICEFS_SECRET_KEY", "JUICEFS_ACCESS_KEY", "JUICEFS_META_URL", "JUICEFS_TOKEN", "JUICEFS_STORAGE", "IS_CE", "JUICEFS_CSI_NODE_POD"] | python | 9 | 0 | |
recipes/calceph/all/conanfile.py | import glob
import os
from conans import ConanFile, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment, tools
from conans.errors import ConanInvalidConfiguration
class CalcephConan(ConanFile):
name = "calceph"
description = "C Library designed to access the binary planetary ephemeris " \
"files, such INPOPxx, JPL DExxx and SPICE ephemeris files."
license = ["CECILL-C", "CECILL-B", "CECILL-2.1"]
topics = ("conan", "calceph", "ephemeris", "astronomy", "space", "planet")
homepage = "https://www.imcce.fr/inpop/calceph"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"threadsafe": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"threadsafe": False
}
_autotools = None
_nmake_args = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.cppstd
del self.settings.compiler.libcxx
if self.settings.compiler == "Visual Studio":
del self.options.threadsafe
if self.options.shared:
raise ConanInvalidConfiguration("calceph doesn't support shared builds with Visual Studio yet")
def build_requirements(self):
if tools.os_info.is_windows and self.settings.compiler != "Visual Studio" and \
"CONAN_BASH_PATH" not in os.environ and tools.os_info.detect_windows_subsystem() != "msys2":
self.build_requires("msys2/20200517")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename(self.name + "-" + self.version, self._source_subfolder)
def build(self):
if self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "Makefile.vc"),
"CFLAGS = /O2 /GR- /MD /nologo /EHs",
"CFLAGS = /nologo /EHs")
with tools.chdir(self._source_subfolder):
with tools.vcvars(self.settings):
with tools.environment_append(VisualStudioBuildEnvironment(self).vars):
self.run("nmake -f Makefile.vc {}".format(" ".join(self._get_nmake_args())))
else:
autotools = self._configure_autotools()
autotools.make()
def _get_nmake_args(self):
if self._nmake_args:
return self._nmake_args
self._nmake_args = []
self._nmake_args.append("DESTDIR=\"{}\"".format(self.package_folder))
self._nmake_args.extend(["ENABLEF2003=0", "ENABLEF77=0"])
return self._nmake_args
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
args = [
"--disable-static" if self.options.shared else "--enable-static",
"--enable-shared" if self.options.shared else "--disable-shared",
"--enable-thread" if self.options.threadsafe else "--disable-thread",
"--disable-fortran",
"--disable-python",
"--disable-python-package-system",
"--disable-python-package-user",
"--disable-mex-octave"
]
self._autotools.configure(args=args, configure_dir=self._source_subfolder)
return self._autotools
def package(self):
self.copy(pattern="COPYING*", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
with tools.chdir(self._source_subfolder):
with tools.vcvars(self.settings):
with tools.environment_append(VisualStudioBuildEnvironment(self).vars):
self.run("nmake -f Makefile.vc install {}".format(" ".join(self._get_nmake_args())))
tools.rmdir(os.path.join(self.package_folder, "doc"))
else:
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
for la_file in glob.glob(os.path.join(self.package_folder, "lib", "*.la")):
os.remove(la_file)
tools.rmdir(os.path.join(self.package_folder, "libexec"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if self.settings.os == "Linux":
self.cpp_info.system_libs.append("m")
if self.options.threadsafe:
self.cpp_info.system_libs.append("pthread")
if self.settings.compiler != "Visual Studio":
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/test/java/com/github/gpluscb/challonge_listener/ChallongeExtensionTest.java | package com.github.gpluscb.challonge_listener;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.gson.GsonBuilder;
import at.stefangeyer.challonge.exception.DataAccessException;
import at.stefangeyer.challonge.model.Credentials;
import at.stefangeyer.challonge.model.Match;
import at.stefangeyer.challonge.model.Participant;
import at.stefangeyer.challonge.model.Tournament;
import at.stefangeyer.challonge.model.query.AttachmentQuery;
import at.stefangeyer.challonge.model.query.ParticipantQuery;
import at.stefangeyer.challonge.model.query.TournamentQuery;
import at.stefangeyer.challonge.rest.retrofit.RetrofitRestClient;
import at.stefangeyer.challonge.serializer.Serializer;
import at.stefangeyer.challonge.serializer.gson.GsonSerializer;
public class ChallongeExtensionTest {
private static RetrofitRestClient client;
private static ChallongeExtension challonge;
private static Serializer serializer;
private static String randomUrl;
private static Tournament owned;
private static Tournament notOwned;
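	// Creates a throwaway tournament under a random URL with ten participants
	// and one attachment per match. Challonge credentials are read from the
	// ChallongeUsername and ChallongeToken environment variables.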
@BeforeClass
public static void beforeClass() throws DataAccessException {
final GsonBuilder builder = new GsonBuilder();
builder.serializeNulls().setPrettyPrinting();
serializer = new GsonSerializer(builder);
client = new RetrofitRestClient();
final Credentials credentials = new Credentials(System.getenv("ChallongeUsername"),
System.getenv("ChallongeToken"));
challonge = new ChallongeExtension(credentials, serializer, client);
randomUrl = generateRandomUrl(10);
final TournamentQuery tournamentQuery = TournamentQuery.builder().name("TestTournament")
.description("A test tournament").gameName("Chess").url(randomUrl).acceptAttachments(Boolean.TRUE)
.build();
owned = challonge.createTournament(tournamentQuery);
final List<ParticipantQuery> participantQueries = new ArrayList<>();
for(int i = 0; i < 10; i++) {
participantQueries.add(ParticipantQuery.builder().name(String.valueOf(i)).build());
}
challonge.bulkAddParticipants(owned, participantQueries);
owned = challonge.startTournament(owned);
final List<Match> ownedMatches = challonge.getMatches(owned);
for(int i = 0; i < ownedMatches.size(); i++) {
challonge.createAttachment(ownedMatches.get(i),
AttachmentQuery.builder().description(String.valueOf(i)).build());
}
}
private static String generateRandomUrl(final int length) {
// Only gets lower-case [a-z]{length} type strings
final int a = 97;
final int z = 122;
final Random rng = new Random();
final StringBuilder builder = new StringBuilder(length);
for(int i = 0; i < length; i++) {
final int random = rng.nextInt(z + 1 - a) + a;
builder.append((char) random);
}
return builder.toString();
}
@AfterClass
public static void afterClass() throws DataAccessException {
owned = challonge.getTournament(randomUrl);
challonge.deleteTournament(owned);
client.close();
}
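	// Reloads the owned tournament with participants, matches and attachments
	// fully populated so each test starts from fresh server state; notOwned is
	// resolved from the NotOwnedTournament environment variable.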
@Before
public void beforeTest() throws DataAccessException {
owned = challonge.getTournament(randomUrl);
final List<Participant> ownedParticipants = challonge.getParticipants(owned);
for(final Participant participant : ownedParticipants) {
participant.setMatches(challonge.getMatches(owned, participant));
for(final Match match : participant.getMatches()) {
match.setAttachments(challonge.getAttachments(match));
}
}
owned.setParticipants(ownedParticipants);
final List<Match> ownedMatches = challonge.getMatches(owned);
for(final Match match : ownedMatches) {
match.setAttachments(challonge.getAttachments(match));
}
owned.setMatches(ownedMatches);
notOwned = challonge.getTournament(System.getenv("NotOwnedTournament"));
}
@Test
public void testDoesTournamentExist() throws DataAccessException {
assertTrue(challonge.doesTournamentExist(owned.getUrl()));
assertTrue(challonge.doesTournamentExist(String.valueOf(owned.getId())));
assertFalse(challonge.doesTournamentExist("As far as I know impossible to exist due to spaces in url"));
}
@Test
public void testDoesMatchExist() throws DataAccessException {
assertTrue(challonge.doesMatchExist(owned, owned.getMatches().get(0).getId().longValue()));
assertFalse(challonge.doesMatchExist(owned, -1));
try {
owned.setId(Long.valueOf(-1));
challonge.doesMatchExist(owned, owned.getMatches().get(0).getId().longValue());
fail();
} catch(@SuppressWarnings("unused") final DataAccessException e) {
// Expected
}
}
@Test
public void testDoesParticipantExist() throws DataAccessException {
assertTrue(challonge.doesParticipantExist(owned, owned.getParticipants().get(0).getId().longValue()));
assertFalse(challonge.doesParticipantExist(owned, -1));
try {
owned.setId(Long.valueOf(-1));
challonge.doesParticipantExist(owned, owned.getParticipants().get(0).getId().longValue());
fail();
} catch(@SuppressWarnings("unused") final DataAccessException e) {
// Expected
}
}
@Test
public void testDoesAttachmentExist() throws DataAccessException {
assertTrue(challonge.doesAttachmentExist(owned.getMatches().get(0),
owned.getMatches().get(0).getAttachments().get(0).getId().longValue()));
assertFalse(challonge.doesAttachmentExist(owned.getMatches().get(0), -1));
try {
owned.getMatches().get(0).setId(Long.valueOf(-1));
challonge.doesAttachmentExist(owned.getMatches().get(0),
owned.getMatches().get(0).getAttachments().get(0).getId().longValue());
fail();
} catch(@SuppressWarnings("unused") final DataAccessException e) {
// Expected
}
}
@Test
public void testDoesOwn() throws DataAccessException {
assertTrue(challonge.doesOwn(owned));
assertFalse(challonge.doesOwn(notOwned));
}
@Test
public void testGetTournamentStringBooleanBooleanBoolean() throws DataAccessException {
assertEquals(owned, challonge.getTournament(owned.getUrl(), true, true, true));
for(final Match match : owned.getMatches()) {
match.setAttachments(null);
}
for(final Participant participant : owned.getParticipants()) {
for(final Match match : participant.getMatches()) {
match.setAttachments(null);
}
}
assertEquals(owned, challonge.getTournament(owned.getUrl(), true, true, false));
owned.getMatches().clear();
for(final Participant participant : owned.getParticipants()) {
participant.setMatches(null);
}
assertEquals(owned, challonge.getTournament(owned.getUrl(), true, false, false));
owned.getParticipants().clear();
assertEquals(owned, challonge.getTournament(owned.getUrl(), false, false, false));
try {
challonge.getTournament("", true, false, true);
fail();
} catch(@SuppressWarnings("unused") final IllegalArgumentException e) {
// Expected
}
}
@Test
public void testGetTournamentsWithFullData() throws DataAccessException {
assertTrue(challonge.getTournamentsWithFullData().contains(owned));
}
@Test
public void testGetMatchesWithFullData() throws DataAccessException {
assertEquals(owned.getMatches(), challonge.getMatchesWithFullData(owned));
}
}
| [
"\"ChallongeUsername\"",
"\"ChallongeToken\"",
"\"NotOwnedTournament\""
]
| []
| [
"ChallongeUsername",
"NotOwnedTournament",
"ChallongeToken"
]
| [] | ["ChallongeUsername", "NotOwnedTournament", "ChallongeToken"] | java | 3 | 0 | |
pyrap/web/wsgi.py | """
WSGI Utilities
(from web.py)
"""
import os, sys
from . import http
from . import webapi as web
from .utils import listget, intget
from .net import validaddr, validip
from . import httpserver
def runfcgi(func, addr=('localhost', 8000)):
"""Runs a WSGI function as a FastCGI server."""
import flup.server.fcgi as flups
return flups.WSGIServer(func, multiplexed=True, bindAddress=addr, debug=False).run()
def runscgi(func, addr=('localhost', 4000)):
"""Runs a WSGI function as an SCGI server."""
import flup.server.scgi as flups
return flups.WSGIServer(func, bindAddress=addr, debug=False).run()
def runwsgi(func):
"""
Runs a WSGI-compatible `func` using FCGI, SCGI, or a simple web server,
as appropriate based on context and `sys.argv`.
"""
if 'SERVER_SOFTWARE' in os.environ: # cgi
os.environ['FCGI_FORCE_CGI'] = 'Y'
    if ('PHP_FCGI_CHILDREN' in os.environ  # lighttpd fastcgi
            or 'SERVER_SOFTWARE' in os.environ):
return runfcgi(func, None)
if 'fcgi' in sys.argv or 'fastcgi' in sys.argv:
args = sys.argv[1:]
if 'fastcgi' in args: args.remove('fastcgi')
elif 'fcgi' in args: args.remove('fcgi')
if args:
return runfcgi(func, validaddr(args[0]))
else:
return runfcgi(func, None)
if 'scgi' in sys.argv:
args = sys.argv[1:]
args.remove('scgi')
if args:
return runscgi(func, validaddr(args[0]))
else:
return runscgi(func)
server_addr = validip(listget(sys.argv, 1, ''))
if 'PORT' in os.environ: # e.g. Heroku
server_addr = ('0.0.0.0', intget(os.environ['PORT']))
return httpserver.runsimple(func, server_addr)
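# A minimal sketch of how an application module might hand a WSGI callable to
# runwsgi; the app name "hello_app" is illustrative and not part of this module:
#
#     def hello_app(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [b'hello']
#
#     if __name__ == '__main__':
#         runwsgi(hello_app)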
def _is_dev_mode():
    # Some embedded python interpreters won't have sys.argv
# For details, see https://github.com/webpy/webpy/issues/87
argv = getattr(sys, "argv", [])
# quick hack to check if the program is running in dev mode.
if 'SERVER_SOFTWARE' in os.environ \
or 'PHP_FCGI_CHILDREN' in os.environ \
or 'fcgi' in argv or 'fastcgi' in argv \
or 'mod_wsgi' in argv:
return False
return True
# When running the builtin-server, enable debug mode if not already set.
web.config.setdefault('debug', _is_dev_mode())
| []
| []
| [
"PORT",
"FCGI_FORCE_CGI"
]
| [] | ["PORT", "FCGI_FORCE_CGI"] | python | 2 | 0 | |
examples/certificates_server.go | package main
import (
"fmt"
"github.com/HewlettPackard/oneview-golang/ov"
"github.com/HewlettPackard/oneview-golang/utils"
"os"
"strconv"
)
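// This example walks a server certificate through its lifecycle on a OneView
// appliance: look it up by IP, add it under an alias, update its base64 data,
// and finally delete it. The placeholder IP and certificate data in main, as
// well as the ONEVIEW_* environment variables, must be set before running.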
func main() {
var (
ClientOV *ov.OVClient
server_certificate_ip = "<Server IP>"
server_certificate_name = "new_test_certificate"
new_cert_base64data utils.Nstring = "---BEGIN CERTIFICATE----END CERTIFICATE------"
)
apiversion, _ := strconv.Atoi(os.Getenv("ONEVIEW_APIVERSION"))
ovc := ClientOV.NewOVClient(
os.Getenv("ONEVIEW_OV_USER"),
os.Getenv("ONEVIEW_OV_PASSWORD"),
os.Getenv("ONEVIEW_OV_DOMAIN"),
os.Getenv("ONEVIEW_OV_ENDPOINT"),
false,
apiversion,
"")
server_cert, err := ovc.GetServerCertificateByIp(server_certificate_ip)
if err != nil {
fmt.Println(err)
} else {
fmt.Println(server_cert)
}
server_cert.CertificateDetails[0].AliasName = server_certificate_name
fmt.Println(server_cert.CertificateDetails[0].AliasName)
server_cert.Type = "" //Making Type field as empty as it is not required
er := ovc.CreateServerCertificate(server_cert)
if er != nil {
fmt.Println("............... Adding Server Certificate Failed:", er)
} else {
fmt.Println(".... Adding Server Certificate Success")
}
fmt.Println("#................... Server Certificate by Name ...............#")
server_certn, err := ovc.GetServerCertificateByName(server_certificate_name)
if err != nil {
fmt.Println(err)
} else {
fmt.Println(server_certn)
}
certificateDetails := new([]ov.CertificateDetail)
certificateDetail_new := ov.CertificateDetail{Type: "CertificateDetailV2", AliasName: server_certificate_name, Base64Data: new_cert_base64data}
*certificateDetails = append(*certificateDetails, certificateDetail_new)
server_certn.CertificateDetails = *certificateDetails
err = ovc.UpdateServerCertificate(server_certn)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("#.................... Server Certificate after Updating ...........#")
server_cert_after_update, err := ovc.GetServerCertificateByName(server_certificate_name)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("..............Server Certificate Successfully updated.........")
fmt.Println(server_cert_after_update)
}
}
err = ovc.DeleteServerCertificate(server_certificate_name)
if err != nil {
fmt.Println(err)
} else {
fmt.Println("#...................... Deleted Server Certificate Successfully .....#")
}
}
| [
"\"ONEVIEW_APIVERSION\"",
"\"ONEVIEW_OV_USER\"",
"\"ONEVIEW_OV_PASSWORD\"",
"\"ONEVIEW_OV_DOMAIN\"",
"\"ONEVIEW_OV_ENDPOINT\""
]
| []
| [
"ONEVIEW_OV_ENDPOINT",
"ONEVIEW_OV_DOMAIN",
"ONEVIEW_APIVERSION",
"ONEVIEW_OV_PASSWORD",
"ONEVIEW_OV_USER"
]
| [] | ["ONEVIEW_OV_ENDPOINT", "ONEVIEW_OV_DOMAIN", "ONEVIEW_APIVERSION", "ONEVIEW_OV_PASSWORD", "ONEVIEW_OV_USER"] | go | 5 | 0 | |
setup.py | #!/usr/bin/env python
# Copyright (c) 2013-2015, Kevin Greenan ([email protected])
# Copyright (c) 2013-2015, Tushar Gohad ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution. THIS SOFTWARE IS
# PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import platform
import re
import subprocess
import sys
import ctypes
from ctypes.util import find_library
from distutils.command.build import build as _build
from distutils.command.clean import clean as _clean
from distutils.sysconfig import get_python_inc
try:
from setuptools import setup
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
from setuptools import Extension
from setuptools.command.install import install as _install
platform_str = platform.platform()
default_python_incdir = get_python_inc()
# this is to be used only for library existence/version checks,
# not for rpath handling
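# Lookup order: ctypes.util.find_library first; on macOS the search is widened
# via DYLD_LIBRARY_PATH and symlinks are resolved to the actual dylib; on other
# platforms a fallback `ld -t` probe is used to recover the library path.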
def _find_library(name):
target_lib = find_library(name)
if platform_str.find("Darwin") > -1:
# If we didn't find it, try extending our search a bit
if not target_lib:
if 'DYLD_LIBRARY_PATH' in os.environ:
os.environ['DYLD_LIBRARY_PATH'] += ':%s/lib' % sys.prefix
else:
os.environ['DYLD_LIBRARY_PATH'] = '%s/lib' % sys.prefix
target_lib = find_library(name)
# If we *still* don't find it, bail
if not target_lib:
return target_lib
target_lib = os.path.abspath(target_lib)
if os.path.islink(target_lib):
p = os.readlink(target_lib)
if os.path.isabs(p):
target_lib = p
else:
target_lib = os.path.join(os.path.dirname(target_lib), p)
elif not target_lib:
# See https://bugs.python.org/issue9998
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
cmd = ['ld', '-t']
libpath = os.environ.get('LD_LIBRARY_PATH')
if libpath:
for d in libpath.split(':'):
cmd.extend(['-L', d])
cmd.extend(['-o', os.devnull, '-l%s' % name])
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, _ = p.communicate()
if hasattr(os, 'fsdecode'):
out = os.fsdecode(out)
res = re.search(expr, out)
if res:
target_lib = res.group(0)
except Exception:
pass # result will be None
# return absolute path to the library if found
return target_lib
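# The custom build command refuses to compile the C extension unless a 1.x
# liberasurecode shared library can be located, printing installation hints
# otherwise.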
class build(_build):
def check_liberasure(self):
library_basename = "liberasurecode"
library_version = "1"
library_url = "https://opendev.org/openstack/liberasurecode"
found_path = _find_library("erasurecode")
if found_path:
libec = ctypes.CDLL(found_path)
try:
packed_version = libec.liberasurecode_get_version()
except AttributeError:
# If we don't have the version getter, we're probably
# pre-1.4.0; fall back to some heuristics based on name
version_parts = [x for x in found_path.split('.')
if x.isdigit()]
if not version_parts:
# A bare .so? Well, we haven't released a 2.x yet... but
# if we ever do, hopefully it'd still provide a
# liberasurecode_get_version()
return
if version_parts[0] == library_version:
return
# else, seems to be an unknown version -- assume it won't work
else:
version = (
packed_version >> 16,
(packed_version >> 8) & 0xff,
packed_version & 0xff)
if (1, 0, 0) <= version < (2, 0, 0):
return
if platform_str.find("Darwin") > -1:
liberasure_file = \
library_basename + "." + library_version + ".dylib"
else:
liberasure_file = \
library_basename + ".so." + library_version
print("**************************************************************")
print("*** ")
print("*** Can not locate %s" % (liberasure_file))
print("*** ")
print("*** Install - ")
print("*** Manual: %s" % library_url)
print("*** Fedora/Red Hat variants: liberasurecode-devel")
print("*** Debian/Ubuntu variants: liberasurecode-dev")
print("*** ")
print("**************************************************************")
sys.exit(-1)
def run(self):
self.check_liberasure()
_build.run(self)
class clean(_clean):
def run(self):
_clean.run(self)
class install(_install):
def run(self):
install_cmd = self.distribution.get_command_obj('install')
install_lib = self.distribution.get_command_obj('install_lib')
for cmd in (install_lib, install_cmd):
cmd.ensure_finalized()
# ensure that the paths are absolute so we don't get lost
opts = {'exec_prefix': install_cmd.exec_prefix,
'root': install_cmd.root}
for optname, value in list(opts.items()):
if value is not None:
opts[optname] = os.path.abspath(value)
installroot = install_lib.install_dir
_install.run(self)
# Another Mac-ism... If the libraries are installed
        # in a strange place, DYLD_LIBRARY_PATH needs to be
# updated.
if platform_str.find("Darwin") > -1:
ldpath_str = "DYLD_LIBRARY_PATH"
else:
ldpath_str = "LD_LIBRARY_PATH"
print("***************************************************")
print("** ")
print("** PyECLib libraries have been installed to: ")
print("** %s" % installroot)
print("** ")
print("** Any user using this library must update: ")
print("** %s" % ldpath_str)
print("** ")
print("** Run 'ldconfig' or place this line: ")
print("** export %s=%s" % (ldpath_str, "%s"
% installroot))
print("** ")
print("** into .bashrc, .profile, or the appropriate shell")
print("** start-up script! Also look at ldconfig(8) man ")
print("** page for a more static LD configuration ")
print("** ")
print("***************************************************")
module = Extension('pyeclib_c',
define_macros=[('MAJOR VERSION', '1'),
('MINOR VERSION', '6')],
include_dirs=[default_python_incdir,
'src/c/pyeclib_c',
'/usr/include',
'/usr/include/liberasurecode',
'%s/include/liberasurecode' % sys.prefix,
'%s/include' % sys.prefix],
libraries=['erasurecode'],
library_dirs=['%s/lib' % sys.prefix],
runtime_library_dirs=['%s/lib' % sys.prefix],
# The extra arguments are for debugging
# extra_compile_args=['-g', '-O0'],
sources=['src/c/pyeclib_c/pyeclib_c.c'])
setup(name='pyeclib',
version='1.6.0',
author='Kevin Greenan',
author_email='[email protected]',
maintainer='Kevin Greenan and Tushar Gohad',
maintainer_email='[email protected], [email protected]',
url='https://opendev.org/openstack/pyeclib',
project_urls={
'Bug Tracker': 'https://bugs.launchpad.net/pyeclib',
},
description=('This library provides a simple Python interface for '
'implementing erasure codes. To obtain the best possible '
'performance, the underlying erasure code algorithms are '
'written in C.'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
],
long_description=open('README.rst', 'r').read(),
platforms='Linux',
license='BSD',
ext_modules=[module],
packages=['pyeclib'],
package_dir={'pyeclib': 'pyeclib'},
cmdclass={'build': build, 'install': install, 'clean': clean},
py_modules=['pyeclib.ec_iface', 'pyeclib.core'],
command_options={
'build_sphinx': {
'build_dir': ('setup.py', 'doc/build')}},
test_suite='test')
| []
| []
| [
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH"
]
| [] | ["LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"] | python | 2 | 0 | |
pkg/cmd/roachprod/main.go | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"encoding/json"
"fmt"
"log"
"net"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"text/tabwriter"
"time"
cld "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/cloud"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/config"
rperrors "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/errors"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/install"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/ssh"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/ui"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/aws"
_ "github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/azure"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/gce"
"github.com/cockroachdb/cockroach/pkg/cmd/roachprod/vm/local"
"github.com/cockroachdb/cockroach/pkg/util/flagutil"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/unix"
)
var rootCmd = &cobra.Command{
Use: "roachprod [command] (flags)",
Short: "roachprod tool for manipulating test clusters",
Long: `roachprod is a tool for manipulating ephemeral test clusters, allowing easy
creation, destruction, starting, stopping and wiping of clusters along with
running load generators.
Examples:
roachprod create local -n 3
roachprod start local
roachprod sql local:2 -- -e "select * from crdb_internal.node_runtime_info"
roachprod stop local
roachprod wipe local
roachprod destroy local
The above commands will create a "local" 3 node cluster, start a cockroach
cluster on these nodes, run a sql command on the 2nd node, stop, wipe and
destroy the cluster.
`,
}
var (
numNodes int
numRacks int
username string
dryrun bool
destroyAllMine bool
extendLifetime time.Duration
wipePreserveCerts bool
listDetails bool
listJSON bool
listMine bool
clusterType = "cockroach"
secure = false
nodeEnv = "COCKROACH_ENABLE_RPC_COMPRESSION=false"
nodeArgs []string
tag string
external = false
adminurlOpen = false
adminurlPath = ""
adminurlIPs = false
useTreeDist = true
encrypt = false
quiet = false
sig = 9
waitFlag = false
stageOS string
logsDir string
logsFilter string
logsProgramFilter string
logsFrom time.Time
logsTo time.Time
logsInterval time.Duration
maxConcurrency int
monitorIgnoreEmptyNodes bool
monitorOneShot bool
cachedHostsCluster string
)
func sortedClusters() []string {
var r []string
for n := range install.Clusters {
r = append(r, n)
}
sort.Strings(r)
return r
}
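// newCluster parses a cluster argument of the form <name>[:<nodes>], looks the
// name up in the cached cluster list, and applies the command-line flags
// (cluster type, racks, env vars, args, tag, concurrency) to the returned
// SyncedCluster.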
func newCluster(name string) (*install.SyncedCluster, error) {
nodeNames := "all"
{
parts := strings.Split(name, ":")
switch len(parts) {
case 2:
nodeNames = parts[1]
fallthrough
case 1:
name = parts[0]
case 0:
return nil, fmt.Errorf("no cluster specified")
default:
return nil, fmt.Errorf("invalid cluster name: %s", name)
}
}
c, ok := install.Clusters[name]
if !ok {
// NB: We don't use fmt.Errorf due to a linter error about the error
// message containing capitals and punctuation. We don't use
// errors.New(fmt.Sprintf()) due to a linter error that we should use
// fmt.Errorf() instead. Sigh.
s := fmt.Sprintf(`unknown cluster: %s
Available clusters:
%s
Hint: use "roachprod sync" to update the list of available clusters.
`,
name, strings.Join(sortedClusters(), "\n "))
return nil, errors.New(s)
}
switch clusterType {
case "cockroach":
c.Impl = install.Cockroach{}
if numRacks > 0 {
for i := range c.Localities {
rack := fmt.Sprintf("rack=%d", i%numRacks)
if c.Localities[i] != "" {
rack = "," + rack
}
c.Localities[i] += rack
}
}
case "cassandra":
c.Impl = install.Cassandra{}
default:
return nil, fmt.Errorf("unknown cluster type: %s", clusterType)
}
nodes, err := install.ListNodes(nodeNames, len(c.VMs))
if err != nil {
return nil, err
}
for _, n := range nodes {
if n > len(c.VMs) {
return nil, fmt.Errorf("invalid node spec %s, cluster contains %d nodes",
nodeNames, len(c.VMs))
}
}
c.Nodes = nodes
c.Secure = secure
c.Env = nodeEnv
c.Args = nodeArgs
if tag != "" {
c.Tag = "/" + tag
}
c.UseTreeDist = useTreeDist
c.Quiet = quiet || !terminal.IsTerminal(int(os.Stdout.Fd()))
c.MaxConcurrency = maxConcurrency
return c, nil
}
// verifyClusterName ensures that the given name conforms to
// our naming pattern of "<username>-<clustername>". The
// username must match one of the vm.Provider account names
// or the --username override.
func verifyClusterName(clusterName string) (string, error) {
if len(clusterName) == 0 {
return "", fmt.Errorf("cluster name cannot be blank")
}
if clusterName == config.Local {
return clusterName, nil
}
alphaNum, err := regexp.Compile(`^[a-zA-Z0-9\-]+$`)
if err != nil {
return "", err
}
if !alphaNum.MatchString(clusterName) {
return "", errors.Errorf("cluster name must match %s", alphaNum.String())
}
// Use the vm.Provider account names, or --username.
var accounts []string
if len(username) > 0 {
accounts = []string{username}
} else {
seenAccounts := map[string]bool{}
active, err := vm.FindActiveAccounts()
if err != nil {
return "", err
}
for _, account := range active {
if !seenAccounts[account] {
seenAccounts[account] = true
accounts = append(accounts, account)
}
}
}
// If we see <account>-<something>, accept it.
for _, account := range accounts {
if strings.HasPrefix(clusterName, account+"-") && len(clusterName) > len(account)+1 {
return clusterName, nil
}
}
// Try to pick out a reasonable cluster name from the input.
i := strings.Index(clusterName, "-")
suffix := clusterName
if i != -1 {
// The user specified a username prefix, but it didn't match an active
// account name. For example, assuming the account is "peter", `roachprod
// create joe-perf` should be specified as `roachprod create joe-perf -u
// joe`.
suffix = clusterName[i+1:]
} else {
// The user didn't specify a username prefix. For example, assuming the
// account is "peter", `roachprod create perf` should be specified as
// `roachprod create peter-perf`.
_ = 0
}
// Suggest acceptable cluster names.
var suggestions []string
for _, account := range accounts {
suggestions = append(suggestions, fmt.Sprintf("%s-%s", account, suffix))
}
return "", fmt.Errorf("malformed cluster name %s, did you mean one of %s",
clusterName, suggestions)
}
// Provide `cobra.Command` functions with a standard return code handler.
// Exit codes come from rperrors.Error.ExitCode().
//
// If the wrapped error tree of an error does not contain an instance of
// rperrors.Error, the error will automatically be wrapped with
// rperrors.Unclassified.
func wrap(f func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) {
return func(cmd *cobra.Command, args []string) {
err := f(cmd, args)
if err != nil {
roachprodError, ok := rperrors.AsError(err)
if !ok {
roachprodError = rperrors.Unclassified{Err: err}
err = roachprodError
}
cmd.Printf("Error: %+v\n", err)
os.Exit(roachprodError.ExitCode())
}
}
}
var createVMOpts vm.CreateOpts
type clusterAlreadyExistsError struct {
name string
}
func (e *clusterAlreadyExistsError) Error() string {
return fmt.Sprintf("cluster %s already exists", e.name)
}
func newClusterAlreadyExistsError(name string) error {
return &clusterAlreadyExistsError{name: name}
}
var createCmd = &cobra.Command{
Use: "create <cluster>",
Short: "create a cluster",
Long: `Create a local or cloud-based cluster.
A cluster is composed of a set of nodes, configured during cluster creation via
the --nodes flag. Creating a cluster does not start any processes on the nodes
other than the base system processes (e.g. sshd). See "roachprod start" for
starting cockroach nodes and "roachprod {run,ssh}" for running arbitrary
commands on the nodes of a cluster.
Cloud Clusters
Cloud-based clusters are ephemeral and come with a lifetime (specified by the
--lifetime flag) after which they will be automatically
destroyed. Cloud-based clusters require the associated command line tool for
the cloud to be installed and configured (e.g. "gcloud auth login").
Cluster names are required to be prefixed by the authenticated user of the
cloud service. The suffix is an arbitrary string used to distinguish
clusters. For example, "marc-test" is a valid cluster name for the user
"marc". The authenticated user for the cloud service is automatically
detected and can be overridden by the ROACHPROD_USER environment variable or
the --username flag.
The machine type and the use of local SSD storage can be specified during
cluster creation via the --{cloud}-machine-type and --local-ssd flags. The
machine-type is cloud specific. For example, --gce-machine-type=n1-highcpu-8
requests the "n1-highcpu-8" machine type for a GCE-based cluster. No attempt
is made (or desired) to abstract machine types across cloud providers. See
the cloud provider's documentation for details on the machine types
available.
Local Clusters
A local cluster stores the per-node data in ${HOME}/local on the machine
roachprod is being run on. Local clusters require local ssh access. Unlike
cloud clusters there can be only a single local cluster, the local cluster is
always named "local", and has no expiration (unlimited lifetime).
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) (retErr error) {
if numNodes <= 0 || numNodes >= 1000 {
// Upper limit is just for safety.
return fmt.Errorf("number of nodes must be in [1..999]")
}
clusterName, err := verifyClusterName(args[0])
if err != nil {
return err
}
createVMOpts.ClusterName = clusterName
defer func() {
if retErr == nil || clusterName == config.Local {
return
}
if errors.HasType(retErr, (*clusterAlreadyExistsError)(nil)) {
return
}
fmt.Fprintf(os.Stderr, "Cleaning up partially-created cluster (prev err: %s)\n", retErr)
if err := cleanupFailedCreate(clusterName); err != nil {
fmt.Fprintf(os.Stderr, "Error while cleaning up partially-created cluster: %s\n", err)
} else {
fmt.Fprintf(os.Stderr, "Cleaning up OK\n")
}
}()
if clusterName != config.Local {
cloud, err := cld.ListCloud()
if err != nil {
return err
}
if _, ok := cloud.Clusters[clusterName]; ok {
return newClusterAlreadyExistsError(clusterName)
}
} else {
if _, ok := install.Clusters[clusterName]; ok {
return newClusterAlreadyExistsError(clusterName)
}
// If the local cluster is being created, force the local Provider to be used
createVMOpts.VMProviders = []string{local.ProviderName}
}
fmt.Printf("Creating cluster %s with %d nodes\n", clusterName, numNodes)
if createErr := cld.CreateCluster(numNodes, createVMOpts); createErr != nil {
return createErr
}
// Just create directories for the local cluster as there's no need for ssh.
if clusterName == config.Local {
for i := 0; i < numNodes; i++ {
err := os.MkdirAll(fmt.Sprintf(os.ExpandEnv("${HOME}/local/%d"), i+1), 0755)
if err != nil {
return err
}
}
return nil
}
return setupSSH(clusterName)
}),
}
var setupSSHCmd = &cobra.Command{
Use: "setup-ssh <cluster>",
Short: "set up ssh for a cluster",
Long: `Sets up the keys and host keys for the vms in the cluster.
It first resets the machine credentials as though the cluster were newly created
using the cloud provider APIs and then proceeds to ensure that the hosts can
SSH into each other and lastly adds additional public keys to AWS hosts as read
from the GCP project. This operation is performed as the last step of creating
a new cluster but can be useful to re-run if the operation failed previously or
if the user would like to update the keys on the remote hosts.
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) (retErr error) {
clusterName, err := verifyClusterName(args[0])
if err != nil {
return err
}
return setupSSH(clusterName)
}),
}
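// setupSSH syncs cloud state, clears any stale host keys for the new IPs,
// waits for the VMs to come up, and then distributes SSH keys (including
// authorized keys fetched from GCE) across the cluster.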
func setupSSH(clusterName string) error {
cloud, err := syncCloud(quiet)
if err != nil {
return err
}
cloudCluster, ok := cloud.Clusters[clusterName]
if !ok {
return fmt.Errorf("could not find %s in list of cluster", clusterName)
}
cloudCluster.PrintDetails()
// Run ssh-keygen -R serially on each new VM in case an IP address has been recycled
for _, v := range cloudCluster.VMs {
cmd := exec.Command("ssh-keygen", "-R", v.PublicIP)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("could not clear ssh key for hostname %s:\n%s", v.PublicIP, string(out))
}
}
// Wait for the nodes in the cluster to start.
install.Clusters = map[string]*install.SyncedCluster{}
if err := loadClusters(); err != nil {
return err
}
installCluster, err := newCluster(clusterName)
if err != nil {
return err
}
// For GCP clusters we need to use the config.OSUser even if the client
// requested the shared user.
for i := range installCluster.VMs {
if cloudCluster.VMs[i].Provider == gce.ProviderName {
installCluster.Users[i] = config.OSUser.Username
}
}
if err := installCluster.Wait(); err != nil {
return err
}
// Fetch public keys from gcloud to set up ssh access for all users into the
// shared ubuntu user.
installCluster.AuthorizedKeys, err = gce.GetUserAuthorizedKeys()
if err != nil {
return errors.Wrap(err, "failed to retrieve authorized keys from gcloud")
}
return installCluster.SetupSSH()
}
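// cleanupFailedCreate destroys whatever VMs were provisioned before a cluster
// creation attempt failed; it is a no-op if the cluster never made it into the
// cloud listing.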
func cleanupFailedCreate(clusterName string) error {
cloud, err := cld.ListCloud()
if err != nil {
return err
}
c, ok := cloud.Clusters[clusterName]
if !ok {
// If the cluster doesn't exist, we didn't manage to create any VMs
// before failing. Not an error.
return nil
}
return cld.DestroyCluster(c)
}
var destroyCmd = &cobra.Command{
Use: "destroy [ --all-mine | <cluster 1> [<cluster 2> ...] ]",
Short: "destroy clusters",
Long: `Destroy one or more local or cloud-based clusters.
The destroy command accepts the names of the clusters to destroy. Alternatively,
the --all-mine flag can be provided to destroy all clusters that are owned by the
current user.
Destroying a cluster releases the resources for a cluster. For a cloud-based
cluster the machine and associated disk resources are freed. For a local
cluster, any processes started by roachprod are stopped, and the ${HOME}/local
directory is removed.
`,
Args: cobra.ArbitraryArgs,
Run: wrap(func(cmd *cobra.Command, args []string) error {
switch len(args) {
case 0:
if !destroyAllMine {
return errors.New("no cluster name provided")
}
destroyPattern, err := userClusterNameRegexp()
if err != nil {
return err
}
cloud, err := cld.ListCloud()
if err != nil {
return err
}
var names []string
for name := range cloud.Clusters {
if destroyPattern.MatchString(name) {
names = append(names, name)
}
}
sort.Strings(names)
for _, clusterName := range names {
if err := destroyCluster(cloud, clusterName); err != nil {
return err
}
}
default:
if destroyAllMine {
return errors.New("--all-mine cannot be combined with cluster names")
}
var cloud *cld.Cloud
for _, arg := range args {
clusterName, err := verifyClusterName(arg)
if err != nil {
return err
}
if clusterName != config.Local {
if cloud == nil {
cloud, err = cld.ListCloud()
if err != nil {
return err
}
}
if err := destroyCluster(cloud, clusterName); err != nil {
return err
}
} else {
if err := destroyLocalCluster(); err != nil {
return err
}
}
}
}
fmt.Println("OK")
return nil
}),
}
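// destroyCluster releases the cloud resources backing the named cluster.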
func destroyCluster(cloud *cld.Cloud, clusterName string) error {
c, ok := cloud.Clusters[clusterName]
if !ok {
return fmt.Errorf("cluster %s does not exist", clusterName)
}
fmt.Printf("Destroying cluster %s with %d nodes\n", clusterName, len(c.VMs))
return cld.DestroyCluster(c)
}
func destroyLocalCluster() error {
if _, ok := install.Clusters[config.Local]; !ok {
return fmt.Errorf("cluster %s does not exist", config.Local)
}
c, err := newCluster(config.Local)
if err != nil {
return err
}
c.Wipe(false)
for _, i := range c.Nodes {
err := os.RemoveAll(fmt.Sprintf(os.ExpandEnv("${HOME}/local/%d"), i))
if err != nil {
return err
}
}
return os.Remove(filepath.Join(os.ExpandEnv(config.DefaultHostDir), c.Name))
}
var cachedHostsCmd = &cobra.Command{
Use: "cached-hosts",
Short: "list all clusters (and optionally their host numbers) from local cache",
Args: cobra.NoArgs,
Run: wrap(func(cmd *cobra.Command, args []string) error {
if err := loadClusters(); err != nil {
return err
}
names := make([]string, 0, len(install.Clusters))
for name := range install.Clusters {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
c := install.Clusters[name]
if strings.HasPrefix(c.Name, "teamcity") {
continue
}
fmt.Print(c.Name)
			// when invoked by bash-completion, cachedHostsCluster is what the user
// has currently typed -- if this cluster matches that, expand its hosts.
if strings.HasPrefix(cachedHostsCluster, c.Name) {
for i := range c.VMs {
fmt.Printf(" %s:%d", c.Name, i+1)
}
}
fmt.Println()
}
return nil
}),
}
var listCmd = &cobra.Command{
Use: "list [--details] [ --mine | <cluster name regex> ]",
Short: "list all clusters",
Long: `List all clusters.
The list command accepts an optional positional argument, which is a regular
expression that will be matched against the cluster name pattern. Alternatively,
the --mine flag can be provided to list the clusters that are owned by the current
user.
The default output shows one line per cluster, including the local cluster if
it exists:
~ roachprod list
local: [local] 1 (-)
marc-test: [aws gce] 4 (5h34m35s)
Syncing...
The second column lists the cloud providers that host VMs for the cluster.
The third and fourth columns are the number of nodes in the cluster and the
time remaining before the cluster will be automatically destroyed. Note that
local clusters do not have an expiration.
The --details flag adjusts the output format to include per-node details:
~ roachprod list --details
local [local]: (no expiration)
localhost 127.0.0.1 127.0.0.1
marc-test: [aws gce] 5h33m57s remaining
marc-test-0001 marc-test-0001.us-east1-b.cockroach-ephemeral 10.142.0.18 35.229.60.91
marc-test-0002 marc-test-0002.us-east1-b.cockroach-ephemeral 10.142.0.17 35.231.0.44
marc-test-0003 marc-test-0003.us-east1-b.cockroach-ephemeral 10.142.0.19 35.229.111.100
marc-test-0004 marc-test-0004.us-east1-b.cockroach-ephemeral 10.142.0.20 35.231.102.125
Syncing...
The first and second columns are the node hostname and the fully qualified name,
respectively. The third and fourth columns are the private and public IP
addresses.
The --json flag sets the format of the command output to json.
Listing clusters has the side-effect of syncing ssh keys/configs and the local
hosts file.
`,
Args: cobra.RangeArgs(0, 1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
listPattern := regexp.MustCompile(".*")
switch len(args) {
case 0:
if listMine {
var err error
listPattern, err = userClusterNameRegexp()
if err != nil {
return err
}
}
case 1:
if listMine {
return errors.New("--mine cannot be combined with a pattern")
}
var err error
listPattern, err = regexp.Compile(args[0])
if err != nil {
return errors.Wrapf(err, "could not compile regex pattern: %s", args[0])
}
default:
return errors.New("only a single pattern may be listed")
}
cloud, err := syncCloud(quiet)
if err != nil {
return err
}
// Filter and sort by cluster names for stable output.
var names []string
filteredCloud := cloud.Clone()
for name := range cloud.Clusters {
if listPattern.MatchString(name) {
names = append(names, name)
} else {
delete(filteredCloud.Clusters, name)
}
}
sort.Strings(names)
if listJSON {
if listDetails {
return errors.New("--json cannot be combined with --detail")
}
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
if err := enc.Encode(filteredCloud); err != nil {
return err
}
} else {
// Align columns left and separate with at least two spaces.
tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
for _, name := range names {
c := filteredCloud.Clusters[name]
if listDetails {
c.PrintDetails()
} else {
fmt.Fprintf(tw, "%s:\t%s\t%d", c.Name, c.Clouds(), len(c.VMs))
if !c.IsLocal() {
fmt.Fprintf(tw, "\t(%s)", c.LifetimeRemaining().Round(time.Second))
} else {
fmt.Fprintf(tw, "\t(-)")
}
fmt.Fprintf(tw, "\n")
}
}
if err := tw.Flush(); err != nil {
return err
}
// Optionally print any dangling instances with errors
if listDetails {
collated := filteredCloud.BadInstanceErrors()
// Sort by Error() value for stable output
var errors ui.ErrorsByError
for err := range collated {
errors = append(errors, err)
}
sort.Sort(errors)
for _, e := range errors {
fmt.Printf("%s: %s\n", e, collated[e].Names())
}
}
}
return nil
}),
}
// userClusterNameRegexp returns a regexp that matches all clusters owned by the
// current user.
func userClusterNameRegexp() (*regexp.Regexp, error) {
// In general, we expect that users will have the same
// account name across the services they're using,
// but we still want to function even if this is not
// the case.
seenAccounts := map[string]bool{}
accounts, err := vm.FindActiveAccounts()
if err != nil {
return nil, err
}
pattern := ""
for _, account := range accounts {
if !seenAccounts[account] {
seenAccounts[account] = true
if len(pattern) > 0 {
pattern += "|"
}
pattern += fmt.Sprintf("(^%s-)", regexp.QuoteMeta(account))
}
}
return regexp.Compile(pattern)
}
// TODO(peter): Do we need this command given that the "list" command syncs as
// a side-effect. If you don't care about the list output, just "roachprod list
// &>/dev/null".
var syncCmd = &cobra.Command{
Use: "sync",
Short: "sync ssh keys/config and hosts files",
Long: ``,
Args: cobra.NoArgs,
Run: wrap(func(cmd *cobra.Command, args []string) error {
_, err := syncCloud(quiet)
return err
}),
}
var lockFile = os.ExpandEnv("$HOME/.roachprod/LOCK")
var bashCompletion = os.ExpandEnv("$HOME/.roachprod/bash-completion.sh")
// syncCloud grabs an exclusive lock on the roachprod state and then proceeds to
// read the current state from the cloud and write it out to disk. The locking
// protects both the reading and the writing in order to prevent the hazard
// caused by concurrent goroutines reading cloud state in a different order
// than writing it to disk.
func syncCloud(quiet bool) (*cld.Cloud, error) {
if !quiet {
fmt.Println("Syncing...")
}
// Acquire a filesystem lock so that two concurrent synchronizations of
// roachprod state don't clobber each other.
f, err := os.Create(lockFile)
if err != nil {
return nil, errors.Wrapf(err, "creating lock file %q", lockFile)
}
if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
return nil, errors.Wrap(err, "acquiring lock on %q")
}
defer f.Close()
cloud, err := cld.ListCloud()
if err != nil {
return nil, err
}
if err := syncHosts(cloud); err != nil {
return nil, err
}
var vms vm.List
for _, c := range cloud.Clusters {
vms = append(vms, c.VMs...)
}
// Figure out if we're going to overwrite the DNS entries. We don't want to
// overwrite if we don't have all the VMs of interest, so we only do it if we
// have a list of all VMs from both AWS and GCE (so if both providers have
// been used to get the VMs and for GCP also if we listed the VMs in the
// default project).
refreshDNS := true
if p := vm.Providers[gce.ProviderName]; !p.Active() {
refreshDNS = false
} else {
var defaultProjectFound bool
for _, prj := range p.(*gce.Provider).GetProjects() {
if prj == gce.DefaultProject() {
defaultProjectFound = true
break
}
}
if !defaultProjectFound {
refreshDNS = false
}
}
if !vm.Providers[aws.ProviderName].Active() {
refreshDNS = false
}
// DNS entries are maintained in the GCE DNS registry for all vms, from all
// clouds.
if refreshDNS {
if !quiet {
fmt.Println("Refreshing DNS entries...")
}
if err := gce.SyncDNS(vms); err != nil {
fmt.Fprintf(os.Stderr, "failed to update %s DNS: %v", gce.Subdomain, err)
}
} else {
if !quiet {
fmt.Println("Not refreshing DNS entries. We did not have all the VMs.")
}
}
if err := vm.ProvidersSequential(vm.AllProviderNames(), func(p vm.Provider) error {
return p.CleanSSH()
}); err != nil {
return nil, err
}
_ = rootCmd.GenBashCompletionFile(bashCompletion)
if err := vm.ProvidersSequential(vm.AllProviderNames(), func(p vm.Provider) error {
return p.ConfigSSH()
}); err != nil {
return nil, err
}
return cloud, nil
}
var gcCmd = &cobra.Command{
Use: "gc",
Short: "GC expired clusters\n",
Long: `Garbage collect expired clusters.
Destroys expired clusters, sending email if properly configured. Usually run
hourly by a cronjob so it is not necessary to run manually.
`,
Args: cobra.NoArgs,
Run: wrap(func(cmd *cobra.Command, args []string) error {
cloud, err := cld.ListCloud()
if err != nil {
return err
}
return cld.GCClusters(cloud, dryrun)
}),
}
var extendCmd = &cobra.Command{
Use: "extend <cluster>",
Short: "extend the lifetime of a cluster",
Long: `Extend the lifetime of the specified cluster to prevent it from being
destroyed:
roachprod extend marc-test --lifetime=6h
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
clusterName, err := verifyClusterName(args[0])
if err != nil {
return err
}
cloud, err := cld.ListCloud()
if err != nil {
return err
}
c, ok := cloud.Clusters[clusterName]
if !ok {
return fmt.Errorf("cluster %s does not exist", clusterName)
}
if err := cld.ExtendCluster(c, extendLifetime); err != nil {
return err
}
// Reload the clusters and print details.
cloud, err = cld.ListCloud()
if err != nil {
return err
}
c, ok = cloud.Clusters[clusterName]
if !ok {
return fmt.Errorf("cluster %s does not exist", clusterName)
}
c.PrintDetails()
return nil
}),
}
const tagHelp = `
The --tag flag can be used to associate a tag with the process. This tag can
then be used to restrict the processes which are operated on by the status and
stop commands. Tags can have a hierarchical component by utilizing a slash
separated string similar to a filesystem path. A tag matches if a prefix of the
components match. For example, the tag "a/b" will match both "a/b" and
"a/b/c/d".
`
var startCmd = &cobra.Command{
Use: "start <cluster>",
Short: "start nodes on a cluster",
Long: `Start nodes on a cluster.
The --secure flag can be used to start nodes in secure mode (i.e. using
certs). When specified, there is a one time initialization for the cluster to
create and distribute the certs. Note that running some modes in secure mode
and others in insecure mode is not a supported Cockroach configuration.
As a debugging aid, the --sequential flag starts the nodes sequentially so node
IDs match hostnames. Otherwise nodes are started in parallel.
The --binary flag specifies the remote binary to run. It is up to the roachprod
user to ensure this binary exists, usually via "roachprod put". Note that no
cockroach software is installed by default on a newly created cluster.
The --args and --env flags can be used to pass arbitrary command line flags and
environment variables to the cockroach process.
` + tagHelp + `
The "start" command takes care of setting up the --join address and specifying
reasonable defaults for other flags. One side-effect of this convenience is
that node 1 is special and must be started for the cluster to be initialized.
If the COCKROACH_DEV_LICENSE environment variable is set the enterprise.license
cluster setting will be set to its value.
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
c.Start()
return nil
}),
}
var stopCmd = &cobra.Command{
Use: "stop <cluster> [--sig] [--wait]",
Short: "stop nodes on a cluster",
Long: `Stop nodes on a cluster.
Stop roachprod created processes running on the nodes in a cluster, including
processes started by the "start", "run" and "ssh" commands. Every process
started by roachprod is tagged with a ROACHPROD=<node> environment variable
which is used by "stop" to locate the processes and terminate them. By default
processes are killed with signal 9 (SIGKILL) giving them no chance for a graceful
exit.
The --sig flag will pass a signal to kill to allow us finer control over how we
shut down cockroach. The --wait flag causes stop to loop waiting for all
processes with the ROACHPROD=<node> environment variable to exit. Note that
stop will wait forever if you specify --wait with a non-terminating signal
(e.g. SIGHUP). --wait defaults to true for signal 9 (SIGKILL) and false for all
other signals.
` + tagHelp + `
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
wait := waitFlag
if sig == 9 /* SIGKILL */ && !cmd.Flags().Changed("wait") {
wait = true
}
c.Stop(sig, wait)
return nil
}),
}
var statusCmd = &cobra.Command{
Use: "status <cluster>",
Short: "retrieve the status of nodes in a cluster",
Long: `Retrieve the status of nodes in a cluster.
The "status" command outputs the binary and PID for the specified nodes:
~ roachprod status local
local: status 3/3
1: cockroach 29688
2: cockroach 29687
3: cockroach 29689
` + tagHelp + `
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
c.Status()
return nil
}),
}
var logsCmd = &cobra.Command{
Use: "logs",
Short: "retrieve and merge logs in a cluster",
Long: `Retrieve and merge logs in a cluster.
The "logs" command runs until terminated. It works similarly to get but is
specifically focused on retrieving logs periodically and then merging them
into a single stream.
`,
Args: cobra.RangeArgs(1, 2),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
var dest string
if len(args) == 2 {
dest = args[1]
} else {
dest = c.Name + ".logs"
}
return c.Logs(logsDir, dest, username, logsFilter, logsProgramFilter, logsInterval, logsFrom, logsTo, cmd.OutOrStdout())
}),
}
var monitorCmd = &cobra.Command{
Use: "monitor",
Short: "monitor the status of nodes in a cluster",
Long: `Monitor the status of cockroach nodes in a cluster.
The "monitor" command runs until terminated. At startup it outputs a line for
each specified node indicating the status of the node (either the PID of the
node if alive, or "dead" otherwise). It then watches for changes in the status
of nodes, outputting a line whenever a change is detected:
~ roachprod monitor local
1: 29688
3: 29689
2: 29687
3: dead
3: 30718
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
var errs []string
for msg := range c.Monitor(monitorIgnoreEmptyNodes, monitorOneShot) {
if msg.Err != nil {
msg.Msg += "error: " + msg.Err.Error()
}
s := fmt.Sprintf("%d: %s", msg.Index, msg.Msg)
if msg.Err != nil || strings.Contains(msg.Msg, "dead") {
errs = append(errs, s)
}
fmt.Println(s)
}
if len(errs) != 0 {
return errors.New(strings.Join(errs, ", "))
}
return nil
}),
}
var wipeCmd = &cobra.Command{
Use: "wipe <cluster>",
Short: "wipe a cluster",
Long: `Wipe the nodes in a cluster.
The "wipe" command first stops any processes running on the nodes in a cluster
(via the "stop" command) and then deletes the data directories used by the
nodes.
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
c.Wipe(wipePreserveCerts)
return nil
}),
}
var reformatCmd = &cobra.Command{
Use: "reformat <cluster> <filesystem>",
Short: "reformat disks in a cluster\n",
Long: `
Reformat disks in a cluster to use the specified filesystem.
WARNING: Reformatting will delete all existing data in the cluster.
Filesystem options:
ext4
zfs
When running with ZFS, you can create a snapshot of the filesystem's current
state using the 'zfs snapshot' command:
$ roachprod run <cluster> 'sudo zfs snapshot data1@pristine'
You can then nearly instantaneously restore the filesystem to this state with
the 'zfs rollback' command:
$ roachprod run <cluster> 'sudo zfs rollback data1@pristine'
`,
Args: cobra.ExactArgs(2),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
var fsCmd string
switch fs := args[1]; fs {
case "zfs":
if err := install.Install(c, []string{"zfs"}); err != nil {
return err
}
fsCmd = `sudo zpool create -f data1 -m /mnt/data1 /dev/sdb`
case "ext4":
fsCmd = `sudo mkfs.ext4 -F /dev/sdb && sudo mount -o discard,defaults /dev/sdb /mnt/data1`
default:
return fmt.Errorf("unknown filesystem %q", fs)
}
err = c.Run(os.Stdout, os.Stderr, c.Nodes, install.OtherCmd, "reformatting", fmt.Sprintf(`
set -euo pipefail
if sudo zpool list -Ho name 2>/dev/null | grep ^data1$; then
sudo zpool destroy -f data1
fi
if mountpoint -q /mnt/data1; then
sudo umount -f /mnt/data1
fi
%s
sudo chmod 777 /mnt/data1
`, fsCmd))
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
}
return nil
}),
}
var runCmd = &cobra.Command{
Use: "run <cluster> <command> [args]",
Aliases: []string{"ssh"},
Short: "run a command on the nodes in a cluster",
Long: `Run a command on the nodes in a cluster.
`,
Args: cobra.MinimumNArgs(1),
Run: wrap(func(_ *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
// Use "ssh" if an interactive session was requested (i.e. there is no
// remote command to run).
if len(args) == 1 {
return c.SSH(nil, args[1:])
}
cmd := strings.TrimSpace(strings.Join(args[1:], " "))
title := cmd
if len(title) > 30 {
title = title[:27] + "..."
}
return c.Run(os.Stdout, os.Stderr, c.Nodes, install.CockroachCmd, title, cmd)
}),
}
var installCmd = &cobra.Command{
Use: "install <cluster> <software>",
Short: "install 3rd party software",
Long: `Install third party software. Currently available installation options are:
` + strings.Join(install.SortedCmds(), "\n ") + `
`,
Args: cobra.MinimumNArgs(2),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
return install.Install(c, args[1:])
}),
}
var stageCmd = &cobra.Command{
Use: "stage <cluster> <application> [<sha/version>]",
Short: "stage cockroach binaries",
Long: `Stages release and edge binaries to the cluster.
Currently available application options are:
cockroach - Cockroach Unofficial. Can provide an optional SHA, otherwise
latest build version is used.
workload - Cockroach workload application.
release - Official CockroachDB Release. Must provide a specific release
version.
Some examples of usage:
-- stage edge build of cockroach build at a specific SHA:
roachprod stage my-cluster cockroach e90e6903fee7dd0f88e20e345c2ddfe1af1e5a97
-- Stage the most recent edge build of the workload tool:
roachprod stage my-cluster workload
-- Stage the official release binary of CockroachDB at version 2.0.5
roachprod stage my-cluster release v2.0.5
`,
Args: cobra.RangeArgs(2, 3),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
os := "linux"
if stageOS != "" {
os = stageOS
} else if c.IsLocal() {
os = runtime.GOOS
}
var debugArch, releaseArch string
switch os {
case "linux":
debugArch, releaseArch = "linux-gnu-amd64", "linux-amd64"
case "darwin":
debugArch, releaseArch = "darwin-amd64", "darwin-10.9-amd64"
case "windows":
debugArch, releaseArch = "windows-amd64", "windows-6.2-amd64"
default:
return errors.Errorf("cannot stage binary on %s", os)
}
applicationName := args[1]
versionArg := ""
if len(args) == 3 {
versionArg = args[2]
}
switch applicationName {
case "cockroach":
return install.StageRemoteBinary(
c, applicationName, "cockroach/cockroach", versionArg, debugArch,
)
case "workload":
return install.StageRemoteBinary(
c, applicationName, "cockroach/workload", versionArg, "", /* arch */
)
case "release":
return install.StageCockroachRelease(c, versionArg, releaseArch)
default:
return fmt.Errorf("unknown application %s", applicationName)
}
}),
}
var distributeCertsCmd = &cobra.Command{
Use: "distribute-certs <cluster>",
Short: "distribute certificates to the nodes in a cluster",
Long: `Distribute certificates to the nodes in a cluster.
If the certificates already exist, no action is taken. Note that this command is
invoked automatically when a secure cluster is bootstrapped by "roachprod
start."
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
c.DistributeCerts()
return nil
}),
}
var putCmd = &cobra.Command{
Use: "put <cluster> <src> [<dest>]",
Short: "copy a local file to the nodes in a cluster",
Long: `Copy a local file to the nodes in a cluster.
`,
Args: cobra.RangeArgs(2, 3),
Run: wrap(func(cmd *cobra.Command, args []string) error {
src := args[1]
dest := path.Base(src)
if len(args) == 3 {
dest = args[2]
}
c, err := newCluster(args[0])
if err != nil {
return err
}
c.Put(src, dest)
return nil
}),
}
var getCmd = &cobra.Command{
Use: "get <cluster> <src> [<dest>]",
Short: "copy a remote file from the nodes in a cluster",
Long: `Copy a remote file from the nodes in a cluster. If the file is retrieved from
multiple nodes the destination file name will be prefixed with the node number.
`,
Args: cobra.RangeArgs(2, 3),
Run: wrap(func(cmd *cobra.Command, args []string) error {
src := args[1]
dest := path.Base(src)
if len(args) == 3 {
dest = args[2]
}
c, err := newCluster(args[0])
if err != nil {
return err
}
c.Get(src, dest)
return nil
}),
}
var sqlCmd = &cobra.Command{
Use: "sql <cluster> -- [args]",
Short: "run `cockroach sql` on a remote cluster",
Long: "Run `cockroach sql` on a remote cluster.\n",
Args: cobra.MinimumNArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
cockroach, ok := c.Impl.(install.Cockroach)
if !ok {
return errors.New("sql is only valid on cockroach clusters")
}
return cockroach.SQL(c, args[1:])
}),
}
var pgurlCmd = &cobra.Command{
Use: "pgurl <cluster>",
Short: "generate pgurls for the nodes in a cluster",
Long: `Generate pgurls for the nodes in a cluster.
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
nodes := c.ServerNodes()
ips := make([]string, len(nodes))
if external {
for i := 0; i < len(nodes); i++ {
ips[i] = c.VMs[nodes[i]-1]
}
} else {
c.Parallel("", len(nodes), 0, func(i int) ([]byte, error) {
var err error
ips[i], err = c.GetInternalIP(nodes[i])
return nil, err
})
}
var urls []string
for i, ip := range ips {
if ip == "" {
return errors.Errorf("empty ip: %v", ips)
}
urls = append(urls, c.Impl.NodeURL(c, ip, c.Impl.NodePort(c, nodes[i])))
}
fmt.Println(strings.Join(urls, " "))
if len(urls) != len(nodes) {
return errors.Errorf("have nodes %v, but urls %v from ips %v", nodes, urls, ips)
}
return nil
}),
}
var adminurlCmd = &cobra.Command{
Use: "adminurl <cluster>",
Aliases: []string{"admin", "adminui"},
Short: "generate admin UI URLs for the nodes in a cluster\n",
Long: `Generate admin UI URLs for the nodes in a cluster.
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
for i, node := range c.ServerNodes() {
host := vm.Name(c.Name, node) + "." + gce.Subdomain
// verify DNS is working / fallback to IPs if not.
if i == 0 && !adminurlIPs {
if _, err := net.LookupHost(host); err != nil {
fmt.Fprintf(os.Stderr, "no valid DNS (yet?). might need to re-run `sync`?\n")
adminurlIPs = true
}
}
if adminurlIPs {
host = c.VMs[node-1]
}
port := install.GetAdminUIPort(c.Impl.NodePort(c, node))
scheme := "http"
if c.Secure {
scheme = "https"
}
if !strings.HasPrefix(adminurlPath, "/") {
adminurlPath = "/" + adminurlPath
}
url := fmt.Sprintf("%s://%s:%d%s", scheme, host, port, adminurlPath)
if adminurlOpen {
if err := exec.Command("python", "-m", "webbrowser", url).Run(); err != nil {
return err
}
} else {
fmt.Println(url)
}
}
return nil
}),
}
var ipCmd = &cobra.Command{
Use: "ip <cluster>",
Short: "get the IP addresses of the nodes in a cluster",
Long: `Get the IP addresses of the nodes in a cluster.
`,
Args: cobra.ExactArgs(1),
Run: wrap(func(cmd *cobra.Command, args []string) error {
c, err := newCluster(args[0])
if err != nil {
return err
}
nodes := c.ServerNodes()
ips := make([]string, len(nodes))
if external {
for i := 0; i < len(nodes); i++ {
ips[i] = c.VMs[nodes[i]-1]
}
} else {
c.Parallel("", len(nodes), 0, func(i int) ([]byte, error) {
var err error
ips[i], err = c.GetInternalIP(nodes[i])
return nil, err
})
}
for _, ip := range ips {
fmt.Println(ip)
}
return nil
}),
}
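// Illustrative invocations of the ip command (hypothetical cluster name):
//   roachprod ip marc-test              // internal IPs, looked up in parallel on the nodes
//   roachprod ip marc-test --external   // external addresses taken from c.VMs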
func main() {
// The commands are displayed in the order they are added to rootCmd. Note
// that gcCmd and adminurlCmd contain a trailing \n in their Short help in
// order to separate the commands into logical groups.
cobra.EnableCommandSorting = false
rootCmd.AddCommand(
createCmd,
destroyCmd,
extendCmd,
listCmd,
syncCmd,
gcCmd,
setupSSHCmd,
statusCmd,
monitorCmd,
startCmd,
stopCmd,
runCmd,
wipeCmd,
reformatCmd,
installCmd,
distributeCertsCmd,
putCmd,
getCmd,
stageCmd,
sqlCmd,
ipCmd,
pgurlCmd,
adminurlCmd,
logsCmd,
cachedHostsCmd,
)
rootCmd.BashCompletionFunction = fmt.Sprintf(`__custom_func()
{
# only complete the 2nd arg, e.g. adminurl <foo>
if ! [ $c -eq 2 ]; then
return
fi
# don't complete commands which do not accept a cluster/host arg
case ${last_command} in
%s)
return
;;
esac
local hosts_out
if hosts_out=$(roachprod cached-hosts --cluster="${cur}" 2>/dev/null); then
COMPREPLY=( $( compgen -W "${hosts_out[*]}" -- "$cur" ) )
fi
}`,
strings.Join(func(cmds ...*cobra.Command) (s []string) {
for _, cmd := range cmds {
s = append(s, fmt.Sprintf("%s_%s", rootCmd.Name(), cmd.Name()))
}
return s
}(createCmd, listCmd, syncCmd, gcCmd), " | "),
)
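// A rough sketch of how the completion snippet above expands, assuming rootCmd.Name()
// is "roachprod" (defined elsewhere in this file): the case pattern becomes
// "roachprod_create | roachprod_list | roachprod_sync | roachprod_gc", so those
// commands skip cluster-name completion, while every other command completes its
// second argument via `roachprod cached-hosts --cluster=<prefix>`.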
rootCmd.PersistentFlags().BoolVarP(
&quiet, "quiet", "q", false, "disable fancy progress output")
rootCmd.PersistentFlags().IntVarP(
&maxConcurrency, "max-concurrency", "", 32,
"maximum number of operations to execute on nodes concurrently, set to zero for infinite")
for _, cmd := range []*cobra.Command{createCmd, destroyCmd, extendCmd, logsCmd} {
cmd.Flags().StringVarP(&username, "username", "u", os.Getenv("ROACHPROD_USER"),
"Username to run under, detect if blank")
}
for _, cmd := range []*cobra.Command{statusCmd, monitorCmd, startCmd,
stopCmd, runCmd, wipeCmd, reformatCmd, installCmd, putCmd, getCmd,
sqlCmd, pgurlCmd, adminurlCmd, ipCmd,
} {
cmd.Flags().BoolVar(
&ssh.InsecureIgnoreHostKey, "insecure-ignore-host-key", true, "don't check ssh host keys")
}
createCmd.Flags().DurationVarP(&createVMOpts.Lifetime,
"lifetime", "l", 12*time.Hour, "Lifetime of the cluster")
createCmd.Flags().BoolVar(&createVMOpts.SSDOpts.UseLocalSSD,
"local-ssd", true, "Use local SSD")
createCmd.Flags().BoolVar(&createVMOpts.SSDOpts.NoExt4Barrier,
"local-ssd-no-ext4-barrier", true,
`Mount the local SSD with the "-o nobarrier" flag. `+
`Ignored if --local-ssd=false is specified.`)
createCmd.Flags().IntVarP(&numNodes,
"nodes", "n", 4, "Total number of nodes, distributed across all clouds")
createCmd.Flags().StringSliceVarP(&createVMOpts.VMProviders,
"clouds", "c", []string{gce.ProviderName},
fmt.Sprintf("The cloud provider(s) to use when creating new vm instances: %s", vm.AllProviderNames()))
createCmd.Flags().BoolVar(&createVMOpts.GeoDistributed,
"geo", false, "Create geo-distributed cluster")
// Allow each Provider to inject additional configuration flags
for _, p := range vm.Providers {
p.Flags().ConfigureCreateFlags(createCmd.Flags())
for _, cmd := range []*cobra.Command{
destroyCmd, extendCmd, listCmd, syncCmd, gcCmd,
} {
p.Flags().ConfigureClusterFlags(cmd.Flags(), vm.AcceptMultipleProjects)
}
// createCmd only accepts a single GCE project, as opposed to all the other
// commands.
p.Flags().ConfigureClusterFlags(createCmd.Flags(), vm.SingleProject)
}
destroyCmd.Flags().BoolVarP(&destroyAllMine,
"all-mine", "m", false, "Destroy all clusters belonging to the current user")
extendCmd.Flags().DurationVarP(&extendLifetime,
"lifetime", "l", 12*time.Hour, "Lifetime of the cluster")
listCmd.Flags().BoolVarP(&listDetails,
"details", "d", false, "Show cluster details")
listCmd.Flags().BoolVar(&listJSON,
"json", false, "Show cluster specs in a json format")
listCmd.Flags().BoolVarP(&listMine,
"mine", "m", false, "Show only clusters belonging to the current user")
adminurlCmd.Flags().BoolVar(
&adminurlOpen, `open`, false, `Open the url in a browser`)
adminurlCmd.Flags().StringVar(
&adminurlPath, `path`, "/", `Path to add to URL (e.g. to open the same page on each node)`)
adminurlCmd.Flags().BoolVar(
&adminurlIPs, `ips`, false, `Use Public IPs instead of DNS names in URL`)
gcCmd.Flags().BoolVarP(
&dryrun, "dry-run", "n", dryrun, "dry run (don't perform any actions)")
gcCmd.Flags().StringVar(&config.SlackToken, "slack-token", "", "Slack bot token")
pgurlCmd.Flags().BoolVar(
&external, "external", false, "return pgurls for external connections")
ipCmd.Flags().BoolVar(
&external, "external", false, "return external IP addresses")
runCmd.Flags().BoolVar(
&secure, "secure", false, "use a secure cluster")
startCmd.Flags().IntVarP(&numRacks,
"racks", "r", 0, "the number of racks to partition the nodes into")
stopCmd.Flags().IntVar(&sig, "sig", sig, "signal to pass to kill")
stopCmd.Flags().BoolVar(&waitFlag, "wait", waitFlag, "wait for processes to exit")
wipeCmd.Flags().BoolVar(&wipePreserveCerts, "preserve-certs", false, "do not wipe certificates")
for _, cmd := range []*cobra.Command{
startCmd, statusCmd, stopCmd, runCmd,
} {
cmd.Flags().StringVar(
&tag, "tag", "", "the process tag")
}
for _, cmd := range []*cobra.Command{
startCmd, putCmd, getCmd,
} {
cmd.Flags().BoolVar(new(bool), "scp", false, "DEPRECATED")
_ = cmd.Flags().MarkDeprecated("scp", "always true")
}
putCmd.Flags().BoolVar(&useTreeDist, "treedist", useTreeDist, "use treedist copy algorithm")
stageCmd.Flags().StringVar(&stageOS, "os", "", "operating system override for staged binaries")
logsCmd.Flags().StringVar(
&logsFilter, "filter", "", "re to filter log messages")
logsCmd.Flags().Var(
flagutil.Time(&logsFrom), "from", "time from which to stream logs")
logsCmd.Flags().Var(
flagutil.Time(&logsTo), "to", "time to which to stream logs")
logsCmd.Flags().DurationVar(
&logsInterval, "interval", 200*time.Millisecond, "interval to poll logs from host")
logsCmd.Flags().StringVar(
&logsDir, "logs-dir", "logs", "path to the logs dir, if remote, relative to username's home dir, ignored if local")
logsCmd.Flags().StringVar(
&logsProgramFilter, "logs-program", "^cockroach$", "regular expression of the name of program in log files to search")
monitorCmd.Flags().BoolVar(
&monitorIgnoreEmptyNodes,
"ignore-empty-nodes",
false,
"Automatically detect the (subset of the given) nodes which to monitor "+
"based on the presence of a nontrivial data directory.")
monitorCmd.Flags().BoolVar(
&monitorOneShot,
"oneshot",
false,
"Report the status of all targeted nodes once, then exit. The exit "+
"status is nonzero if (and only if) any node was found not running.")
cachedHostsCmd.Flags().StringVar(&cachedHostsCluster, "cluster", "", "print hosts matching cluster")
for _, cmd := range []*cobra.Command{
getCmd, putCmd, runCmd, startCmd, statusCmd, stopCmd,
wipeCmd, pgurlCmd, adminurlCmd, sqlCmd, installCmd,
} {
switch cmd {
case startCmd:
cmd.Flags().BoolVar(
&install.StartOpts.Sequential, "sequential", true,
"start nodes sequentially so node IDs match hostnames")
cmd.Flags().StringArrayVarP(
&nodeArgs, "args", "a", nil, "node arguments")
cmd.Flags().StringVarP(
&nodeEnv, "env", "e", nodeEnv, "node environment variables")
cmd.Flags().StringVarP(
&clusterType, "type", "t", clusterType, `cluster type ("cockroach" or "cassandra")`)
cmd.Flags().BoolVar(
&install.StartOpts.Encrypt, "encrypt", encrypt, "start nodes with encryption at rest turned on")
fallthrough
case sqlCmd:
cmd.Flags().StringVarP(
&config.Binary, "binary", "b", config.Binary,
"the remote cockroach binary to use")
fallthrough
case pgurlCmd, adminurlCmd:
cmd.Flags().BoolVar(
&secure, "secure", false, "use a secure cluster")
}
if cmd.Long == "" {
cmd.Long = cmd.Short
}
cmd.Long += fmt.Sprintf(`
Node specification
By default the operation is performed on all nodes in <cluster>. A subset of
nodes can be specified by appending :<nodes> to the cluster name. The syntax
of <nodes> is a comma-separated list of specific node IDs or ranges of
IDs. For example:
roachprod %[1]s marc-test:1-3,8-9
will perform %[1]s on:
marc-test-1
marc-test-2
marc-test-3
marc-test-8
marc-test-9
`, cmd.Name())
}
var err error
config.OSUser, err = user.Current()
if err != nil {
fmt.Fprintf(os.Stderr, "unable to lookup current user: %s\n", err)
os.Exit(1)
}
if err := initDirs(); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
if err := loadClusters(); err != nil {
// We don't want to exit as we may be looking at the help message.
fmt.Printf("problem loading clusters: %s\n", err)
}
if err := rootCmd.Execute(); err != nil {
// Cobra has already printed the error message.
os.Exit(1)
}
}
| [
"\"ROACHPROD_USER\""
]
| []
| [
"ROACHPROD_USER"
]
| [] | ["ROACHPROD_USER"] | go | 1 | 0 | |
clients/google-api-services-sqladmin/v1beta4/1.31.0/com/google/api/services/sqladmin/SQLAdmin.java | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.sqladmin;
/**
* Service definition for SQLAdmin (v1beta4).
*
* <p>
* API for Cloud SQL database instance management
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/cloud-sql/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link SQLAdminRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class SQLAdmin extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the Cloud SQL Admin API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://sqladmin.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://sqladmin.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public SQLAdmin(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
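  // A minimal construction sketch (illustrative; the application name is a placeholder,
  // and a real client would normally pass an authorized HttpRequestInitializer instead
  // of null; newTrustedTransport() can throw, so call it where the exceptions are handled):
  //   SQLAdmin sqladmin = new SQLAdmin.Builder(
  //           com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
  //           new com.google.api.client.json.gson.GsonFactory(),
  //           /* httpRequestInitializer= */ null)
  //       .setApplicationName("example-app")
  //       .build();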
/**
* @param builder builder
*/
SQLAdmin(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the BackupRuns collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.BackupRuns.List request = sqladmin.backupRuns().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public BackupRuns backupRuns() {
return new BackupRuns();
}
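  // Illustrative call (hypothetical project and instance names):
  //   BackupRunsListResponse runs = sqladmin.backupRuns()
  //       .list("my-project", "my-instance")
  //       .setMaxResults(10)
  //       .execute();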
/**
* The "backupRuns" collection of methods.
*/
public class BackupRuns {
/**
* Deletes the backup taken by a backup run.
*
* Create a request for the method "backupRuns.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param id The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.
* @return the request
*/
public Delete delete(java.lang.String project, java.lang.String instance, java.lang.Long id) throws java.io.IOException {
Delete result = new Delete(project, instance, id);
initialize(result);
return result;
}
public class Delete extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns/{id}";
/**
* Deletes the backup taken by a backup run.
*
* Create a request for the method "backupRuns.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param id The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.
* @since 1.13
*/
protected Delete(java.lang.String project, java.lang.String instance, java.lang.Long id) {
super(SQLAdmin.this, "DELETE", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.id = com.google.api.client.util.Preconditions.checkNotNull(id, "Required parameter id must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Delete setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Delete setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** The ID of the Backup Run to delete. To find a Backup Run ID, use the list method. */
@com.google.api.client.util.Key
private java.lang.Long id;
/** The ID of the Backup Run to delete. To find a Backup Run ID, use the list method.
*/
public java.lang.Long getId() {
return id;
}
/** The ID of the Backup Run to delete. To find a Backup Run ID, use the list method. */
public Delete setId(java.lang.Long id) {
this.id = id;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Retrieves a resource containing information about a backup run.
*
* Create a request for the method "backupRuns.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param id The ID of this Backup Run.
* @return the request
*/
public Get get(java.lang.String project, java.lang.String instance, java.lang.Long id) throws java.io.IOException {
Get result = new Get(project, instance, id);
initialize(result);
return result;
}
public class Get extends SQLAdminRequest<com.google.api.services.sqladmin.model.BackupRun> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns/{id}";
/**
* Retrieves a resource containing information about a backup run.
*
* Create a request for the method "backupRuns.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param id The ID of this Backup Run.
* @since 1.13
*/
protected Get(java.lang.String project, java.lang.String instance, java.lang.Long id) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.BackupRun.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.id = com.google.api.client.util.Preconditions.checkNotNull(id, "Required parameter id must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Get setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Get setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** The ID of this Backup Run. */
@com.google.api.client.util.Key
private java.lang.Long id;
/** The ID of this Backup Run.
*/
public java.lang.Long getId() {
return id;
}
/** The ID of this Backup Run. */
public Get setId(java.lang.Long id) {
this.id = id;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Creates a new backup run on demand. This method is applicable only to Second Generation
* instances.
*
* Create a request for the method "backupRuns.insert".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.BackupRun}
* @return the request
*/
public Insert insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.BackupRun content) throws java.io.IOException {
Insert result = new Insert(project, instance, content);
initialize(result);
return result;
}
public class Insert extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns";
/**
* Creates a new backup run on demand. This method is applicable only to Second Generation
* instances.
*
* Create a request for the method "backupRuns.insert".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.BackupRun}
* @since 1.13
*/
protected Insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.BackupRun content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Insert set$Xgafv(java.lang.String $Xgafv) {
return (Insert) super.set$Xgafv($Xgafv);
}
@Override
public Insert setAccessToken(java.lang.String accessToken) {
return (Insert) super.setAccessToken(accessToken);
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setCallback(java.lang.String callback) {
return (Insert) super.setCallback(callback);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUploadType(java.lang.String uploadType) {
return (Insert) super.setUploadType(uploadType);
}
@Override
public Insert setUploadProtocol(java.lang.String uploadProtocol) {
return (Insert) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Insert setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Insert setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists all backup runs associated with the project or a given instance and configuration in the
* reverse chronological order of the backup initiation time.
*
* Create a request for the method "backupRuns.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID, or "-" for all instances. This does not include the project ID.
* @return the request
*/
public List list(java.lang.String project, java.lang.String instance) throws java.io.IOException {
List result = new List(project, instance);
initialize(result);
return result;
}
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.BackupRunsListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/backupRuns";
/**
* Lists all backup runs associated with the project or a given instance and configuration in the
* reverse chronological order of the backup initiation time.
*
* Create a request for the method "backupRuns.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID, or "-" for all instances. This does not include the project ID.
* @since 1.13
*/
protected List(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.BackupRunsListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID, or "-" for all instances. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID, or "-" for all instances. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID, or "-" for all instances. This does not include the project ID. */
public List setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Maximum number of backup runs per response. */
@com.google.api.client.util.Key
private java.lang.Integer maxResults;
/** Maximum number of backup runs per response.
*/
public java.lang.Integer getMaxResults() {
return maxResults;
}
/** Maximum number of backup runs per response. */
public List setMaxResults(java.lang.Integer maxResults) {
this.maxResults = maxResults;
return this;
}
/**
* A previously-returned page token representing part of the larger set of results to view.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A previously-returned page token representing part of the larger set of results to view.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A previously-returned page token representing part of the larger set of results to view.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Connect collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Connect.List request = sqladmin.connect().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Connect connect() {
return new Connect();
}
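  // Illustrative call (hypothetical project and instance names):
  //   ConnectSettings settings = sqladmin.connect().get("my-project", "my-instance").execute();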
/**
* The "connect" collection of methods.
*/
public class Connect {
/**
* Generates a short-lived X509 certificate containing the provided public key and signed by a
* private key specific to the target instance. Users may use the certificate to authenticate as
* themselves when connecting to the database.
*
* Create a request for the method "connect.generateEphemeralCert".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link GenerateEphemeralCert#execute()} method to invoke the remote
* operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.GenerateEphemeralCertRequest}
* @return the request
*/
public GenerateEphemeralCert generateEphemeralCert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.GenerateEphemeralCertRequest content) throws java.io.IOException {
GenerateEphemeralCert result = new GenerateEphemeralCert(project, instance, content);
initialize(result);
return result;
}
public class GenerateEphemeralCert extends SQLAdminRequest<com.google.api.services.sqladmin.model.GenerateEphemeralCertResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}:generateEphemeralCert";
/**
* Generates a short-lived X509 certificate containing the provided public key and signed by a
* private key specific to the target instance. Users may use the certificate to authenticate as
* themselves when connecting to the database.
*
* Create a request for the method "connect.generateEphemeralCert".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link GenerateEphemeralCert#execute()} method to invoke the
* remote operation. <p> {@link GenerateEphemeralCert#initialize(com.google.api.client.googleapis.
* services.AbstractGoogleClientRequest)} must be called to initialize this instance immediately
* after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.GenerateEphemeralCertRequest}
* @since 1.13
*/
protected GenerateEphemeralCert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.GenerateEphemeralCertRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.GenerateEphemeralCertResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public GenerateEphemeralCert set$Xgafv(java.lang.String $Xgafv) {
return (GenerateEphemeralCert) super.set$Xgafv($Xgafv);
}
@Override
public GenerateEphemeralCert setAccessToken(java.lang.String accessToken) {
return (GenerateEphemeralCert) super.setAccessToken(accessToken);
}
@Override
public GenerateEphemeralCert setAlt(java.lang.String alt) {
return (GenerateEphemeralCert) super.setAlt(alt);
}
@Override
public GenerateEphemeralCert setCallback(java.lang.String callback) {
return (GenerateEphemeralCert) super.setCallback(callback);
}
@Override
public GenerateEphemeralCert setFields(java.lang.String fields) {
return (GenerateEphemeralCert) super.setFields(fields);
}
@Override
public GenerateEphemeralCert setKey(java.lang.String key) {
return (GenerateEphemeralCert) super.setKey(key);
}
@Override
public GenerateEphemeralCert setOauthToken(java.lang.String oauthToken) {
return (GenerateEphemeralCert) super.setOauthToken(oauthToken);
}
@Override
public GenerateEphemeralCert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GenerateEphemeralCert) super.setPrettyPrint(prettyPrint);
}
@Override
public GenerateEphemeralCert setQuotaUser(java.lang.String quotaUser) {
return (GenerateEphemeralCert) super.setQuotaUser(quotaUser);
}
@Override
public GenerateEphemeralCert setUploadType(java.lang.String uploadType) {
return (GenerateEphemeralCert) super.setUploadType(uploadType);
}
@Override
public GenerateEphemeralCert setUploadProtocol(java.lang.String uploadProtocol) {
return (GenerateEphemeralCert) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public GenerateEphemeralCert setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public GenerateEphemeralCert setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public GenerateEphemeralCert set(String parameterName, Object value) {
return (GenerateEphemeralCert) super.set(parameterName, value);
}
}
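    // Illustrative call (hypothetical identifiers; assumes the GenerateEphemeralCertRequest
    // model exposes a setPublicKey setter mirroring its public_key REST field):
    //   GenerateEphemeralCertResponse resp = sqladmin.connect()
    //       .generateEphemeralCert("my-project", "my-instance",
    //           new GenerateEphemeralCertRequest().setPublicKey(pemEncodedPublicKey))
    //       .execute();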
/**
* Retrieves connect settings about a Cloud SQL instance.
*
* Create a request for the method "connect.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public Get get(java.lang.String project, java.lang.String instance) throws java.io.IOException {
Get result = new Get(project, instance);
initialize(result);
return result;
}
public class Get extends SQLAdminRequest<com.google.api.services.sqladmin.model.ConnectSettings> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/connectSettings";
/**
* Retrieves connect settings about a Cloud SQL instance.
*
* Create a request for the method "connect.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected Get(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.ConnectSettings.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Get setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Get setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Optional. Optional snapshot read timestamp to trade freshness for performance. */
@com.google.api.client.util.Key
private String readTime;
/** Optional. Optional snapshot read timestamp to trade freshness for performance.
*/
public String getReadTime() {
return readTime;
}
/** Optional. Optional snapshot read timestamp to trade freshness for performance. */
public Get setReadTime(String readTime) {
this.readTime = readTime;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Databases collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Databases.List request = sqladmin.databases().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Databases databases() {
return new Databases();
}
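  // Illustrative call (hypothetical identifiers):
  //   Database db = sqladmin.databases().get("my-project", "my-instance", "my-database").execute();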
/**
* The "databases" collection of methods.
*/
public class Databases {
/**
* Deletes a database from a Cloud SQL instance.
*
* Create a request for the method "databases.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database to be deleted in the instance.
* @return the request
*/
public Delete delete(java.lang.String project, java.lang.String instance, java.lang.String database) throws java.io.IOException {
Delete result = new Delete(project, instance, database);
initialize(result);
return result;
}
public class Delete extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}";
/**
* Deletes a database from a Cloud SQL instance.
*
* Create a request for the method "databases.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database to be deleted in the instance.
* @since 1.13
*/
protected Delete(java.lang.String project, java.lang.String instance, java.lang.String database) {
super(SQLAdmin.this, "DELETE", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.database = com.google.api.client.util.Preconditions.checkNotNull(database, "Required parameter database must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Delete setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Delete setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Name of the database to be deleted in the instance. */
@com.google.api.client.util.Key
private java.lang.String database;
/** Name of the database to be deleted in the instance.
*/
public java.lang.String getDatabase() {
return database;
}
/** Name of the database to be deleted in the instance. */
public Delete setDatabase(java.lang.String database) {
this.database = database;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Retrieves a resource containing information about a database inside a Cloud SQL instance.
*
* Create a request for the method "databases.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database in the instance.
* @return the request
*/
public Get get(java.lang.String project, java.lang.String instance, java.lang.String database) throws java.io.IOException {
Get result = new Get(project, instance, database);
initialize(result);
return result;
}
public class Get extends SQLAdminRequest<com.google.api.services.sqladmin.model.Database> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}";
/**
* Retrieves a resource containing information about a database inside a Cloud SQL instance.
*
* Create a request for the method "databases.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database in the instance.
* @since 1.13
*/
protected Get(java.lang.String project, java.lang.String instance, java.lang.String database) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.Database.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.database = com.google.api.client.util.Preconditions.checkNotNull(database, "Required parameter database must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Get setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Get setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Name of the database in the instance. */
@com.google.api.client.util.Key
private java.lang.String database;
/** Name of the database in the instance.
*/
public java.lang.String getDatabase() {
return database;
}
/** Name of the database in the instance. */
public Get setDatabase(java.lang.String database) {
this.database = database;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
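    /*
     * Editorial usage sketch (not generated code): fetching a single database resource with the
     * "databases.get" request defined above. The project, instance, and database names are
     * placeholders, and the sqladmin.databases() accessor is assumed to follow the same pattern
     * documented for the flags() and instances() accessors in this class.
     *
     *   SQLAdmin sqladmin = new SQLAdmin(...);
     *   Database db = sqladmin.databases().get("my-project", "my-instance", "guestbook").execute();
     *   System.out.println(db);  // the model class renders as JSON via its toString()
     */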
/**
* Inserts a resource containing information about a database inside a Cloud SQL instance.
*
* Create a request for the method "databases.insert".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.Database}
* @return the request
*/
public Insert insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.Database content) throws java.io.IOException {
Insert result = new Insert(project, instance, content);
initialize(result);
return result;
}
public class Insert extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/databases";
/**
* Inserts a resource containing information about a database inside a Cloud SQL instance.
*
* Create a request for the method "databases.insert".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.Database}
* @since 1.13
*/
protected Insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.Database content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Insert set$Xgafv(java.lang.String $Xgafv) {
return (Insert) super.set$Xgafv($Xgafv);
}
@Override
public Insert setAccessToken(java.lang.String accessToken) {
return (Insert) super.setAccessToken(accessToken);
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setCallback(java.lang.String callback) {
return (Insert) super.setCallback(callback);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUploadType(java.lang.String uploadType) {
return (Insert) super.setUploadType(uploadType);
}
@Override
public Insert setUploadProtocol(java.lang.String uploadProtocol) {
return (Insert) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Insert setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Insert setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
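    /*
     * Editorial usage sketch (not generated code): creating a database with "databases.insert".
     * The identifiers are placeholders, and Database#setName is assumed from the v1beta4 Database
     * model (it is not defined in this file). The returned Operation can be polled until it
     * completes.
     *
     *   Database body = new Database();
     *   body.setName("guestbook");  // assumed model setter, placeholder value
     *   Operation op = sqladmin.databases().insert("my-project", "my-instance", body).execute();
     */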
/**
* Lists databases in the specified Cloud SQL instance.
*
* Create a request for the method "databases.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public List list(java.lang.String project, java.lang.String instance) throws java.io.IOException {
List result = new List(project, instance);
initialize(result);
return result;
}
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.DatabasesListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/databases";
/**
* Lists databases in the specified Cloud SQL instance.
*
* Create a request for the method "databases.list".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected List(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.DatabasesListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public List setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
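    /*
     * Editorial usage sketch (not generated code): listing the databases on an instance. The
     * identifiers are placeholders; DatabasesListResponse#getItems and Database#getName are
     * assumed from the v1beta4 model and are not defined in this file.
     *
     *   DatabasesListResponse resp = sqladmin.databases().list("my-project", "my-instance").execute();
     *   for (Database db : resp.getItems()) {
     *     System.out.println(db.getName());
     *   }
     */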
/**
* Partially updates a resource containing information about a database inside a Cloud SQL instance.
* This method supports patch semantics.
*
* Create a request for the method "databases.patch".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database to be updated in the instance.
* @param content the {@link com.google.api.services.sqladmin.model.Database}
* @return the request
*/
public Patch patch(java.lang.String project, java.lang.String instance, java.lang.String database, com.google.api.services.sqladmin.model.Database content) throws java.io.IOException {
Patch result = new Patch(project, instance, database, content);
initialize(result);
return result;
}
public class Patch extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}";
/**
* Partially updates a resource containing information about a database inside a Cloud SQL
* instance. This method supports patch semantics.
*
* Create a request for the method "databases.patch".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database to be updated in the instance.
* @param content the {@link com.google.api.services.sqladmin.model.Database}
* @since 1.13
*/
protected Patch(java.lang.String project, java.lang.String instance, java.lang.String database, com.google.api.services.sqladmin.model.Database content) {
super(SQLAdmin.this, "PATCH", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.database = com.google.api.client.util.Preconditions.checkNotNull(database, "Required parameter database must be specified.");
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Patch setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Patch setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Name of the database to be updated in the instance. */
@com.google.api.client.util.Key
private java.lang.String database;
/** Name of the database to be updated in the instance.
*/
public java.lang.String getDatabase() {
return database;
}
/** Name of the database to be updated in the instance. */
public Patch setDatabase(java.lang.String database) {
this.database = database;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
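    /*
     * Editorial usage sketch (not generated code): Patch issues a PATCH (see the constructor
     * above) and supports patch semantics, so only the fields set on the body are changed. The
     * identifiers are placeholders and Database#setCharset is assumed from the v1beta4 model.
     *
     *   Database delta = new Database();
     *   delta.setCharset("utf8mb4");  // assumed model setter, placeholder value
     *   Operation op = sqladmin.databases()
     *       .patch("my-project", "my-instance", "guestbook", delta)
     *       .execute();
     */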
/**
* Updates a resource containing information about a database inside a Cloud SQL instance.
*
* Create a request for the method "databases.update".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database to be updated in the instance.
* @param content the {@link com.google.api.services.sqladmin.model.Database}
* @return the request
*/
public Update update(java.lang.String project, java.lang.String instance, java.lang.String database, com.google.api.services.sqladmin.model.Database content) throws java.io.IOException {
Update result = new Update(project, instance, database, content);
initialize(result);
return result;
}
public class Update extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/databases/{database}";
/**
* Updates a resource containing information about a database inside a Cloud SQL instance.
*
* Create a request for the method "databases.update".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param database Name of the database to be updated in the instance.
* @param content the {@link com.google.api.services.sqladmin.model.Database}
* @since 1.13
*/
protected Update(java.lang.String project, java.lang.String instance, java.lang.String database, com.google.api.services.sqladmin.model.Database content) {
super(SQLAdmin.this, "PUT", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.database = com.google.api.client.util.Preconditions.checkNotNull(database, "Required parameter database must be specified.");
}
@Override
public Update set$Xgafv(java.lang.String $Xgafv) {
return (Update) super.set$Xgafv($Xgafv);
}
@Override
public Update setAccessToken(java.lang.String accessToken) {
return (Update) super.setAccessToken(accessToken);
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setCallback(java.lang.String callback) {
return (Update) super.setCallback(callback);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUploadType(java.lang.String uploadType) {
return (Update) super.setUploadType(uploadType);
}
@Override
public Update setUploadProtocol(java.lang.String uploadProtocol) {
return (Update) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Update setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Update setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Name of the database to be updated in the instance. */
@com.google.api.client.util.Key
private java.lang.String database;
/** Name of the database to be updated in the instance.
*/
public java.lang.String getDatabase() {
return database;
}
/** Name of the database to be updated in the instance. */
public Update setDatabase(java.lang.String database) {
this.database = database;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
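    /*
     * Editorial usage sketch (not generated code): unlike Patch, Update issues a PUT (see the
     * constructor above), so the body should describe the full desired state of the database.
     * Identifiers are placeholders; the Database setters are assumed from the v1beta4 model.
     *
     *   Database desired = new Database();
     *   desired.setName("guestbook");     // assumed model setter
     *   desired.setCharset("utf8mb4");    // assumed model setter
     *   Operation op = sqladmin.databases()
     *       .update("my-project", "my-instance", "guestbook", desired)
     *       .execute();
     */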
}
/**
* An accessor for creating requests from the Flags collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Flags.List request = sqladmin.flags().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Flags flags() {
return new Flags();
}
/**
* The "flags" collection of methods.
*/
public class Flags {
/**
     * Lists all available database flags for Cloud SQL instances.
*
* Create a request for the method "flags.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.FlagsListResponse> {
private static final String REST_PATH = "sql/v1beta4/flags";
/**
       * Lists all available database flags for Cloud SQL instances.
*
* Create a request for the method "flags.list".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.FlagsListResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Database type and version you want to retrieve flags for. By default, this method returns
* flags for all database types and versions.
*/
@com.google.api.client.util.Key
private java.lang.String databaseVersion;
/** Database type and version you want to retrieve flags for. By default, this method returns flags for
all database types and versions.
*/
public java.lang.String getDatabaseVersion() {
return databaseVersion;
}
/**
* Database type and version you want to retrieve flags for. By default, this method returns
* flags for all database types and versions.
*/
public List setDatabaseVersion(java.lang.String databaseVersion) {
this.databaseVersion = databaseVersion;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
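    /*
     * Editorial usage sketch (not generated code): listing flags, optionally filtered with the
     * databaseVersion parameter defined above. "MYSQL_5_7" is a placeholder value for the
     * database type and version.
     *
     *   FlagsListResponse flags = sqladmin.flags().list()
     *       .setDatabaseVersion("MYSQL_5_7")
     *       .execute();
     *   System.out.println(flags);
     */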
}
/**
* An accessor for creating requests from the Instances collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Instances.List request = sqladmin.instances().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Instances instances() {
return new Instances();
}
/**
* The "instances" collection of methods.
*/
public class Instances {
/**
     * Adds a new trusted Certificate Authority (CA) version for the specified instance. Required to
* prepare for a certificate rotation. If a CA version was previously added but never used in a
* certificate rotation, this operation replaces that version. There cannot be more than one CA
* version waiting to be rotated in.
*
* Create a request for the method "instances.addServerCa".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link AddServerCa#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public AddServerCa addServerCa(java.lang.String project, java.lang.String instance) throws java.io.IOException {
AddServerCa result = new AddServerCa(project, instance);
initialize(result);
return result;
}
public class AddServerCa extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/addServerCa";
/**
       * Adds a new trusted Certificate Authority (CA) version for the specified instance. Required to
* prepare for a certificate rotation. If a CA version was previously added but never used in a
* certificate rotation, this operation replaces that version. There cannot be more than one CA
* version waiting to be rotated in.
*
* Create a request for the method "instances.addServerCa".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link AddServerCa#execute()} method to invoke the remote
* operation. <p> {@link
* AddServerCa#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected AddServerCa(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public AddServerCa set$Xgafv(java.lang.String $Xgafv) {
return (AddServerCa) super.set$Xgafv($Xgafv);
}
@Override
public AddServerCa setAccessToken(java.lang.String accessToken) {
return (AddServerCa) super.setAccessToken(accessToken);
}
@Override
public AddServerCa setAlt(java.lang.String alt) {
return (AddServerCa) super.setAlt(alt);
}
@Override
public AddServerCa setCallback(java.lang.String callback) {
return (AddServerCa) super.setCallback(callback);
}
@Override
public AddServerCa setFields(java.lang.String fields) {
return (AddServerCa) super.setFields(fields);
}
@Override
public AddServerCa setKey(java.lang.String key) {
return (AddServerCa) super.setKey(key);
}
@Override
public AddServerCa setOauthToken(java.lang.String oauthToken) {
return (AddServerCa) super.setOauthToken(oauthToken);
}
@Override
public AddServerCa setPrettyPrint(java.lang.Boolean prettyPrint) {
return (AddServerCa) super.setPrettyPrint(prettyPrint);
}
@Override
public AddServerCa setQuotaUser(java.lang.String quotaUser) {
return (AddServerCa) super.setQuotaUser(quotaUser);
}
@Override
public AddServerCa setUploadType(java.lang.String uploadType) {
return (AddServerCa) super.setUploadType(uploadType);
}
@Override
public AddServerCa setUploadProtocol(java.lang.String uploadProtocol) {
return (AddServerCa) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public AddServerCa setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public AddServerCa setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public AddServerCa set(String parameterName, Object value) {
return (AddServerCa) super.set(parameterName, value);
}
}
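    /*
     * Editorial usage sketch (not generated code): staging a new server CA version ahead of a
     * certificate rotation. The project and instance names are placeholders.
     *
     *   Operation op = sqladmin.instances().addServerCa("my-project", "my-instance").execute();
     */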
/**
* Creates a Cloud SQL instance as a clone of the source instance. Using this operation might cause
* your instance to restart.
*
* Create a request for the method "instances.clone".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Clone#execute()} method to invoke the remote operation.
*
* @param project Project ID of the source as well as the clone Cloud SQL instance.
* @param instance The ID of the Cloud SQL instance to be cloned (source). This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesCloneRequest}
* @return the request
*/
public Clone clone(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesCloneRequest content) throws java.io.IOException {
Clone result = new Clone(project, instance, content);
initialize(result);
return result;
}
public class Clone extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/clone";
/**
* Creates a Cloud SQL instance as a clone of the source instance. Using this operation might
* cause your instance to restart.
*
* Create a request for the method "instances.clone".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Clone#execute()} method to invoke the remote operation.
* <p> {@link
* Clone#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the source as well as the clone Cloud SQL instance.
* @param instance The ID of the Cloud SQL instance to be cloned (source). This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesCloneRequest}
* @since 1.13
*/
protected Clone(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesCloneRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Clone set$Xgafv(java.lang.String $Xgafv) {
return (Clone) super.set$Xgafv($Xgafv);
}
@Override
public Clone setAccessToken(java.lang.String accessToken) {
return (Clone) super.setAccessToken(accessToken);
}
@Override
public Clone setAlt(java.lang.String alt) {
return (Clone) super.setAlt(alt);
}
@Override
public Clone setCallback(java.lang.String callback) {
return (Clone) super.setCallback(callback);
}
@Override
public Clone setFields(java.lang.String fields) {
return (Clone) super.setFields(fields);
}
@Override
public Clone setKey(java.lang.String key) {
return (Clone) super.setKey(key);
}
@Override
public Clone setOauthToken(java.lang.String oauthToken) {
return (Clone) super.setOauthToken(oauthToken);
}
@Override
public Clone setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Clone) super.setPrettyPrint(prettyPrint);
}
@Override
public Clone setQuotaUser(java.lang.String quotaUser) {
return (Clone) super.setQuotaUser(quotaUser);
}
@Override
public Clone setUploadType(java.lang.String uploadType) {
return (Clone) super.setUploadType(uploadType);
}
@Override
public Clone setUploadProtocol(java.lang.String uploadProtocol) {
return (Clone) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the source as well as the clone Cloud SQL instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the source as well as the clone Cloud SQL instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the source as well as the clone Cloud SQL instance. */
public Clone setProject(java.lang.String project) {
this.project = project;
return this;
}
/**
* The ID of the Cloud SQL instance to be cloned (source). This does not include the project
* ID.
*/
@com.google.api.client.util.Key
private java.lang.String instance;
/** The ID of the Cloud SQL instance to be cloned (source). This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/**
* The ID of the Cloud SQL instance to be cloned (source). This does not include the project
* ID.
*/
public Clone setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Clone set(String parameterName, Object value) {
return (Clone) super.set(parameterName, value);
}
}
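    /*
     * Editorial usage sketch (not generated code): cloning a source instance. Identifiers are
     * placeholders, and configuring the InstancesCloneRequest body (for example, the clone
     * context naming the destination instance) is elided because those model setters are not
     * defined in this file.
     *
     *   InstancesCloneRequest body = new InstancesCloneRequest();
     *   // ... populate the clone context here ...
     *   Operation op = sqladmin.instances().clone("my-project", "source-instance", body).execute();
     */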
/**
* Deletes a Cloud SQL instance.
*
* Create a request for the method "instances.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance to be deleted.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public Delete delete(java.lang.String project, java.lang.String instance) throws java.io.IOException {
Delete result = new Delete(project, instance);
initialize(result);
return result;
}
public class Delete extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}";
/**
* Deletes a Cloud SQL instance.
*
* Create a request for the method "instances.delete".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance to be deleted.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected Delete(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "DELETE", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance to be deleted. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance to be deleted.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance to be deleted. */
public Delete setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Delete setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
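    /*
     * Editorial usage sketch (not generated code): deleting an instance. The project and
     * instance names are placeholders.
     *
     *   Operation op = sqladmin.instances().delete("my-project", "my-instance").execute();
     */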
/**
* Demotes the stand-alone instance to be a Cloud SQL read replica for an external database server.
*
* Create a request for the method "instances.demoteMaster".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link DemoteMaster#execute()} method to invoke the remote operation.
*
* @param project ID of the project that contains the instance.
* @param instance Cloud SQL instance name.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesDemoteMasterRequest}
* @return the request
*/
public DemoteMaster demoteMaster(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesDemoteMasterRequest content) throws java.io.IOException {
DemoteMaster result = new DemoteMaster(project, instance, content);
initialize(result);
return result;
}
public class DemoteMaster extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/demoteMaster";
/**
* Demotes the stand-alone instance to be a Cloud SQL read replica for an external database
* server.
*
* Create a request for the method "instances.demoteMaster".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link DemoteMaster#execute()} method to invoke the remote
* operation. <p> {@link
* DemoteMaster#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project ID of the project that contains the instance.
* @param instance Cloud SQL instance name.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesDemoteMasterRequest}
* @since 1.13
*/
protected DemoteMaster(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesDemoteMasterRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public DemoteMaster set$Xgafv(java.lang.String $Xgafv) {
return (DemoteMaster) super.set$Xgafv($Xgafv);
}
@Override
public DemoteMaster setAccessToken(java.lang.String accessToken) {
return (DemoteMaster) super.setAccessToken(accessToken);
}
@Override
public DemoteMaster setAlt(java.lang.String alt) {
return (DemoteMaster) super.setAlt(alt);
}
@Override
public DemoteMaster setCallback(java.lang.String callback) {
return (DemoteMaster) super.setCallback(callback);
}
@Override
public DemoteMaster setFields(java.lang.String fields) {
return (DemoteMaster) super.setFields(fields);
}
@Override
public DemoteMaster setKey(java.lang.String key) {
return (DemoteMaster) super.setKey(key);
}
@Override
public DemoteMaster setOauthToken(java.lang.String oauthToken) {
return (DemoteMaster) super.setOauthToken(oauthToken);
}
@Override
public DemoteMaster setPrettyPrint(java.lang.Boolean prettyPrint) {
return (DemoteMaster) super.setPrettyPrint(prettyPrint);
}
@Override
public DemoteMaster setQuotaUser(java.lang.String quotaUser) {
return (DemoteMaster) super.setQuotaUser(quotaUser);
}
@Override
public DemoteMaster setUploadType(java.lang.String uploadType) {
return (DemoteMaster) super.setUploadType(uploadType);
}
@Override
public DemoteMaster setUploadProtocol(java.lang.String uploadProtocol) {
return (DemoteMaster) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the instance. */
public DemoteMaster setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance name. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance name.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance name. */
public DemoteMaster setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public DemoteMaster set(String parameterName, Object value) {
return (DemoteMaster) super.set(parameterName, value);
}
}
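    /*
     * Editorial usage sketch (not generated code): demoting a stand-alone instance to a read
     * replica of an external database server. Identifiers are placeholders, and populating the
     * InstancesDemoteMasterRequest body (the replica configuration for the external master) is
     * elided because those model setters are not defined in this file.
     *
     *   InstancesDemoteMasterRequest body = new InstancesDemoteMasterRequest();
     *   // ... populate the demote-master context here ...
     *   Operation op = sqladmin.instances().demoteMaster("my-project", "my-instance", body).execute();
     */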
/**
* Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file.
*
* Create a request for the method "instances.export".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Export#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance to be exported.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesExportRequest}
* @return the request
*/
public Export export(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesExportRequest content) throws java.io.IOException {
Export result = new Export(project, instance, content);
initialize(result);
return result;
}
public class Export extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/export";
/**
* Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file.
*
* Create a request for the method "instances.export".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Export#execute()} method to invoke the remote operation.
* <p> {@link
* Export#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance to be exported.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesExportRequest}
* @since 1.13
*/
protected Export(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesExportRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Export set$Xgafv(java.lang.String $Xgafv) {
return (Export) super.set$Xgafv($Xgafv);
}
@Override
public Export setAccessToken(java.lang.String accessToken) {
return (Export) super.setAccessToken(accessToken);
}
@Override
public Export setAlt(java.lang.String alt) {
return (Export) super.setAlt(alt);
}
@Override
public Export setCallback(java.lang.String callback) {
return (Export) super.setCallback(callback);
}
@Override
public Export setFields(java.lang.String fields) {
return (Export) super.setFields(fields);
}
@Override
public Export setKey(java.lang.String key) {
return (Export) super.setKey(key);
}
@Override
public Export setOauthToken(java.lang.String oauthToken) {
return (Export) super.setOauthToken(oauthToken);
}
@Override
public Export setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Export) super.setPrettyPrint(prettyPrint);
}
@Override
public Export setQuotaUser(java.lang.String quotaUser) {
return (Export) super.setQuotaUser(quotaUser);
}
@Override
public Export setUploadType(java.lang.String uploadType) {
return (Export) super.setUploadType(uploadType);
}
@Override
public Export setUploadProtocol(java.lang.String uploadProtocol) {
return (Export) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance to be exported. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance to be exported.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance to be exported. */
public Export setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Export setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Export set(String parameterName, Object value) {
return (Export) super.set(parameterName, value);
}
}
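    /*
     * Editorial usage sketch (not generated code): exporting an instance to a Cloud Storage
     * bucket. Identifiers are placeholders, and populating the InstancesExportRequest body (the
     * export context with the gs:// destination URI and SQL or CSV file type) is elided because
     * those model setters are not defined in this file.
     *
     *   InstancesExportRequest body = new InstancesExportRequest();
     *   // ... set the export context (destination URI, SQL or CSV) here ...
     *   Operation op = sqladmin.instances().export("my-project", "my-instance", body).execute();
     */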
/**
     * Fails over the instance to its failover replica instance. Using this operation might cause your
* instance to restart.
*
* Create a request for the method "instances.failover".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Failover#execute()} method to invoke the remote operation.
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesFailoverRequest}
* @return the request
*/
public Failover failover(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesFailoverRequest content) throws java.io.IOException {
Failover result = new Failover(project, instance, content);
initialize(result);
return result;
}
public class Failover extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/failover";
/**
       * Fails over the instance to its failover replica instance. Using this operation might cause your
* instance to restart.
*
* Create a request for the method "instances.failover".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Failover#execute()} method to invoke the remote operation.
* <p> {@link
* Failover#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesFailoverRequest}
* @since 1.13
*/
protected Failover(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesFailoverRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Failover set$Xgafv(java.lang.String $Xgafv) {
return (Failover) super.set$Xgafv($Xgafv);
}
@Override
public Failover setAccessToken(java.lang.String accessToken) {
return (Failover) super.setAccessToken(accessToken);
}
@Override
public Failover setAlt(java.lang.String alt) {
return (Failover) super.setAlt(alt);
}
@Override
public Failover setCallback(java.lang.String callback) {
return (Failover) super.setCallback(callback);
}
@Override
public Failover setFields(java.lang.String fields) {
return (Failover) super.setFields(fields);
}
@Override
public Failover setKey(java.lang.String key) {
return (Failover) super.setKey(key);
}
@Override
public Failover setOauthToken(java.lang.String oauthToken) {
return (Failover) super.setOauthToken(oauthToken);
}
@Override
public Failover setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Failover) super.setPrettyPrint(prettyPrint);
}
@Override
public Failover setQuotaUser(java.lang.String quotaUser) {
return (Failover) super.setQuotaUser(quotaUser);
}
@Override
public Failover setUploadType(java.lang.String uploadType) {
return (Failover) super.setUploadType(uploadType);
}
@Override
public Failover setUploadProtocol(java.lang.String uploadProtocol) {
return (Failover) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the read replica. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the read replica.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the read replica. */
public Failover setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Failover setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Failover set(String parameterName, Object value) {
return (Failover) super.set(parameterName, value);
}
}
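    /*
     * Editorial usage sketch (not generated code): triggering a failover to the failover
     * replica. Identifiers are placeholders; populating the InstancesFailoverRequest body (the
     * failover context) is elided because those model setters are not defined in this file.
     *
     *   InstancesFailoverRequest body = new InstancesFailoverRequest();
     *   Operation op = sqladmin.instances().failover("my-project", "my-primary", body).execute();
     */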
/**
* Retrieves a resource containing information about a Cloud SQL instance.
*
* Create a request for the method "instances.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @return the request
*/
public Get get(java.lang.String project, java.lang.String instance) throws java.io.IOException {
Get result = new Get(project, instance);
initialize(result);
return result;
}
public class Get extends SQLAdminRequest<com.google.api.services.sqladmin.model.DatabaseInstance> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}";
/**
* Retrieves a resource containing information about a Cloud SQL instance.
*
* Create a request for the method "instances.get".
*
       * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @since 1.13
*/
protected Get(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.DatabaseInstance.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Get setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Get setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Imports data into a Cloud SQL instance from a SQL dump or CSV file in Cloud Storage.
*
* Create a request for the method "instances.import".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link SQLAdminImport#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesImportRequest}
* @return the request
*/
public SQLAdminImport sqladminImport(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesImportRequest content) throws java.io.IOException {
SQLAdminImport result = new SQLAdminImport(project, instance, content);
initialize(result);
return result;
}
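    // Illustrative usage sketch (assumptions: an initialized `sqlAdmin` client and a populated
    // InstancesImportRequest named `request`; the request-body fields, e.g. the Cloud Storage URI
    // of the dump or CSV file, live on the model class and are not defined in this file).
    //
    //   Operation op = sqlAdmin.instances()
    //       .sqladminImport("my-project", "my-instance", request)
    //       .execute();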
public class SQLAdminImport extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/import";
/**
* Imports data into a Cloud SQL instance from a SQL dump or CSV file in Cloud Storage.
*
* Create a request for the method "instances.import".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link SQLAdminImport#execute()} method to invoke the remote
* operation. <p> {@link SQLAdminImport#initialize(com.google.api.client.googleapis.services.Abstr
* actGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesImportRequest}
* @since 1.13
*/
protected SQLAdminImport(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesImportRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public SQLAdminImport set$Xgafv(java.lang.String $Xgafv) {
return (SQLAdminImport) super.set$Xgafv($Xgafv);
}
@Override
public SQLAdminImport setAccessToken(java.lang.String accessToken) {
return (SQLAdminImport) super.setAccessToken(accessToken);
}
@Override
public SQLAdminImport setAlt(java.lang.String alt) {
return (SQLAdminImport) super.setAlt(alt);
}
@Override
public SQLAdminImport setCallback(java.lang.String callback) {
return (SQLAdminImport) super.setCallback(callback);
}
@Override
public SQLAdminImport setFields(java.lang.String fields) {
return (SQLAdminImport) super.setFields(fields);
}
@Override
public SQLAdminImport setKey(java.lang.String key) {
return (SQLAdminImport) super.setKey(key);
}
@Override
public SQLAdminImport setOauthToken(java.lang.String oauthToken) {
return (SQLAdminImport) super.setOauthToken(oauthToken);
}
@Override
public SQLAdminImport setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SQLAdminImport) super.setPrettyPrint(prettyPrint);
}
@Override
public SQLAdminImport setQuotaUser(java.lang.String quotaUser) {
return (SQLAdminImport) super.setQuotaUser(quotaUser);
}
@Override
public SQLAdminImport setUploadType(java.lang.String uploadType) {
return (SQLAdminImport) super.setUploadType(uploadType);
}
@Override
public SQLAdminImport setUploadProtocol(java.lang.String uploadProtocol) {
return (SQLAdminImport) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public SQLAdminImport setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public SQLAdminImport setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public SQLAdminImport set(String parameterName, Object value) {
return (SQLAdminImport) super.set(parameterName, value);
}
}
/**
* Creates a new Cloud SQL instance.
*
* Create a request for the method "instances.insert".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project to which the newly created Cloud SQL instances should belong.
* @param content the {@link com.google.api.services.sqladmin.model.DatabaseInstance}
* @return the request
*/
public Insert insert(java.lang.String project, com.google.api.services.sqladmin.model.DatabaseInstance content) throws java.io.IOException {
Insert result = new Insert(project, content);
initialize(result);
return result;
}
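    // Illustrative usage sketch (assumptions: an initialized `sqlAdmin` client; the Settings
    // fields shown, such as the tier name, come from the model classes and vary by edition).
    //
    //   DatabaseInstance newInstance = new DatabaseInstance()
    //       .setName("my-new-instance")
    //       .setSettings(new Settings().setTier("db-n1-standard-1"));
    //   Operation op = sqlAdmin.instances().insert("my-project", newInstance).execute();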
public class Insert extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances";
/**
* Creates a new Cloud SQL instance.
*
* Create a request for the method "instances.insert".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project to which the newly created Cloud SQL instances should belong.
* @param content the {@link com.google.api.services.sqladmin.model.DatabaseInstance}
* @since 1.13
*/
protected Insert(java.lang.String project, com.google.api.services.sqladmin.model.DatabaseInstance content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
}
@Override
public Insert set$Xgafv(java.lang.String $Xgafv) {
return (Insert) super.set$Xgafv($Xgafv);
}
@Override
public Insert setAccessToken(java.lang.String accessToken) {
return (Insert) super.setAccessToken(accessToken);
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setCallback(java.lang.String callback) {
return (Insert) super.setCallback(callback);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUploadType(java.lang.String uploadType) {
return (Insert) super.setUploadType(uploadType);
}
@Override
public Insert setUploadProtocol(java.lang.String uploadProtocol) {
return (Insert) super.setUploadProtocol(uploadProtocol);
}
/**
* Project ID of the project to which the newly created Cloud SQL instances should belong.
*/
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project to which the newly created Cloud SQL instances should belong.
*/
public java.lang.String getProject() {
return project;
}
/**
* Project ID of the project to which the newly created Cloud SQL instances should belong.
*/
public Insert setProject(java.lang.String project) {
this.project = project;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists instances under a given project.
*
* Create a request for the method "instances.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project for which to list Cloud SQL instances.
* @return the request
*/
public List list(java.lang.String project) throws java.io.IOException {
List result = new List(project);
initialize(result);
return result;
}
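    // Illustrative usage sketch showing the optional filter and pagination parameters documented
    // below (assumes an initialized `sqlAdmin` client).
    //
    //   InstancesListResponse page = sqlAdmin.instances().list("my-project")
    //       .setFilter("state:RUNNABLE")
    //       .setMaxResults(50L)
    //       .execute();
    //   String nextToken = page.getNextPageToken(); // pass back via setPageToken(...) to fetch the next page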
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.InstancesListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances";
/**
* Lists instances under a given project.
*
* Create a request for the method "instances.list".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project for which to list Cloud SQL instances.
* @since 1.13
*/
protected List(java.lang.String project) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.InstancesListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project for which to list Cloud SQL instances. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project for which to list Cloud SQL instances.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project for which to list Cloud SQL instances. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
/**
* A filter expression that filters resources listed in the response. The expression is in the
* form of field:value. For example, 'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested
* as needed as per their JSON representation, such as 'settings.userLabels.auto_start:true'.
     * Multiple filter queries are space-separated. For example, 'state:RUNNABLE
* instanceType:CLOUD_SQL_INSTANCE'. By default, each expression is an AND expression.
* However, you can include AND and OR expressions explicitly.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** A filter expression that filters resources listed in the response. The expression is in the form of
field:value. For example, 'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested as needed as per
their JSON representation, such as 'settings.userLabels.auto_start:true'. Multiple filter queries
 are space-separated. For example, 'state:RUNNABLE instanceType:CLOUD_SQL_INSTANCE'. By default,
each expression is an AND expression. However, you can include AND and OR expressions explicitly.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* A filter expression that filters resources listed in the response. The expression is in the
* form of field:value. For example, 'instanceType:CLOUD_SQL_INSTANCE'. Fields can be nested
* as needed as per their JSON representation, such as 'settings.userLabels.auto_start:true'.
     * Multiple filter queries are space-separated. For example, 'state:RUNNABLE
* instanceType:CLOUD_SQL_INSTANCE'. By default, each expression is an AND expression.
* However, you can include AND and OR expressions explicitly.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The maximum number of results to return per response. */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** The maximum number of results to return per response.
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** The maximum number of results to return per response. */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/**
* A previously-returned page token representing part of the larger set of results to view.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A previously-returned page token representing part of the larger set of results to view.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A previously-returned page token representing part of the larger set of results to view.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Lists all of the trusted Certificate Authorities (CAs) for the specified instance. There can be
* up to three CAs listed: the CA that was used to sign the certificate that is currently in use, a
* CA that has been added but not yet used to sign a certificate, and a CA used to sign a
* certificate that has previously rotated out.
*
* Create a request for the method "instances.listServerCas".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link ListServerCas#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public ListServerCas listServerCas(java.lang.String project, java.lang.String instance) throws java.io.IOException {
ListServerCas result = new ListServerCas(project, instance);
initialize(result);
return result;
}
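    // Illustrative usage sketch (assumes an initialized `sqlAdmin` client; the response model
    // exposes the listed CA certificates, e.g. via a getCerts() accessor defined outside this file).
    //
    //   InstancesListServerCasResponse cas =
    //       sqlAdmin.instances().listServerCas("my-project", "my-instance").execute();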
public class ListServerCas extends SQLAdminRequest<com.google.api.services.sqladmin.model.InstancesListServerCasResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/listServerCas";
/**
* Lists all of the trusted Certificate Authorities (CAs) for the specified instance. There can be
* up to three CAs listed: the CA that was used to sign the certificate that is currently in use,
* a CA that has been added but not yet used to sign a certificate, and a CA used to sign a
* certificate that has previously rotated out.
*
* Create a request for the method "instances.listServerCas".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link ListServerCas#execute()} method to invoke the remote
* operation. <p> {@link ListServerCas#initialize(com.google.api.client.googleapis.services.Abstra
* ctGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected ListServerCas(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.InstancesListServerCasResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public ListServerCas set$Xgafv(java.lang.String $Xgafv) {
return (ListServerCas) super.set$Xgafv($Xgafv);
}
@Override
public ListServerCas setAccessToken(java.lang.String accessToken) {
return (ListServerCas) super.setAccessToken(accessToken);
}
@Override
public ListServerCas setAlt(java.lang.String alt) {
return (ListServerCas) super.setAlt(alt);
}
@Override
public ListServerCas setCallback(java.lang.String callback) {
return (ListServerCas) super.setCallback(callback);
}
@Override
public ListServerCas setFields(java.lang.String fields) {
return (ListServerCas) super.setFields(fields);
}
@Override
public ListServerCas setKey(java.lang.String key) {
return (ListServerCas) super.setKey(key);
}
@Override
public ListServerCas setOauthToken(java.lang.String oauthToken) {
return (ListServerCas) super.setOauthToken(oauthToken);
}
@Override
public ListServerCas setPrettyPrint(java.lang.Boolean prettyPrint) {
return (ListServerCas) super.setPrettyPrint(prettyPrint);
}
@Override
public ListServerCas setQuotaUser(java.lang.String quotaUser) {
return (ListServerCas) super.setQuotaUser(quotaUser);
}
@Override
public ListServerCas setUploadType(java.lang.String uploadType) {
return (ListServerCas) super.setUploadType(uploadType);
}
@Override
public ListServerCas setUploadProtocol(java.lang.String uploadProtocol) {
return (ListServerCas) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public ListServerCas setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public ListServerCas setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public ListServerCas set(String parameterName, Object value) {
return (ListServerCas) super.set(parameterName, value);
}
}
/**
* Updates settings of a Cloud SQL instance. This method supports patch semantics.
*
* Create a request for the method "instances.patch".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.DatabaseInstance}
* @return the request
*/
public Patch patch(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.DatabaseInstance content) throws java.io.IOException {
Patch result = new Patch(project, instance, content);
initialize(result);
return result;
}
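    // Illustrative usage sketch (assumes an initialized `sqlAdmin` client): with patch semantics
    // only the fields present in the request body are changed, so a sparse DatabaseInstance is
    // enough; the user-label setter shown is an assumption taken from the Settings model class.
    //
    //   DatabaseInstance delta = new DatabaseInstance()
    //       .setSettings(new Settings()
    //           .setUserLabels(java.util.Collections.singletonMap("env", "staging")));
    //   Operation op = sqlAdmin.instances().patch("my-project", "my-instance", delta).execute();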
public class Patch extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}";
/**
* Updates settings of a Cloud SQL instance. This method supports patch semantics.
*
* Create a request for the method "instances.patch".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.DatabaseInstance}
* @since 1.13
*/
protected Patch(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.DatabaseInstance content) {
super(SQLAdmin.this, "PATCH", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Patch setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Patch setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
/**
* Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation
* might cause your instance to restart.
*
* Create a request for the method "instances.promoteReplica".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link PromoteReplica#execute()} method to invoke the remote operation.
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL read replica instance name.
* @return the request
*/
public PromoteReplica promoteReplica(java.lang.String project, java.lang.String instance) throws java.io.IOException {
PromoteReplica result = new PromoteReplica(project, instance);
initialize(result);
return result;
}
public class PromoteReplica extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/promoteReplica";
/**
* Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation
* might cause your instance to restart.
*
* Create a request for the method "instances.promoteReplica".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link PromoteReplica#execute()} method to invoke the remote
* operation. <p> {@link PromoteReplica#initialize(com.google.api.client.googleapis.services.Abstr
* actGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL read replica instance name.
* @since 1.13
*/
protected PromoteReplica(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public PromoteReplica set$Xgafv(java.lang.String $Xgafv) {
return (PromoteReplica) super.set$Xgafv($Xgafv);
}
@Override
public PromoteReplica setAccessToken(java.lang.String accessToken) {
return (PromoteReplica) super.setAccessToken(accessToken);
}
@Override
public PromoteReplica setAlt(java.lang.String alt) {
return (PromoteReplica) super.setAlt(alt);
}
@Override
public PromoteReplica setCallback(java.lang.String callback) {
return (PromoteReplica) super.setCallback(callback);
}
@Override
public PromoteReplica setFields(java.lang.String fields) {
return (PromoteReplica) super.setFields(fields);
}
@Override
public PromoteReplica setKey(java.lang.String key) {
return (PromoteReplica) super.setKey(key);
}
@Override
public PromoteReplica setOauthToken(java.lang.String oauthToken) {
return (PromoteReplica) super.setOauthToken(oauthToken);
}
@Override
public PromoteReplica setPrettyPrint(java.lang.Boolean prettyPrint) {
return (PromoteReplica) super.setPrettyPrint(prettyPrint);
}
@Override
public PromoteReplica setQuotaUser(java.lang.String quotaUser) {
return (PromoteReplica) super.setQuotaUser(quotaUser);
}
@Override
public PromoteReplica setUploadType(java.lang.String uploadType) {
return (PromoteReplica) super.setUploadType(uploadType);
}
@Override
public PromoteReplica setUploadProtocol(java.lang.String uploadProtocol) {
return (PromoteReplica) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the read replica. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the read replica.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the read replica. */
public PromoteReplica setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL read replica instance name. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL read replica instance name.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL read replica instance name. */
public PromoteReplica setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public PromoteReplica set(String parameterName, Object value) {
return (PromoteReplica) super.set(parameterName, value);
}
}
/**
* Deletes all client certificates and generates a new server SSL certificate for the instance.
*
* Create a request for the method "instances.resetSslConfig".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link ResetSslConfig#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public ResetSslConfig resetSslConfig(java.lang.String project, java.lang.String instance) throws java.io.IOException {
ResetSslConfig result = new ResetSslConfig(project, instance);
initialize(result);
return result;
}
public class ResetSslConfig extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig";
/**
* Deletes all client certificates and generates a new server SSL certificate for the instance.
*
* Create a request for the method "instances.resetSslConfig".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link ResetSslConfig#execute()} method to invoke the remote
* operation. <p> {@link ResetSslConfig#initialize(com.google.api.client.googleapis.services.Abstr
* actGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected ResetSslConfig(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public ResetSslConfig set$Xgafv(java.lang.String $Xgafv) {
return (ResetSslConfig) super.set$Xgafv($Xgafv);
}
@Override
public ResetSslConfig setAccessToken(java.lang.String accessToken) {
return (ResetSslConfig) super.setAccessToken(accessToken);
}
@Override
public ResetSslConfig setAlt(java.lang.String alt) {
return (ResetSslConfig) super.setAlt(alt);
}
@Override
public ResetSslConfig setCallback(java.lang.String callback) {
return (ResetSslConfig) super.setCallback(callback);
}
@Override
public ResetSslConfig setFields(java.lang.String fields) {
return (ResetSslConfig) super.setFields(fields);
}
@Override
public ResetSslConfig setKey(java.lang.String key) {
return (ResetSslConfig) super.setKey(key);
}
@Override
public ResetSslConfig setOauthToken(java.lang.String oauthToken) {
return (ResetSslConfig) super.setOauthToken(oauthToken);
}
@Override
public ResetSslConfig setPrettyPrint(java.lang.Boolean prettyPrint) {
return (ResetSslConfig) super.setPrettyPrint(prettyPrint);
}
@Override
public ResetSslConfig setQuotaUser(java.lang.String quotaUser) {
return (ResetSslConfig) super.setQuotaUser(quotaUser);
}
@Override
public ResetSslConfig setUploadType(java.lang.String uploadType) {
return (ResetSslConfig) super.setUploadType(uploadType);
}
@Override
public ResetSslConfig setUploadProtocol(java.lang.String uploadProtocol) {
return (ResetSslConfig) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public ResetSslConfig setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public ResetSslConfig setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public ResetSslConfig set(String parameterName, Object value) {
return (ResetSslConfig) super.set(parameterName, value);
}
}
/**
* Restarts a Cloud SQL instance.
*
* Create a request for the method "instances.restart".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Restart#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance to be restarted.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public Restart restart(java.lang.String project, java.lang.String instance) throws java.io.IOException {
Restart result = new Restart(project, instance);
initialize(result);
return result;
}
public class Restart extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/restart";
/**
* Restarts a Cloud SQL instance.
*
* Create a request for the method "instances.restart".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Restart#execute()} method to invoke the remote operation.
* <p> {@link
* Restart#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance to be restarted.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected Restart(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Restart set$Xgafv(java.lang.String $Xgafv) {
return (Restart) super.set$Xgafv($Xgafv);
}
@Override
public Restart setAccessToken(java.lang.String accessToken) {
return (Restart) super.setAccessToken(accessToken);
}
@Override
public Restart setAlt(java.lang.String alt) {
return (Restart) super.setAlt(alt);
}
@Override
public Restart setCallback(java.lang.String callback) {
return (Restart) super.setCallback(callback);
}
@Override
public Restart setFields(java.lang.String fields) {
return (Restart) super.setFields(fields);
}
@Override
public Restart setKey(java.lang.String key) {
return (Restart) super.setKey(key);
}
@Override
public Restart setOauthToken(java.lang.String oauthToken) {
return (Restart) super.setOauthToken(oauthToken);
}
@Override
public Restart setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Restart) super.setPrettyPrint(prettyPrint);
}
@Override
public Restart setQuotaUser(java.lang.String quotaUser) {
return (Restart) super.setQuotaUser(quotaUser);
}
@Override
public Restart setUploadType(java.lang.String uploadType) {
return (Restart) super.setUploadType(uploadType);
}
@Override
public Restart setUploadProtocol(java.lang.String uploadProtocol) {
return (Restart) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance to be restarted. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance to be restarted.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance to be restarted. */
public Restart setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Restart setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Restart set(String parameterName, Object value) {
return (Restart) super.set(parameterName, value);
}
}
/**
* Restores a backup of a Cloud SQL instance. Using this operation might cause your instance to
* restart.
*
* Create a request for the method "instances.restoreBackup".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link RestoreBackup#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesRestoreBackupRequest}
* @return the request
*/
public RestoreBackup restoreBackup(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesRestoreBackupRequest content) throws java.io.IOException {
RestoreBackup result = new RestoreBackup(project, instance, content);
initialize(result);
return result;
}
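    // Illustrative usage sketch (assumptions: an initialized `sqlAdmin` client and a populated
    // InstancesRestoreBackupRequest named `request` whose restore-backup context identifies the
    // backup run to restore; those model fields are defined outside this file).
    //
    //   Operation op = sqlAdmin.instances()
    //       .restoreBackup("my-project", "my-instance", request)
    //       .execute();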
public class RestoreBackup extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/restoreBackup";
/**
* Restores a backup of a Cloud SQL instance. Using this operation might cause your instance to
* restart.
*
* Create a request for the method "instances.restoreBackup".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link RestoreBackup#execute()} method to invoke the remote
* operation. <p> {@link RestoreBackup#initialize(com.google.api.client.googleapis.services.Abstra
* ctGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesRestoreBackupRequest}
* @since 1.13
*/
protected RestoreBackup(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesRestoreBackupRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public RestoreBackup set$Xgafv(java.lang.String $Xgafv) {
return (RestoreBackup) super.set$Xgafv($Xgafv);
}
@Override
public RestoreBackup setAccessToken(java.lang.String accessToken) {
return (RestoreBackup) super.setAccessToken(accessToken);
}
@Override
public RestoreBackup setAlt(java.lang.String alt) {
return (RestoreBackup) super.setAlt(alt);
}
@Override
public RestoreBackup setCallback(java.lang.String callback) {
return (RestoreBackup) super.setCallback(callback);
}
@Override
public RestoreBackup setFields(java.lang.String fields) {
return (RestoreBackup) super.setFields(fields);
}
@Override
public RestoreBackup setKey(java.lang.String key) {
return (RestoreBackup) super.setKey(key);
}
@Override
public RestoreBackup setOauthToken(java.lang.String oauthToken) {
return (RestoreBackup) super.setOauthToken(oauthToken);
}
@Override
public RestoreBackup setPrettyPrint(java.lang.Boolean prettyPrint) {
return (RestoreBackup) super.setPrettyPrint(prettyPrint);
}
@Override
public RestoreBackup setQuotaUser(java.lang.String quotaUser) {
return (RestoreBackup) super.setQuotaUser(quotaUser);
}
@Override
public RestoreBackup setUploadType(java.lang.String uploadType) {
return (RestoreBackup) super.setUploadType(uploadType);
}
@Override
public RestoreBackup setUploadProtocol(java.lang.String uploadProtocol) {
return (RestoreBackup) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public RestoreBackup setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public RestoreBackup setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public RestoreBackup set(String parameterName, Object value) {
return (RestoreBackup) super.set(parameterName, value);
}
}
/**
* Rotates the server certificate to one signed by the Certificate Authority (CA) version previously
* added with the addServerCA method.
*
* Create a request for the method "instances.rotateServerCa".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link RotateServerCa#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesRotateServerCaRequest}
* @return the request
*/
public RotateServerCa rotateServerCa(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesRotateServerCaRequest content) throws java.io.IOException {
RotateServerCa result = new RotateServerCa(project, instance, content);
initialize(result);
return result;
}
public class RotateServerCa extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/rotateServerCa";
/**
* Rotates the server certificate to one signed by the Certificate Authority (CA) version
* previously added with the addServerCA method.
*
* Create a request for the method "instances.rotateServerCa".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link RotateServerCa#execute()} method to invoke the remote
* operation. <p> {@link RotateServerCa#initialize(com.google.api.client.googleapis.services.Abstr
* actGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesRotateServerCaRequest}
* @since 1.13
*/
protected RotateServerCa(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesRotateServerCaRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public RotateServerCa set$Xgafv(java.lang.String $Xgafv) {
return (RotateServerCa) super.set$Xgafv($Xgafv);
}
@Override
public RotateServerCa setAccessToken(java.lang.String accessToken) {
return (RotateServerCa) super.setAccessToken(accessToken);
}
@Override
public RotateServerCa setAlt(java.lang.String alt) {
return (RotateServerCa) super.setAlt(alt);
}
@Override
public RotateServerCa setCallback(java.lang.String callback) {
return (RotateServerCa) super.setCallback(callback);
}
@Override
public RotateServerCa setFields(java.lang.String fields) {
return (RotateServerCa) super.setFields(fields);
}
@Override
public RotateServerCa setKey(java.lang.String key) {
return (RotateServerCa) super.setKey(key);
}
@Override
public RotateServerCa setOauthToken(java.lang.String oauthToken) {
return (RotateServerCa) super.setOauthToken(oauthToken);
}
@Override
public RotateServerCa setPrettyPrint(java.lang.Boolean prettyPrint) {
return (RotateServerCa) super.setPrettyPrint(prettyPrint);
}
@Override
public RotateServerCa setQuotaUser(java.lang.String quotaUser) {
return (RotateServerCa) super.setQuotaUser(quotaUser);
}
@Override
public RotateServerCa setUploadType(java.lang.String uploadType) {
return (RotateServerCa) super.setUploadType(uploadType);
}
@Override
public RotateServerCa setUploadProtocol(java.lang.String uploadProtocol) {
return (RotateServerCa) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public RotateServerCa setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public RotateServerCa setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public RotateServerCa set(String parameterName, Object value) {
return (RotateServerCa) super.set(parameterName, value);
}
}
/**
* Starts the replication in the read replica instance.
*
* Create a request for the method "instances.startReplica".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link StartReplica#execute()} method to invoke the remote operation.
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL read replica instance name.
* @return the request
*/
public StartReplica startReplica(java.lang.String project, java.lang.String instance) throws java.io.IOException {
StartReplica result = new StartReplica(project, instance);
initialize(result);
return result;
}
public class StartReplica extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/startReplica";
/**
* Starts the replication in the read replica instance.
*
* Create a request for the method "instances.startReplica".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link StartReplica#execute()} method to invoke the remote
* operation. <p> {@link
* StartReplica#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL read replica instance name.
* @since 1.13
*/
protected StartReplica(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public StartReplica set$Xgafv(java.lang.String $Xgafv) {
return (StartReplica) super.set$Xgafv($Xgafv);
}
@Override
public StartReplica setAccessToken(java.lang.String accessToken) {
return (StartReplica) super.setAccessToken(accessToken);
}
@Override
public StartReplica setAlt(java.lang.String alt) {
return (StartReplica) super.setAlt(alt);
}
@Override
public StartReplica setCallback(java.lang.String callback) {
return (StartReplica) super.setCallback(callback);
}
@Override
public StartReplica setFields(java.lang.String fields) {
return (StartReplica) super.setFields(fields);
}
@Override
public StartReplica setKey(java.lang.String key) {
return (StartReplica) super.setKey(key);
}
@Override
public StartReplica setOauthToken(java.lang.String oauthToken) {
return (StartReplica) super.setOauthToken(oauthToken);
}
@Override
public StartReplica setPrettyPrint(java.lang.Boolean prettyPrint) {
return (StartReplica) super.setPrettyPrint(prettyPrint);
}
@Override
public StartReplica setQuotaUser(java.lang.String quotaUser) {
return (StartReplica) super.setQuotaUser(quotaUser);
}
@Override
public StartReplica setUploadType(java.lang.String uploadType) {
return (StartReplica) super.setUploadType(uploadType);
}
@Override
public StartReplica setUploadProtocol(java.lang.String uploadProtocol) {
return (StartReplica) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the read replica. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the read replica.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the read replica. */
public StartReplica setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL read replica instance name. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL read replica instance name.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL read replica instance name. */
public StartReplica setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public StartReplica set(String parameterName, Object value) {
return (StartReplica) super.set(parameterName, value);
}
}
/**
* Stops the replication in the read replica instance.
*
* Create a request for the method "instances.stopReplica".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link StopReplica#execute()} method to invoke the remote operation.
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL read replica instance name.
* @return the request
*/
public StopReplica stopReplica(java.lang.String project, java.lang.String instance) throws java.io.IOException {
StopReplica result = new StopReplica(project, instance);
initialize(result);
return result;
}
public class StopReplica extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/stopReplica";
/**
* Stops the replication in the read replica instance.
*
* Create a request for the method "instances.stopReplica".
*
   * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link StopReplica#execute()} method to invoke the remote
* operation. <p> {@link
* StopReplica#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project ID of the project that contains the read replica.
* @param instance Cloud SQL read replica instance name.
* @since 1.13
*/
protected StopReplica(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public StopReplica set$Xgafv(java.lang.String $Xgafv) {
return (StopReplica) super.set$Xgafv($Xgafv);
}
@Override
public StopReplica setAccessToken(java.lang.String accessToken) {
return (StopReplica) super.setAccessToken(accessToken);
}
@Override
public StopReplica setAlt(java.lang.String alt) {
return (StopReplica) super.setAlt(alt);
}
@Override
public StopReplica setCallback(java.lang.String callback) {
return (StopReplica) super.setCallback(callback);
}
@Override
public StopReplica setFields(java.lang.String fields) {
return (StopReplica) super.setFields(fields);
}
@Override
public StopReplica setKey(java.lang.String key) {
return (StopReplica) super.setKey(key);
}
@Override
public StopReplica setOauthToken(java.lang.String oauthToken) {
return (StopReplica) super.setOauthToken(oauthToken);
}
@Override
public StopReplica setPrettyPrint(java.lang.Boolean prettyPrint) {
return (StopReplica) super.setPrettyPrint(prettyPrint);
}
@Override
public StopReplica setQuotaUser(java.lang.String quotaUser) {
return (StopReplica) super.setQuotaUser(quotaUser);
}
@Override
public StopReplica setUploadType(java.lang.String uploadType) {
return (StopReplica) super.setUploadType(uploadType);
}
@Override
public StopReplica setUploadProtocol(java.lang.String uploadProtocol) {
return (StopReplica) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the read replica. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the read replica.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the read replica. */
public StopReplica setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL read replica instance name. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL read replica instance name.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL read replica instance name. */
public StopReplica setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public StopReplica set(String parameterName, Object value) {
return (StopReplica) super.set(parameterName, value);
}
}
/**
 * Truncates MySQL general and slow query log tables. MySQL only.
*
* Create a request for the method "instances.truncateLog".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link TruncateLog#execute()} method to invoke the remote operation.
*
* @param project Project ID of the Cloud SQL project.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesTruncateLogRequest}
* @return the request
*/
public TruncateLog truncateLog(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesTruncateLogRequest content) throws java.io.IOException {
TruncateLog result = new TruncateLog(project, instance, content);
initialize(result);
return result;
}
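    // Usage sketch (illustrative only; identifiers and the log type value are assumptions):
    //
    //   com.google.api.services.sqladmin.model.InstancesTruncateLogRequest body =
    //       new com.google.api.services.sqladmin.model.InstancesTruncateLogRequest()
    //           .setTruncateLogContext(
    //               new com.google.api.services.sqladmin.model.TruncateLogContext()
    //                   .setLogType("MYSQL_GENERAL_LOG"));
    //   sqladmin.instances().truncateLog("my-project", "my-instance", body).execute();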
public class TruncateLog extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/truncateLog";
/**
 * Truncates MySQL general and slow query log tables. MySQL only.
*
* Create a request for the method "instances.truncateLog".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link TruncateLog#execute()} method to invoke the remote
* operation. <p> {@link
* TruncateLog#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the Cloud SQL project.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.InstancesTruncateLogRequest}
* @since 1.13
*/
protected TruncateLog(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.InstancesTruncateLogRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public TruncateLog set$Xgafv(java.lang.String $Xgafv) {
return (TruncateLog) super.set$Xgafv($Xgafv);
}
@Override
public TruncateLog setAccessToken(java.lang.String accessToken) {
return (TruncateLog) super.setAccessToken(accessToken);
}
@Override
public TruncateLog setAlt(java.lang.String alt) {
return (TruncateLog) super.setAlt(alt);
}
@Override
public TruncateLog setCallback(java.lang.String callback) {
return (TruncateLog) super.setCallback(callback);
}
@Override
public TruncateLog setFields(java.lang.String fields) {
return (TruncateLog) super.setFields(fields);
}
@Override
public TruncateLog setKey(java.lang.String key) {
return (TruncateLog) super.setKey(key);
}
@Override
public TruncateLog setOauthToken(java.lang.String oauthToken) {
return (TruncateLog) super.setOauthToken(oauthToken);
}
@Override
public TruncateLog setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TruncateLog) super.setPrettyPrint(prettyPrint);
}
@Override
public TruncateLog setQuotaUser(java.lang.String quotaUser) {
return (TruncateLog) super.setQuotaUser(quotaUser);
}
@Override
public TruncateLog setUploadType(java.lang.String uploadType) {
return (TruncateLog) super.setUploadType(uploadType);
}
@Override
public TruncateLog setUploadProtocol(java.lang.String uploadProtocol) {
return (TruncateLog) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the Cloud SQL project. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the Cloud SQL project.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the Cloud SQL project. */
public TruncateLog setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public TruncateLog setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public TruncateLog set(String parameterName, Object value) {
return (TruncateLog) super.set(parameterName, value);
}
}
/**
* Updates settings of a Cloud SQL instance. Using this operation might cause your instance to
* restart.
*
* Create a request for the method "instances.update".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.DatabaseInstance}
* @return the request
*/
public Update update(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.DatabaseInstance content) throws java.io.IOException {
Update result = new Update(project, instance, content);
initialize(result);
return result;
}
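    // Usage sketch (illustrative only; identifiers and the tier are assumptions — a real update
    // normally starts from instances().get(...) so the current settings version is carried over):
    //
    //   com.google.api.services.sqladmin.model.DatabaseInstance body =
    //       new com.google.api.services.sqladmin.model.DatabaseInstance()
    //           .setSettings(new com.google.api.services.sqladmin.model.Settings()
    //               .setTier("db-custom-2-7680"));
    //   sqladmin.instances().update("my-project", "my-instance", body).execute();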
public class Update extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}";
/**
* Updates settings of a Cloud SQL instance. Using this operation might cause your instance to
* restart.
*
* Create a request for the method "instances.update".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.DatabaseInstance}
* @since 1.13
*/
protected Update(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.DatabaseInstance content) {
super(SQLAdmin.this, "PUT", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Update set$Xgafv(java.lang.String $Xgafv) {
return (Update) super.set$Xgafv($Xgafv);
}
@Override
public Update setAccessToken(java.lang.String accessToken) {
return (Update) super.setAccessToken(accessToken);
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setCallback(java.lang.String callback) {
return (Update) super.setCallback(callback);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUploadType(java.lang.String uploadType) {
return (Update) super.setUploadType(uploadType);
}
@Override
public Update setUploadProtocol(java.lang.String uploadProtocol) {
return (Update) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Update setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Update setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Operations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Operations.List request = sqladmin.operations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Operations operations() {
return new Operations();
}
/**
* The "operations" collection of methods.
*/
public class Operations {
/**
* Retrieves an instance operation that has been performed on an instance.
*
* Create a request for the method "operations.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param operation Instance operation ID.
* @return the request
*/
public Get get(java.lang.String project, java.lang.String operation) throws java.io.IOException {
Get result = new Get(project, operation);
initialize(result);
return result;
}
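    // Usage sketch (illustrative only; the operation ID would normally come from the Operation
    // returned by an earlier mutating call):
    //
    //   com.google.api.services.sqladmin.model.Operation op =
    //       sqladmin.operations().get("my-project", "operation-id").execute();
    //   System.out.println(op.getStatus());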
public class Get extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/operations/{operation}";
/**
* Retrieves an instance operation that has been performed on an instance.
*
* Create a request for the method "operations.get".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param operation Instance operation ID.
* @since 1.13
*/
protected Get(java.lang.String project, java.lang.String operation) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.operation = com.google.api.client.util.Preconditions.checkNotNull(operation, "Required parameter operation must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Get setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Instance operation ID. */
@com.google.api.client.util.Key
private java.lang.String operation;
/** Instance operation ID.
*/
public java.lang.String getOperation() {
return operation;
}
/** Instance operation ID. */
public Get setOperation(java.lang.String operation) {
this.operation = operation;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
 * Lists all instance operations that have been performed on the given Cloud SQL instance, in
 * reverse chronological order of start time.
*
* Create a request for the method "operations.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @return the request
*/
public List list(java.lang.String project) throws java.io.IOException {
List result = new List(project);
initialize(result);
return result;
}
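    // Usage sketch (illustrative only; identifiers are assumptions — setInstance() narrows the
    // listing to one instance and maxResults/pageToken page through the results):
    //
    //   com.google.api.services.sqladmin.model.OperationsListResponse ops =
    //       sqladmin.operations().list("my-project")
    //           .setInstance("my-instance")
    //           .setMaxResults(10L)
    //           .execute();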
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.OperationsListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/operations";
/**
 * Lists all instance operations that have been performed on the given Cloud SQL instance, in
 * reverse chronological order of start time.
*
* Create a request for the method "operations.list".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @since 1.13
*/
protected List(java.lang.String project) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.OperationsListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public List setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Maximum number of operations per response. */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** Maximum number of operations per response.
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** Maximum number of operations per response. */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/**
* A previously-returned page token representing part of the larger set of results to view.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A previously-returned page token representing part of the larger set of results to view.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A previously-returned page token representing part of the larger set of results to view.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Projects.List request = sqladmin.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Instances collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Instances.List request = sqladmin.instances().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Instances instances() {
return new Instances();
}
/**
* The "instances" collection of methods.
*/
public class Instances {
/**
* Reschedules the maintenance on the given instance.
*
* Create a request for the method "instances.rescheduleMaintenance".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link RescheduleMaintenance#execute()} method to invoke the remote
* operation.
*
* @param project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.SqlInstancesRescheduleMaintenanceRequestBody}
* @return the request
*/
public RescheduleMaintenance rescheduleMaintenance(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.SqlInstancesRescheduleMaintenanceRequestBody content) throws java.io.IOException {
RescheduleMaintenance result = new RescheduleMaintenance(project, instance, content);
initialize(result);
return result;
}
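      // Usage sketch (illustrative only; identifiers are assumptions, and the reschedule details
      // (type / target time) would be populated on the request body before calling execute()):
      //
      //   com.google.api.services.sqladmin.model.SqlInstancesRescheduleMaintenanceRequestBody body =
      //       new com.google.api.services.sqladmin.model.SqlInstancesRescheduleMaintenanceRequestBody();
      //   sqladmin.projects().instances()
      //       .rescheduleMaintenance("my-project", "my-instance", body)
      //       .execute();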
public class RescheduleMaintenance extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/rescheduleMaintenance";
/**
* Reschedules the maintenance on the given instance.
*
* Create a request for the method "instances.rescheduleMaintenance".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link RescheduleMaintenance#execute()} method to invoke the
* remote operation. <p> {@link RescheduleMaintenance#initialize(com.google.api.client.googleapis.
* services.AbstractGoogleClientRequest)} must be called to initialize this instance immediately
* after invoking the constructor. </p>
*
* @param project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.SqlInstancesRescheduleMaintenanceRequestBody}
* @since 1.13
*/
protected RescheduleMaintenance(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.SqlInstancesRescheduleMaintenanceRequestBody content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public RescheduleMaintenance set$Xgafv(java.lang.String $Xgafv) {
return (RescheduleMaintenance) super.set$Xgafv($Xgafv);
}
@Override
public RescheduleMaintenance setAccessToken(java.lang.String accessToken) {
return (RescheduleMaintenance) super.setAccessToken(accessToken);
}
@Override
public RescheduleMaintenance setAlt(java.lang.String alt) {
return (RescheduleMaintenance) super.setAlt(alt);
}
@Override
public RescheduleMaintenance setCallback(java.lang.String callback) {
return (RescheduleMaintenance) super.setCallback(callback);
}
@Override
public RescheduleMaintenance setFields(java.lang.String fields) {
return (RescheduleMaintenance) super.setFields(fields);
}
@Override
public RescheduleMaintenance setKey(java.lang.String key) {
return (RescheduleMaintenance) super.setKey(key);
}
@Override
public RescheduleMaintenance setOauthToken(java.lang.String oauthToken) {
return (RescheduleMaintenance) super.setOauthToken(oauthToken);
}
@Override
public RescheduleMaintenance setPrettyPrint(java.lang.Boolean prettyPrint) {
return (RescheduleMaintenance) super.setPrettyPrint(prettyPrint);
}
@Override
public RescheduleMaintenance setQuotaUser(java.lang.String quotaUser) {
return (RescheduleMaintenance) super.setQuotaUser(quotaUser);
}
@Override
public RescheduleMaintenance setUploadType(java.lang.String uploadType) {
return (RescheduleMaintenance) super.setUploadType(uploadType);
}
@Override
public RescheduleMaintenance setUploadProtocol(java.lang.String uploadProtocol) {
return (RescheduleMaintenance) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the instance. */
public RescheduleMaintenance setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public RescheduleMaintenance setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public RescheduleMaintenance set(String parameterName, Object value) {
return (RescheduleMaintenance) super.set(parameterName, value);
}
}
/**
 * Starts external primary instance migration.
*
* Create a request for the method "instances.startExternalSync".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link StartExternalSync#execute()} method to invoke the remote operation.
*
* @param project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public StartExternalSync startExternalSync(java.lang.String project, java.lang.String instance) throws java.io.IOException {
StartExternalSync result = new StartExternalSync(project, instance);
initialize(result);
return result;
}
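      // Usage sketch (illustrative only; identifiers and the sync mode value are assumptions):
      //
      //   sqladmin.projects().instances()
      //       .startExternalSync("my-project", "my-instance")
      //       .setSyncMode("ONLINE")
      //       .execute();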
public class StartExternalSync extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/startExternalSync";
/**
 * Starts external primary instance migration.
*
* Create a request for the method "instances.startExternalSync".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link StartExternalSync#execute()} method to invoke the remote
* operation. <p> {@link StartExternalSync#initialize(com.google.api.client.googleapis.services.Ab
* stractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected StartExternalSync(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public StartExternalSync set$Xgafv(java.lang.String $Xgafv) {
return (StartExternalSync) super.set$Xgafv($Xgafv);
}
@Override
public StartExternalSync setAccessToken(java.lang.String accessToken) {
return (StartExternalSync) super.setAccessToken(accessToken);
}
@Override
public StartExternalSync setAlt(java.lang.String alt) {
return (StartExternalSync) super.setAlt(alt);
}
@Override
public StartExternalSync setCallback(java.lang.String callback) {
return (StartExternalSync) super.setCallback(callback);
}
@Override
public StartExternalSync setFields(java.lang.String fields) {
return (StartExternalSync) super.setFields(fields);
}
@Override
public StartExternalSync setKey(java.lang.String key) {
return (StartExternalSync) super.setKey(key);
}
@Override
public StartExternalSync setOauthToken(java.lang.String oauthToken) {
return (StartExternalSync) super.setOauthToken(oauthToken);
}
@Override
public StartExternalSync setPrettyPrint(java.lang.Boolean prettyPrint) {
return (StartExternalSync) super.setPrettyPrint(prettyPrint);
}
@Override
public StartExternalSync setQuotaUser(java.lang.String quotaUser) {
return (StartExternalSync) super.setQuotaUser(quotaUser);
}
@Override
public StartExternalSync setUploadType(java.lang.String uploadType) {
return (StartExternalSync) super.setUploadType(uploadType);
}
@Override
public StartExternalSync setUploadProtocol(java.lang.String uploadProtocol) {
return (StartExternalSync) super.setUploadProtocol(uploadProtocol);
}
/** ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** ID of the project that contains the instance. */
public StartExternalSync setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public StartExternalSync setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Whether to skip the verification step (VESS). */
@com.google.api.client.util.Key
private java.lang.Boolean skipVerification;
/** Whether to skip the verification step (VESS).
*/
public java.lang.Boolean getSkipVerification() {
return skipVerification;
}
/** Whether to skip the verification step (VESS). */
public StartExternalSync setSkipVerification(java.lang.Boolean skipVerification) {
this.skipVerification = skipVerification;
return this;
}
/** External sync mode. */
@com.google.api.client.util.Key
private java.lang.String syncMode;
/** External sync mode.
*/
public java.lang.String getSyncMode() {
return syncMode;
}
/** External sync mode. */
public StartExternalSync setSyncMode(java.lang.String syncMode) {
this.syncMode = syncMode;
return this;
}
@Override
public StartExternalSync set(String parameterName, Object value) {
return (StartExternalSync) super.set(parameterName, value);
}
}
/**
 * Verifies the external primary instance's external sync settings.
*
* Create a request for the method "instances.verifyExternalSyncSettings".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link VerifyExternalSyncSettings#execute()} method to invoke the remote
* operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public VerifyExternalSyncSettings verifyExternalSyncSettings(java.lang.String project, java.lang.String instance) throws java.io.IOException {
VerifyExternalSyncSettings result = new VerifyExternalSyncSettings(project, instance);
initialize(result);
return result;
}
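      // Usage sketch (illustrative only; identifiers are assumptions):
      //
      //   com.google.api.services.sqladmin.model.SqlInstancesVerifyExternalSyncSettingsResponse resp =
      //       sqladmin.projects().instances()
      //           .verifyExternalSyncSettings("my-project", "my-instance")
      //           .setVerifyConnectionOnly(true)
      //           .execute();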
public class VerifyExternalSyncSettings extends SQLAdminRequest<com.google.api.services.sqladmin.model.SqlInstancesVerifyExternalSyncSettingsResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/verifyExternalSyncSettings";
/**
 * Verifies the external primary instance's external sync settings.
*
* Create a request for the method "instances.verifyExternalSyncSettings".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link VerifyExternalSyncSettings#execute()} method to invoke the
* remote operation. <p> {@link VerifyExternalSyncSettings#initialize(com.google.api.client.google
* apis.services.AbstractGoogleClientRequest)} must be called to initialize this instance
* immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected VerifyExternalSyncSettings(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "POST", REST_PATH, null, com.google.api.services.sqladmin.model.SqlInstancesVerifyExternalSyncSettingsResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public VerifyExternalSyncSettings set$Xgafv(java.lang.String $Xgafv) {
return (VerifyExternalSyncSettings) super.set$Xgafv($Xgafv);
}
@Override
public VerifyExternalSyncSettings setAccessToken(java.lang.String accessToken) {
return (VerifyExternalSyncSettings) super.setAccessToken(accessToken);
}
@Override
public VerifyExternalSyncSettings setAlt(java.lang.String alt) {
return (VerifyExternalSyncSettings) super.setAlt(alt);
}
@Override
public VerifyExternalSyncSettings setCallback(java.lang.String callback) {
return (VerifyExternalSyncSettings) super.setCallback(callback);
}
@Override
public VerifyExternalSyncSettings setFields(java.lang.String fields) {
return (VerifyExternalSyncSettings) super.setFields(fields);
}
@Override
public VerifyExternalSyncSettings setKey(java.lang.String key) {
return (VerifyExternalSyncSettings) super.setKey(key);
}
@Override
public VerifyExternalSyncSettings setOauthToken(java.lang.String oauthToken) {
return (VerifyExternalSyncSettings) super.setOauthToken(oauthToken);
}
@Override
public VerifyExternalSyncSettings setPrettyPrint(java.lang.Boolean prettyPrint) {
return (VerifyExternalSyncSettings) super.setPrettyPrint(prettyPrint);
}
@Override
public VerifyExternalSyncSettings setQuotaUser(java.lang.String quotaUser) {
return (VerifyExternalSyncSettings) super.setQuotaUser(quotaUser);
}
@Override
public VerifyExternalSyncSettings setUploadType(java.lang.String uploadType) {
return (VerifyExternalSyncSettings) super.setUploadType(uploadType);
}
@Override
public VerifyExternalSyncSettings setUploadProtocol(java.lang.String uploadProtocol) {
return (VerifyExternalSyncSettings) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public VerifyExternalSyncSettings setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public VerifyExternalSyncSettings setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** External sync mode */
@com.google.api.client.util.Key
private java.lang.String syncMode;
/** External sync mode
*/
public java.lang.String getSyncMode() {
return syncMode;
}
/** External sync mode */
public VerifyExternalSyncSettings setSyncMode(java.lang.String syncMode) {
this.syncMode = syncMode;
return this;
}
/** Flag to verify the connection only. */
@com.google.api.client.util.Key
private java.lang.Boolean verifyConnectionOnly;
/** Flag to verify the connection only.
*/
public java.lang.Boolean getVerifyConnectionOnly() {
return verifyConnectionOnly;
}
/** Flag to verify the connection only. */
public VerifyExternalSyncSettings setVerifyConnectionOnly(java.lang.Boolean verifyConnectionOnly) {
this.verifyConnectionOnly = verifyConnectionOnly;
return this;
}
@Override
public VerifyExternalSyncSettings set(String parameterName, Object value) {
return (VerifyExternalSyncSettings) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the SslCerts collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.SslCerts.List request = sqladmin.sslCerts().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public SslCerts sslCerts() {
return new SslCerts();
}
/**
* The "sslCerts" collection of methods.
*/
public class SslCerts {
/**
* Generates a short-lived X509 certificate containing the provided public key and signed by a
* private key specific to the target instance. Users may use the certificate to authenticate as
* themselves when connecting to the database.
*
* Create a request for the method "sslCerts.createEphemeral".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link CreateEphemeral#execute()} method to invoke the remote operation.
*
* @param project Project ID of the Cloud SQL project.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.SslCertsCreateEphemeralRequest}
* @return the request
*/
public CreateEphemeral createEphemeral(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.SslCertsCreateEphemeralRequest content) throws java.io.IOException {
CreateEphemeral result = new CreateEphemeral(project, instance, content);
initialize(result);
return result;
}
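    // Usage sketch (illustrative only; identifiers and the PEM-encoded public key variable are
    // assumptions):
    //
    //   com.google.api.services.sqladmin.model.SslCertsCreateEphemeralRequest req =
    //       new com.google.api.services.sqladmin.model.SslCertsCreateEphemeralRequest()
    //           .setPublicKey(pemEncodedPublicKey);
    //   com.google.api.services.sqladmin.model.SslCert cert =
    //       sqladmin.sslCerts().createEphemeral("my-project", "my-instance", req).execute();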
public class CreateEphemeral extends SQLAdminRequest<com.google.api.services.sqladmin.model.SslCert> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/createEphemeral";
/**
* Generates a short-lived X509 certificate containing the provided public key and signed by a
* private key specific to the target instance. Users may use the certificate to authenticate as
* themselves when connecting to the database.
*
* Create a request for the method "sslCerts.createEphemeral".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link CreateEphemeral#execute()} method to invoke the remote
* operation. <p> {@link CreateEphemeral#initialize(com.google.api.client.googleapis.services.Abst
* ractGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param project Project ID of the Cloud SQL project.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.SslCertsCreateEphemeralRequest}
* @since 1.13
*/
protected CreateEphemeral(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.SslCertsCreateEphemeralRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.SslCert.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public CreateEphemeral set$Xgafv(java.lang.String $Xgafv) {
return (CreateEphemeral) super.set$Xgafv($Xgafv);
}
@Override
public CreateEphemeral setAccessToken(java.lang.String accessToken) {
return (CreateEphemeral) super.setAccessToken(accessToken);
}
@Override
public CreateEphemeral setAlt(java.lang.String alt) {
return (CreateEphemeral) super.setAlt(alt);
}
@Override
public CreateEphemeral setCallback(java.lang.String callback) {
return (CreateEphemeral) super.setCallback(callback);
}
@Override
public CreateEphemeral setFields(java.lang.String fields) {
return (CreateEphemeral) super.setFields(fields);
}
@Override
public CreateEphemeral setKey(java.lang.String key) {
return (CreateEphemeral) super.setKey(key);
}
@Override
public CreateEphemeral setOauthToken(java.lang.String oauthToken) {
return (CreateEphemeral) super.setOauthToken(oauthToken);
}
@Override
public CreateEphemeral setPrettyPrint(java.lang.Boolean prettyPrint) {
return (CreateEphemeral) super.setPrettyPrint(prettyPrint);
}
@Override
public CreateEphemeral setQuotaUser(java.lang.String quotaUser) {
return (CreateEphemeral) super.setQuotaUser(quotaUser);
}
@Override
public CreateEphemeral setUploadType(java.lang.String uploadType) {
return (CreateEphemeral) super.setUploadType(uploadType);
}
@Override
public CreateEphemeral setUploadProtocol(java.lang.String uploadProtocol) {
return (CreateEphemeral) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the Cloud SQL project. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the Cloud SQL project.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the Cloud SQL project. */
public CreateEphemeral setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public CreateEphemeral setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public CreateEphemeral set(String parameterName, Object value) {
return (CreateEphemeral) super.set(parameterName, value);
}
}
/**
* Deletes the SSL certificate. For First Generation instances, the certificate remains valid until
* the instance is restarted.
*
* Create a request for the method "sslCerts.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
 * @param sha1Fingerprint SHA-1 fingerprint of the SSL certificate.
* @return the request
*/
public Delete delete(java.lang.String project, java.lang.String instance, java.lang.String sha1Fingerprint) throws java.io.IOException {
Delete result = new Delete(project, instance, sha1Fingerprint);
initialize(result);
return result;
}
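    // Usage sketch (illustrative only; the fingerprint variable would typically come from
    // sslCerts().list(...) or sslCerts().get(...)):
    //
    //   sqladmin.sslCerts().delete("my-project", "my-instance", sha1Fingerprint).execute();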
public class Delete extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}";
/**
* Deletes the SSL certificate. For First Generation instances, the certificate remains valid
* until the instance is restarted.
*
* Create a request for the method "sslCerts.delete".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
 * @param sha1Fingerprint SHA-1 fingerprint of the SSL certificate.
* @since 1.13
*/
protected Delete(java.lang.String project, java.lang.String instance, java.lang.String sha1Fingerprint) {
super(SQLAdmin.this, "DELETE", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.sha1Fingerprint = com.google.api.client.util.Preconditions.checkNotNull(sha1Fingerprint, "Required parameter sha1Fingerprint must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Delete setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Delete setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** SHA-1 fingerprint of the SSL certificate. */
@com.google.api.client.util.Key
private java.lang.String sha1Fingerprint;
/** SHA-1 fingerprint of the SSL certificate.
 */
public java.lang.String getSha1Fingerprint() {
return sha1Fingerprint;
}
/** SHA-1 fingerprint of the SSL certificate. */
public Delete setSha1Fingerprint(java.lang.String sha1Fingerprint) {
this.sha1Fingerprint = sha1Fingerprint;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Retrieves a particular SSL certificate. Does not include the private key (required for usage).
 * The private key must be saved from the response to the initial creation request.
*
* Create a request for the method "sslCerts.get".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
 * @param sha1Fingerprint SHA-1 fingerprint of the SSL certificate.
* @return the request
*/
public Get get(java.lang.String project, java.lang.String instance, java.lang.String sha1Fingerprint) throws java.io.IOException {
Get result = new Get(project, instance, sha1Fingerprint);
initialize(result);
return result;
}
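    // Usage sketch (illustrative only; identifiers and the fingerprint variable are assumptions):
    //
    //   com.google.api.services.sqladmin.model.SslCert cert =
    //       sqladmin.sslCerts().get("my-project", "my-instance", sha1Fingerprint).execute();
    //   System.out.println(cert.getCommonName());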
public class Get extends SQLAdminRequest<com.google.api.services.sqladmin.model.SslCert> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts/{sha1Fingerprint}";
/**
* Retrieves a particular SSL certificate. Does not include the private key (required for usage).
 * The private key must be saved from the response to the initial creation request.
*
* Create a request for the method "sslCerts.get".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
 * @param sha1Fingerprint SHA-1 fingerprint of the SSL certificate.
* @since 1.13
*/
protected Get(java.lang.String project, java.lang.String instance, java.lang.String sha1Fingerprint) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.SslCert.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
this.sha1Fingerprint = com.google.api.client.util.Preconditions.checkNotNull(sha1Fingerprint, "Required parameter sha1Fingerprint must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Get setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Get setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** SHA-1 fingerprint of the SSL certificate. */
@com.google.api.client.util.Key
private java.lang.String sha1Fingerprint;
/** SHA-1 fingerprint of the SSL certificate.
 */
public java.lang.String getSha1Fingerprint() {
return sha1Fingerprint;
}
/** SHA-1 fingerprint of the SSL certificate. */
public Get setSha1Fingerprint(java.lang.String sha1Fingerprint) {
this.sha1Fingerprint = sha1Fingerprint;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Creates an SSL certificate and returns it along with the private key and server certificate
* authority. The new certificate will not be usable until the instance is restarted.
*
* Create a request for the method "sslCerts.insert".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.SslCertsInsertRequest}
* @return the request
*/
public Insert insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.SslCertsInsertRequest content) throws java.io.IOException {
Insert result = new Insert(project, instance, content);
initialize(result);
return result;
}
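    // Usage sketch (illustrative only; the common name is an assumption — the private key is
    // returned only in this response, so it must be captured here):
    //
    //   com.google.api.services.sqladmin.model.SslCertsInsertRequest req =
    //       new com.google.api.services.sqladmin.model.SslCertsInsertRequest().setCommonName("my-client-cert");
    //   com.google.api.services.sqladmin.model.SslCertsInsertResponse resp =
    //       sqladmin.sslCerts().insert("my-project", "my-instance", req).execute();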
public class Insert extends SQLAdminRequest<com.google.api.services.sqladmin.model.SslCertsInsertResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts";
/**
* Creates an SSL certificate and returns it along with the private key and server certificate
* authority. The new certificate will not be usable until the instance is restarted.
*
* Create a request for the method "sslCerts.insert".
*
 * This request holds the parameters needed by the sqladmin server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.SslCertsInsertRequest}
* @since 1.13
*/
protected Insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.SslCertsInsertRequest content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.SslCertsInsertResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Insert set$Xgafv(java.lang.String $Xgafv) {
return (Insert) super.set$Xgafv($Xgafv);
}
@Override
public Insert setAccessToken(java.lang.String accessToken) {
return (Insert) super.setAccessToken(accessToken);
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setCallback(java.lang.String callback) {
return (Insert) super.setCallback(callback);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUploadType(java.lang.String uploadType) {
return (Insert) super.setUploadType(uploadType);
}
@Override
public Insert setUploadProtocol(java.lang.String uploadProtocol) {
return (Insert) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Insert setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public Insert setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists all of the current SSL certificates for the instance.
*
* Create a request for the method "sslCerts.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @return the request
*/
public List list(java.lang.String project, java.lang.String instance) throws java.io.IOException {
List result = new List(project, instance);
initialize(result);
return result;
}
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.SslCertsListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/sslCerts";
/**
* Lists all of the current SSL certificates for the instance.
*
* Create a request for the method "sslCerts.list".
*
   * This request holds the parameters needed by the sqladmin server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Cloud SQL instance ID. This does not include the project ID.
* @since 1.13
*/
protected List(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.SslCertsListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Cloud SQL instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Cloud SQL instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Cloud SQL instance ID. This does not include the project ID. */
public List setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Tiers collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Tiers.List request = sqladmin.tiers().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Tiers tiers() {
return new Tiers();
}
/**
* The "tiers" collection of methods.
*/
public class Tiers {
/**
* Lists all available machine types (tiers) for Cloud SQL, for example, db-custom-1-3840. For
* related information, see Pricing.
*
* Create a request for the method "tiers.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project for which to list tiers.
* @return the request
*/
public List list(java.lang.String project) throws java.io.IOException {
List result = new List(project);
initialize(result);
return result;
}
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.TiersListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/tiers";
/**
* Lists all available machine types (tiers) for Cloud SQL, for example, db-custom-1-3840. For
* related information, see Pricing.
*
* Create a request for the method "tiers.list".
*
   * This request holds the parameters needed by the sqladmin server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project for which to list tiers.
* @since 1.13
*/
protected List(java.lang.String project) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.TiersListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project for which to list tiers. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project for which to list tiers.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project for which to list tiers. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Users collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code SQLAdmin sqladmin = new SQLAdmin(...);}
* {@code SQLAdmin.Users.List request = sqladmin.users().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Users users() {
return new Users();
}
/**
* The "users" collection of methods.
*/
public class Users {
/**
* Deletes a user from a Cloud SQL instance.
*
* Create a request for the method "users.delete".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @return the request
*/
public Delete delete(java.lang.String project, java.lang.String instance) throws java.io.IOException {
Delete result = new Delete(project, instance);
initialize(result);
return result;
}
public class Delete extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/users";
/**
* Deletes a user from a Cloud SQL instance.
*
* Create a request for the method "users.delete".
*
   * This request holds the parameters needed by the sqladmin server.  After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @since 1.13
*/
protected Delete(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "DELETE", REST_PATH, null, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Delete setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Delete setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Host of the user in the instance. */
@com.google.api.client.util.Key
private java.lang.String host;
/** Host of the user in the instance.
*/
public java.lang.String getHost() {
return host;
}
/** Host of the user in the instance. */
public Delete setHost(java.lang.String host) {
this.host = host;
return this;
}
/** Name of the user in the instance. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the user in the instance.
*/
public java.lang.String getName() {
return name;
}
/** Name of the user in the instance. */
public Delete setName(java.lang.String name) {
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Creates a new user in a Cloud SQL instance.
*
* Create a request for the method "users.insert".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.User}
* @return the request
*/
public Insert insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.User content) throws java.io.IOException {
Insert result = new Insert(project, instance, content);
initialize(result);
return result;
}
public class Insert extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/users";
/**
* Creates a new user in a Cloud SQL instance.
*
* Create a request for the method "users.insert".
*
   * This request holds the parameters needed by the sqladmin server.  After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.User}
* @since 1.13
*/
protected Insert(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.User content) {
super(SQLAdmin.this, "POST", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Insert set$Xgafv(java.lang.String $Xgafv) {
return (Insert) super.set$Xgafv($Xgafv);
}
@Override
public Insert setAccessToken(java.lang.String accessToken) {
return (Insert) super.setAccessToken(accessToken);
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setCallback(java.lang.String callback) {
return (Insert) super.setCallback(callback);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUploadType(java.lang.String uploadType) {
return (Insert) super.setUploadType(uploadType);
}
@Override
public Insert setUploadProtocol(java.lang.String uploadProtocol) {
return (Insert) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Insert setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Insert setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists users in the specified Cloud SQL instance.
*
* Create a request for the method "users.list".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @return the request
*/
public List list(java.lang.String project, java.lang.String instance) throws java.io.IOException {
List result = new List(project, instance);
initialize(result);
return result;
}
public class List extends SQLAdminRequest<com.google.api.services.sqladmin.model.UsersListResponse> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/users";
/**
* Lists users in the specified Cloud SQL instance.
*
* Create a request for the method "users.list".
*
   * This request holds the parameters needed by the sqladmin server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @since 1.13
*/
protected List(java.lang.String project, java.lang.String instance) {
super(SQLAdmin.this, "GET", REST_PATH, null, com.google.api.services.sqladmin.model.UsersListResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public List setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public List setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates an existing user in a Cloud SQL instance.
*
* Create a request for the method "users.update".
*
* This request holds the parameters needed by the sqladmin server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.User}
* @return the request
*/
public Update update(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.User content) throws java.io.IOException {
Update result = new Update(project, instance, content);
initialize(result);
return result;
}
public class Update extends SQLAdminRequest<com.google.api.services.sqladmin.model.Operation> {
private static final String REST_PATH = "sql/v1beta4/projects/{project}/instances/{instance}/users";
/**
* Updates an existing user in a Cloud SQL instance.
*
* Create a request for the method "users.update".
*
   * This request holds the parameters needed by the sqladmin server.  After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project Project ID of the project that contains the instance.
* @param instance Database instance ID. This does not include the project ID.
* @param content the {@link com.google.api.services.sqladmin.model.User}
* @since 1.13
*/
protected Update(java.lang.String project, java.lang.String instance, com.google.api.services.sqladmin.model.User content) {
super(SQLAdmin.this, "PUT", REST_PATH, content, com.google.api.services.sqladmin.model.Operation.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
this.instance = com.google.api.client.util.Preconditions.checkNotNull(instance, "Required parameter instance must be specified.");
}
@Override
public Update set$Xgafv(java.lang.String $Xgafv) {
return (Update) super.set$Xgafv($Xgafv);
}
@Override
public Update setAccessToken(java.lang.String accessToken) {
return (Update) super.setAccessToken(accessToken);
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setCallback(java.lang.String callback) {
return (Update) super.setCallback(callback);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUploadType(java.lang.String uploadType) {
return (Update) super.setUploadType(uploadType);
}
@Override
public Update setUploadProtocol(java.lang.String uploadProtocol) {
return (Update) super.setUploadProtocol(uploadProtocol);
}
/** Project ID of the project that contains the instance. */
@com.google.api.client.util.Key
private java.lang.String project;
/** Project ID of the project that contains the instance.
*/
public java.lang.String getProject() {
return project;
}
/** Project ID of the project that contains the instance. */
public Update setProject(java.lang.String project) {
this.project = project;
return this;
}
/** Database instance ID. This does not include the project ID. */
@com.google.api.client.util.Key
private java.lang.String instance;
/** Database instance ID. This does not include the project ID.
*/
public java.lang.String getInstance() {
return instance;
}
/** Database instance ID. This does not include the project ID. */
public Update setInstance(java.lang.String instance) {
this.instance = instance;
return this;
}
/** Optional. Host of the user in the instance. */
@com.google.api.client.util.Key
private java.lang.String host;
/** Optional. Host of the user in the instance.
*/
public java.lang.String getHost() {
return host;
}
/** Optional. Host of the user in the instance. */
public Update setHost(java.lang.String host) {
this.host = host;
return this;
}
/** Name of the user in the instance. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Name of the user in the instance.
*/
public java.lang.String getName() {
return name;
}
/** Name of the user in the instance. */
public Update setName(java.lang.String name) {
this.name = name;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
}
/**
* Builder for {@link SQLAdmin}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
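// Illustrative note (not part of the generated client): with GOOGLE_API_USE_MTLS_ENDPOINT=always,
// chooseEndpoint() returns DEFAULT_MTLS_ROOT_URL regardless of the transport; with the variable
// unset or set to "auto" it does so only when transport.isMtls() is true, and DEFAULT_ROOT_URL is
// used in every other case.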
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link SQLAdmin}. */
@Override
public SQLAdmin build() {
return new SQLAdmin(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link SQLAdminRequestInitializer}.
*
* @since 1.12
*/
public Builder setSQLAdminRequestInitializer(
SQLAdminRequestInitializer sqladminRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(sqladminRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
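/*
 * Illustrative usage sketch (not part of the generated source; the application name and project ID
 * below are assumed example values):
 *
 *   SQLAdmin sqladmin = new SQLAdmin.Builder(
 *           com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
 *           com.google.api.client.json.jackson2.JacksonFactory.getDefaultInstance(),
 *           null)
 *       .setApplicationName("example-app")
 *       .build();
 *   com.google.api.services.sqladmin.model.TiersListResponse tiers =
 *       sqladmin.tiers().list("example-project").execute();
 *
 * The Builder picks the regular or mTLS root URL via chooseEndpoint() before the client is built.
 */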
| [
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
]
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
mesonbuild/cmake/interpreter.py | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel, CMakeTarget
from .executor import CMakeExecutor
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog
from ..environment import Environment
from ..mesonlib import MachineChoice
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from subprocess import Popen, PIPE
from typing import Any, List, Dict, Optional, TYPE_CHECKING
from threading import Thread
import os, re
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
language_map = {
'c': 'C',
'cpp': 'CXX',
'cuda': 'CUDA',
'cs': 'CSharp',
'java': 'Java',
'fortran': 'Fortran',
'swift': 'Swift',
}
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
target_type_requires_trace = ['INTERFACE_LIBRARY']
skip_targets = ['UTILITY']
blacklist_compiler_flags = [
'/W1', '/W2', '/W3', '/W4', '/Wall',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
# Utility functions to generate local keys
def _target_key(tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _generated_file_key(fname: str) -> str:
return '__gen_{}__'.format(os.path.basename(fname))
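# Illustrative examples (input names assumed): _target_key('mylib') -> '__tgt_mylib__' and
# _generated_file_key('sub/dir/generated.c') -> '__gen_generated.c__'. These keys are what the
# interpreter stores in output_target_map to resolve targets and generated files later on.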
class ConverterTarget:
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
rm_so_version = re.compile(r'(\.[0-9]+)+$')
def __init__(self, target: CMakeTarget, env: Environment):
self.env = env
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = ''
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = []
self.sources = []
self.generated = []
self.includes = []
self.link_with = []
self.object_libs = []
self.compile_opts = {}
self.public_compile_opts = []
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = []
for i in target.files:
# Determine the meson language
lang = ConverterTarget.lang_cmake_to_meson.get(i.language.lower(), 'c')
if lang not in self.languages:
self.languages += [lang]
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x for x in i.includes if x not in self.includes]
# Add sources to the right array
if i.is_generated:
self.generated += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
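# Illustrative matches (assumed flags): '-std=gnu99' -> ('-std=', 'gnu99') and '/std:c++17' ->
# ('/std:', 'c++17'); group 2 becomes the value of the generated c_std/cpp_std override option.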
def postprocess(self, output_target_map: dict, root_src_dir: str, subdir: str, install_prefix: str, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
if m:
self.override_options += ['{}_std={}'.format(i, m.group(2))]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
if self.type.upper() in target_type_requires_trace:
if self.name in trace.targets:
props = trace.targets[self.name].properties
self.includes += props.get('INTERFACE_INCLUDE_DIRECTORIES', [])
self.public_compile_opts += props.get('INTERFACE_COMPILE_DEFINITIONS', [])
self.public_compile_opts += props.get('INTERFACE_COMPILE_OPTIONS', [])
self.link_flags += props.get('INTERFACE_LINK_OPTIONS', [])
else:
mlog.warning('CMake: Target', mlog.bold(self.name), 'not found in CMake trace. This can lead to build errors')
# Fix link libraries
def try_resolve_link_with(path: str) -> Optional[str]:
basename = os.path.basename(path)
candidates = [basename, ConverterTarget.rm_so_version.sub('', basename)]
for i in lib_suffixes:
if not basename.endswith('.' + i):
continue
new_basename = basename[:-len(i) - 1]
new_basename = ConverterTarget.rm_so_version.sub('', new_basename)
new_basename = '{}.{}'.format(new_basename, i)
candidates += [new_basename]
for i in candidates:
if i in output_target_map:
return output_target_map[i]
return None
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not os.path.isabs(i):
link_with = try_resolve_link_with(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.endswith(y) for y in supported])]
self.generated = [x for x in self.generated if any([x.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: str, is_header: bool, is_generated: bool) -> Optional[str]:
if not os.path.isabs(x):
x = os.path.normpath(os.path.join(self.src_dir, x))
if not os.path.exists(x) and not any([x.endswith(y) for y in obj_suffixes]) and not is_generated:
mlog.warning('CMake: path', mlog.bold(x), 'does not exist. Ignoring. This can lead to build errors')
return None
if os.path.isabs(x) and os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir():
if is_header:
return os.path.relpath(x, os.path.join(self.env.get_build_dir(), subdir))
else:
return os.path.relpath(x, root_src_dir)
if os.path.isabs(x) and os.path.commonpath([x, root_src_dir]) == root_src_dir:
return os.path.relpath(x, root_src_dir)
return x
def custom_target(x: str):
key = _generated_file_key(x)
if key in output_target_map:
ctgt = output_target_map[key]
assert(isinstance(ctgt, ConverterCustomTarget))
ref = ctgt.get_ref(x)
assert(isinstance(ref, CustomTargetReference) and ref.valid())
return ref
return x
build_dir_rel = os.path.relpath(self.build_dir, os.path.join(self.env.get_build_dir(), subdir))
self.includes = list(set([rel_path(x, True, False) for x in set(self.includes)] + [build_dir_rel]))
self.sources = [rel_path(x, False, False) for x in self.sources]
self.generated = [rel_path(x, False, True) for x in self.generated]
# Resolve custom targets
self.generated = [custom_target(x) for x in self.generated]
# Remove delete entries
self.includes = [x for x in self.includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
self.generated = [x for x in self.generated if x is not None]
# Make sure '.' is always in the include directories
if '.' not in self.includes:
self.includes += ['.']
# make install dir relative to the install prefix
if self.install_dir and os.path.isabs(self.install_dir):
if os.path.commonpath([self.install_dir, install_prefix]) == install_prefix:
self.install_dir = os.path.relpath(self.install_dir, install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
def process_object_libs(self, obj_target_list: List['ConverterTarget']):
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if isinstance(x, str)]
temp = [os.path.basename(x) for x in temp]
temp = [x for x in temp if any([x.endswith('.' + y) for y in obj_suffixes])]
temp = [os.path.splitext(x)[0] for x in temp]
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [os.path.basename(x) for x in i.sources + i.generated]
for j in source_files:
if j in temp:
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not isinstance(x, str) or not any([x.endswith('.' + y) for y in obj_suffixes])]
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int):
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget):
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
self.original_outputs = list(target.outputs)
self.outputs = [os.path.basename(x) for x in self.original_outputs]
self.command = target.command
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = []
self.depends = []
ConverterCustomTarget.tgt_counter += 1
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.outputs)
def postprocess(self, output_target_map: dict, root_src_dir: str, subdir: str, build_dir: str) -> None:
# Default the working directory to the CMake build dir. This
# is not 100% correct, since it should be the value of
# ${CMAKE_CURRENT_BINARY_DIR} when add_custom_command is
# called. However, keeping track of this variable is not
# trivial and the current solution should work in most cases.
if not self.working_dir:
self.working_dir = build_dir
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR} (see note above)
if not os.path.isabs(self.working_dir):
self.working_dir = os.path.normpath(os.path.join(build_dir, self.working_dir))
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
# and the first disclaimer is still in effect
def ensure_absolute(x: str):
if os.path.isabs(x):
return x
else:
return os.path.normpath(os.path.join(build_dir, x))
self.original_outputs = [ensure_absolute(x) for x in self.original_outputs]
# Check if the command is a build target
commands = []
for i in self.command:
assert(isinstance(i, list))
cmd = []
for j in i:
target_key = _target_key(j)
if target_key in output_target_map:
cmd += [output_target_map[target_key]]
else:
cmd += [j]
commands += [cmd]
self.command = commands
# Check dependencies and input files
for i in self.depends_raw:
tgt_key = _target_key(i)
gen_key = _generated_file_key(i)
if os.path.basename(i) in output_target_map:
self.depends += [output_target_map[os.path.basename(i)]]
elif tgt_key in output_target_map:
self.depends += [output_target_map[tgt_key]]
elif gen_key in output_target_map:
self.inputs += [output_target_map[gen_key].get_ref(i)]
elif not os.path.isabs(i) and os.path.exists(os.path.join(root_src_dir, i)):
self.inputs += [i]
elif os.path.isabs(i) and os.path.exists(i) and os.path.commonpath([i, root_src_dir]) == root_src_dir:
self.inputs += [os.path.relpath(i, root_src_dir)]
def get_ref(self, fname: str) -> Optional[CustomTargetReference]:
try:
idx = self.outputs.index(os.path.basename(fname))
return CustomTargetReference(self, idx)
except ValueError:
return None
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: str, src_dir: str, install_prefix: str, env: Environment, backend: 'Backend'):
assert(hasattr(backend, 'name'))
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = os.path.join(subdir, '__CMake_build')
self.build_dir = os.path.join(env.get_build_dir(), self.build_dir_rel)
self.install_prefix = install_prefix
self.env = env
self.backend_name = backend.name
self.client = CMakeClient(self.env)
# Raw CMake results
self.bs_files = []
self.codemodel = None
self.raw_trace = None
# Analysed data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = [] # type: List[ConverterCustomTarget]
self.trace = CMakeTraceParser()
# Generated meson data
self.generated_targets = {}
def configure(self, extra_cmake_options: List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, '>=3.7', for_machine)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
generator = backend_generator_map[self.backend_name]
cmake_args = cmake_exe.get_command()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ['-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += ['-DCMAKE_{}_COMPILER_LAUNCHER={}'.format(cmake_lang, exelist[0]),
'-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[1])]
if hasattr(comp, 'get_linker_exelist') and comp.get_id() == 'clang-cl':
cmake_args += ['-DCMAKE_LINKER={}'.format(comp.get_linker_exelist()[0])]
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += ['--trace', '--trace-expand']
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running:'), ' '.join(cmake_args))
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
proc = Popen(cmake_args + [self.src_dir], stdout=PIPE, stderr=PIPE, cwd=self.build_dir, env=os_env)
def print_stdout():
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode('utf-8').strip('\n'))
proc.stdout.close()
t = Thread(target=print_stdout)
t.start()
self.raw_trace = proc.stderr.read()
self.raw_trace = self.raw_trace.decode('utf-8')
proc.stderr.close()
proc.wait()
t.join()
mlog.log()
h = mlog.green('SUCCEEDED') if proc.returncode == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if proc.returncode != 0:
raise CMakeException('Failed to configure the CMake subproject')
def initialise(self, extra_cmake_options: List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
self.configure(extra_cmake_options)
with self.client.connect():
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(os.path.join(src_dir, x), self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(set(self.bs_files))
self.codemodel = cm_reply
def analyse(self) -> None:
if self.codemodel is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
self.trace = CMakeTraceParser(permissive=True)
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
for i in self.codemodel.configs:
for j in i.projects:
if not self.project_name:
self.project_name = j.name
for k in j.targets:
if k.type not in skip_targets:
self.targets += [ConverterTarget(k, self.env)]
for i in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i)]
# generate the output_target_map
output_target_map = {}
output_target_map.update({x.full_name: x for x in self.targets})
output_target_map.update({_target_key(x.name): x for x in self.targets})
for i in self.targets:
for j in i.artifacts:
output_target_map[os.path.basename(j)] = i
for i in self.custom_targets:
for j in i.original_outputs:
output_target_map[_generated_file_key(j)] = i
object_libs = []
# First pass: Basic target cleanup
for i in self.custom_targets:
i.postprocess(output_target_map, self.src_dir, self.subdir, self.build_dir)
for i in self.targets:
i.postprocess(output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if i.type == 'OBJECT_LIBRARY':
object_libs += [i]
self.languages += [x for x in i.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for i in self.targets:
i.process_object_libs(object_libs)
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val='') -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(), value)
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
if args is None:
args = []
if kwargs is None:
kwargs = {}
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args]
args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items()}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
if args is None:
args = []
if kwargs is None:
kwargs = {}
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args]
args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items()}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
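# Illustrative mapping (assumed example values): assign('foo_src', function('files', ['a.c', 'b.c']))
# builds the AST for the meson line `foo_src = files('a.c', 'b.c')`; the helpers above are thin
# wrappers around the mparser node constructors imported at the top of this file.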
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
run_script = '{}/data/run_ctgt.py'.format(os.path.dirname(os.path.realpath(__file__)))
run_script_var = 'ctgt_run_script'
root_cb.lines += [assign(run_script_var, function('find_program', [[run_script]], {'required': True}))]
# Add the targets
processed = {}
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = processed[ref.ctgt.name]['tgt']
if len(ref.ctgt.outputs) == 1:
return id_node(tgt_var)
else:
return indexed(id_node(tgt_var), ref.index)
def process_target(tgt: ConverterTarget):
# First handle inter target dependencies
link_with = []
objec_libs = []
sources = []
generated = []
generated_filenames = []
custom_targets = []
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [id_node(processed[i.name]['tgt'])]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
objec_libs += [processed[i.name]['tgt']]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
base_name = str(tgt.name)
base_name = base_name.replace('-', '_')
inc_var = '{}_inc'.format(base_name)
src_var = '{}_src'.format(base_name)
dep_var = '{}_dep'.format(base_name)
tgt_var = base_name
# Generate target kwargs
tgt_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': tgt.install,
'install_dir': tgt.install_dir,
'override_options': tgt.override_options,
'objects': [method(id_node(x), 'extract_all_objects') for x in objec_libs],
}
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = val
            # Handle -fPIC, etc.
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
}
# Generate the function nodes
inc_node = assign(inc_var, function('include_directories', tgt.includes))
node_list = [inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = ''
tgt_var = ''
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [base_name, [id_node(src_var)] + generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = ''
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
def process_custom_target(tgt: ConverterCustomTarget) -> None:
            # CMake allows specifying multiple commands in a custom target.
            # To map this to meson, a helper script is used to execute all
            # commands in order. This additionally allows setting the working
            # directory.
tgt_var = tgt.name # type: str
def resolve_source(x: Any) -> Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return id_node(x.name)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += [id_node(run_script_var)]
command += ['-o', '@OUTPUT@']
command += ['-O'] + tgt.original_outputs
command += ['-d', tgt.working_dir]
# Generate the commands. Subcommands are seperated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
return root_cb
def target_info(self, target: str) -> Optional[Dict[str, str]]:
if target in self.generated_targets:
return self.generated_targets[target]
return None
def target_list(self) -> List[str]:
return list(self.generated_targets.keys())
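    # Usage sketch (illustrative, not from the original source): once the Meson
    # AST above has been generated, the mapping recorded in `processed` can be
    # queried through the two helpers above, e.g. with a converter instance
    # named `conv` (hypothetical name):
    #
    #   info = conv.target_info('mylib')   # {'inc': ..., 'src': ..., 'dep': ..., 'tgt': ..., 'func': ...}
    #   if info is not None:
    #       dep_variable = info['dep']
    #   all_target_names = conv.target_list()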
| []
| []
| []
| [] | [] | python | 0 | 0 | |
helpers/bumper.py | #!/usr/bin/env python2
#
# Usage:
# ./bumper.py
#
# Configurable environment variables:
# - BUMPER_VERSION_6 overrides the 6.x.x version.
# - BUMPER_VERSION_7 overrides the 7.x.x version.
# - BUMPER_USE_STAGING_IMAGES set to "true" causes the
# docker.elastic.co/staging/ docker registry namespace to be used.
#
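# Illustrative invocation (the version value here is hypothetical):
#
#   BUMPER_VERSION_7=7.9.3 BUMPER_USE_STAGING_IMAGES=true ./bumper.py
#
# This rewrites matching version strings in the files listed below and switches
# image references to the docker.elastic.co/staging/ namespace.
#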
import re
import os
import glob
import subprocess
import fileinput
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
versions = {
7: os.environ.get("BUMPER_VERSION_7", "7.10.0-SNAPSHOT"),
}
chart_version = versions[7]
file_patterns = [
"*/examples/*/*.y*ml",
"*/examples/*/README.md",
"helpers/examples.mk",
"*/README.md",
"*/values.y*ml",
"*/Chart.y*ml",
]
goss_files = ["*/examples/*/test/goss*.y*ml"]
# Anything matching this regex won't have version bumps changed
# This was happening because strings like 127.0.0.1 match for 7.0.0
# "7.0.0-alpha1" is also used in elasticsearch upgrade test and so shouldn't
# been bump
blacklist = re.compile(r".*127.0.0.1.*|.*7.0.0-alpha1.*")
print("Updating versions...")
for major, version in versions.iteritems():
r = re.compile(r"{0}\.[0-9]*\.[0-9]*-?(SNAPSHOT)?".format(major))
for pattern in file_patterns:
for f in glob.glob(pattern):
print(f)
for line in fileinput.input([f], inplace=True):
if re.match(blacklist, line):
print(line.rstrip())
else:
if f.endswith("Chart.yaml") and line.startswith("version:"):
print(r.sub(chart_version, line.rstrip()))
else:
print(r.sub(version, line.rstrip()))
for pattern in goss_files:
for f in glob.glob(pattern):
print(f)
for line in fileinput.input([f], inplace=True):
# If we have a version with a build id, like 7.6.2-abcdabcd,
# strip off the latter part and only use the 7.6.2 in the goss
# tests
version_without_build_id = re.sub(r"-.*", "", version)
if re.match(blacklist, line):
print(line.rstrip())
else:
print(r.sub(version_without_build_id, line.rstrip()))
if os.environ.get("BUMPER_USE_STAGING_IMAGES") == "true":
image_file_patterns = file_patterns + [
"*/tests/*.py",
"**/templates/*.tpl",
# some tests use docker images in their makefile
"*/examples/*/Makefile",
]
print("\nUpdating namespaces...")
for pattern in image_file_patterns:
for f in glob.glob(pattern):
print(f)
for line in fileinput.input([f], inplace=True):
print(
re.sub(
r"docker.elastic.co/.+?/",
"docker.elastic.co/staging/",
line.rstrip(),
)
)
print("\nUpdating imagePullSecrets...")
for f in glob.glob("*/values.y*ml"):
print(f)
for line in fileinput.input([f], inplace=True):
print(
line.rstrip().replace(
"imagePullSecrets: []",
"imagePullSecrets: [{name: registry-staging}]",
)
)
| []
| []
| [
"BUMPER_USE_STAGING_IMAGES",
"BUMPER_VERSION_7"
]
| [] | ["BUMPER_USE_STAGING_IMAGES", "BUMPER_VERSION_7"] | python | 2 | 0 | |
src/GreenMachine.py | print "Loading...\n"
import argparse, pip
parser = argparse.ArgumentParser(description='GreenMachine')
parser.add_argument('--screen', action="store_true")
parser.add_argument('--build', action="store_true")
parser.add_argument('--calibrate', action="store_true")
parser.add_argument('--install', action="store_true")
screen_mode = parser.parse_args().screen
build_graph = parser.parse_args().build
if parser.parse_args().install:
pip.main(['install', 'numpy'])
pip.main(['install', 'opencv-python'])
pip.main(['install', 'matplotlib'])
pip.main(['install', 'pillow'])
pip.main(['install', 'tensorflow-1.8.0-cp27-cp27mu-linux_aarch64.whl'])
from Camera import Camera
from Model import Model
import cv2, thread, copy, time, os, difflib, sys, json
import numpy as np
from tf_trt_models.detection import download_detection_model, build_detection_graph
import tensorflow.contrib.tensorrt as trt
import tensorflow as tf
# Turn off TensorFlow debug logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Directory of the model data
DATA_DIR = os.path.abspath('../models/EnviroNet/')
# Relates class numbers to their colors
COLOR_DICT = {
1: (73, 135, 71),
2: (244, 65, 181),
3: (73, 135, 71),
4: (73, 135, 71),
5: (244, 65, 181),
6: (73, 135, 71),
7: (73, 135, 71),
8: (244, 134, 66),
9: (66, 134, 244)
}
# Relates class numbers to their full names
CLASS_DICT = {
1: "Cup/Soup Bowl (Compost)",
2: "Silverware (Reusable)",
3: "Plastic Utensil (Compost)",
4: "Container/To-Go Box (Compost)",
5: "Bowl/Plate (Reusable)",
6: "Napkin (Compost)",
7: "Stick (Compost)",
8: "Plastic Bottle (Recycle)",
9: "Food/Candy Wrapper (Non-Compost)"
}
# Relates class numbers to their render order
ORDER_DICT = {
1: 7,
2: 2,
3: 3,
4: 6,
5: 9,
6: 1,
7: 8,
8: 4,
9: 5
}
prev_bboxes = []
prev_classes = []
on_prev_frame = []
def createModel(config_path, checkpoint_path, graph_path):
""" Create a TensorRT Model.
config_path (string) - The path to the model config file.
checkpoint_path (string) - The path to the model checkpoint file(s).
graph_path (string) - The path to the model graph.
returns (Model) - The TRT model built or loaded from the input files.
"""
global build_graph, prev_classes
trt_graph = None
input_names = None
if build_graph:
frozen_graph, input_names, output_names = build_detection_graph(
config=config_path,
checkpoint=checkpoint_path
)
trt_graph = trt.create_inference_graph(
input_graph_def=frozen_graph,
outputs=output_names,
max_batch_size=1,
max_workspace_size_bytes=1 << 25,
precision_mode='FP16',
minimum_segment_size=50
)
with open(graph_path, 'wb') as f:
f.write(trt_graph.SerializeToString())
with open('config.txt', 'r+') as json_file:
data = json.load(json_file)
data['model'] = []
data['model'] = [{'input_names': input_names}]
json_file.seek(0)
json_file.truncate()
json.dump(data, json_file)
else:
with open(graph_path, 'rb') as f:
trt_graph = tf.GraphDef()
trt_graph.ParseFromString(f.read())
with open('config.txt') as json_file:
data = json.load(json_file)
input_names = data['model'][0]['input_names']
return Model(trt_graph, input_names)
def greater_bbox(x, y):
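    """ Compare two detections by render order.
    x (list) - A [score_index, class_number] detection.
    y (list) - A [score_index, class_number] detection.
    returns (bool) - True if x has a higher ORDER_DICT value than y.
    """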
if ORDER_DICT[x[1]] > ORDER_DICT[y[1]]:
return True
else:
return False
def bbox_sort(detected):
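    """ Sort detections by render order using a recursive bubble sort.
    detected (list) - A list of [score_index, class_number] detections.
    returns (list) - The detections with higher ORDER_DICT values first.
    """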
list_sorted = True
for i in range(len(detected) - 1):
if greater_bbox(detected[i + 1], detected[i]):
temp = detected[i]
detected[i] = detected[i + 1]
detected[i + 1] = temp
list_sorted = False
if not list_sorted:
return bbox_sort(detected)
return detected
def matchBBoxes(curr_bboxes, prev_bboxes, similarity_threshold):
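    """ Match bounding boxes between the current and previous frame.
    Boxes are compared by the sum of their four coordinates; a pair whose sums
    differ by at most similarity_threshold is treated as the same box.
    curr_bboxes (list) - Bounding boxes from the current frame.
    prev_bboxes (list) - Bounding boxes from the previous frame.
    returns (list) - (current_index, previous_index) tuples for each match.
    """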
matched_indices = []
prev_aggregates = []
for prev_bbox in prev_bboxes:
prev_aggregates.append(prev_bbox[0] + prev_bbox[1] + prev_bbox[2] + prev_bbox[3])
for i in range(len(curr_bboxes)):
curr_aggregate = curr_bboxes[i][0] + curr_bboxes[i][1] + curr_bboxes[i][2] + curr_bboxes[i][3]
for j in range(len(prev_aggregates)):
if abs(curr_aggregate - prev_aggregates[j]) <= similarity_threshold:
matched_indices.append((i, j))
return matched_indices
def predict(model, image, score_thresh, screen_mode, fill):
""" Predict objects on an image.
model (Model) - The model to predict with.
image (nd.nparray) - The image to predict on.
returns (nd.nparray) - The modified image with bounding boxes and item list.
"""
global COLOR_DICT, prev_bboxes, prev_classes
# Run the prediction
scores, boxes, classes = model.predict(image)
# Prepare the images for augmentation
if screen_mode:
new_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
else:
new_image = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)
cv2.rectangle(new_image, (0, 0), (image.shape[1], image.shape[0]), (255, 0, 0), 5)
# Go through each bounding box and only draw and save the ones above the score threshold
detected = []
for i in range(len(scores)):
if scores[i] > score_thresh:
detected.append([i, classes[i] + 1])
detected = bbox_sort(detected)
text_list = []
bboxes = []
classes = []
for i in range(len(detected)):
box = boxes[detected[i][0]] * np.array([image.shape[0], image.shape[1], image.shape[0], image.shape[1]])
bboxes.append(box)
classes.append(detected[i][0])
matched_indices = matchBBoxes(bboxes, prev_bboxes, 100)
for i in range(len(detected)):
color = COLOR_DICT[detected[i][1]]
x0 = bboxes[i][1] - 20
y0 = bboxes[i][0] - (1080 - bboxes[i][0]) * 50 / 1080
x1 = bboxes[i][3] + 20
y1 = bboxes[i][2]
num_pairs = 0
for index_pair in matched_indices:
if index_pair[0] == i and detected[i][0] == prev_classes[index_pair[1]]:
num_pairs += 1
x0 = ((x0 * num_pairs) + prev_bboxes[index_pair[1]][1] - 20) / (num_pairs + 1.0)
y0 = ((y0 * num_pairs) + prev_bboxes[index_pair[1]][0] - (1080 - prev_bboxes[index_pair[1]][1]) * 50 / 1080) / (num_pairs + 1.0)
x1 = ((x1 * num_pairs) + prev_bboxes[index_pair[1]][3] + 20) / (num_pairs + 1.0)
y1 = ((y1 * num_pairs) + prev_bboxes[index_pair[1]][2]) / (num_pairs + 1.0)
line_type = 3
if fill and not screen_mode:
line_type = cv2.FILLED
cv2.rectangle(new_image, (int(x0), int(y0)), (int(x1), int(y1)), color, line_type)
        name = CLASS_DICT[detected[i][1]]
        # Tally detections per class; item list entries are [name, count, class]
        for text in text_list:
            if text[0] == name:
                text[1] += 1
                break
        else:
            text_list.append([name, 1, detected[i][1]])
    prev_bboxes = bboxes
    prev_classes = classes
    y = 50  # Starting y position for the on-screen item list
    dy = 50 # Change in y position for each item
    for text in text_list:
        color = COLOR_DICT[text[2]]
        cv2.putText(new_image, str(text[1]) + "x " + text[0], (1500, y), cv2.FONT_HERSHEY_DUPLEX, 0.5, color, lineType=cv2.LINE_AA)
        y += dy
return new_image
def main():
""" Run this script.
"""
global DATA_DIR, screen_mode
config_path = os.path.join(DATA_DIR, 'model.config')
checkpoint_path = os.path.join(DATA_DIR, 'model.ckpt')
graph_path = os.path.join(DATA_DIR, 'graph.pbtxt')
# Create an OpenCV window
cv2.namedWindow("Inference", cv2.WND_PROP_FULLSCREEN)
if not screen_mode:
cv2.setWindowProperty("Inference", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
empty = np.zeros((1080, 1920, 3), dtype=np.uint8)
cv2.waitKey(50)
cv2.putText(empty, "Loading...", (441, 387), cv2.FONT_HERSHEY_DUPLEX, 2.5, (0, 255, 0), lineType=cv2.LINE_AA)
cv2.putText(empty, "Green Machine", (521, 700), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 0, 0), lineType=cv2.LINE_AA)
cv2.imshow("Inference", empty)
cv2.waitKey(5)
# Create Camera object
x0 = None
y0 = None
x1 = None
y1 = None
with open('config.txt') as json_file:
data = json.load(json_file)
x0 = data['crop'][0]['x0']
y0 = data['crop'][0]['y0']
x1 = data['crop'][0]['x1']
y1 = data['crop'][0]['y1']
if x0 == None:
print "ERROR: Run config.py first!"
exit()
camera = Camera(0, (1920, 1080), float(x0), float(y0), float(x1), float(y1), True)
camera.startVideoStream()
# Create Model object
model = createModel(config_path, checkpoint_path, graph_path)
image = None
warmup_img = None
score_thresh = 0.65 # Change this to set the score threshold
i = 0
fill = True
while warmup_img is None:
warmup_img = camera.read()
prev_img = warmup_img
print "Starting Inference..."
while True:
read_img = camera.read()
image = predict(model, read_img, score_thresh, screen_mode, fill)
# Show inference only if camera is ready
if image is not None:
cv2.imshow("Inference", image)
key = cv2.waitKey(1)
if key == ord('f'):
fill = not fill
# Run the script
main()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
backend/oj/wsgi.py | """
WSGI config for qduoj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
#os.environ['DJANGO_SETTINGS_MODULE'] = 'oj.settings'
#
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oj.settings")
application = get_wsgi_application()
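# Example only (not part of this project's tooling): any WSGI server can serve
# this module directly, e.g. `gunicorn oj.wsgi:application`.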
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
webhooks-extension/pkg/endpoints/webhook_test.go | // /*
// Copyright 2019 The Tekton Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// */
package endpoints
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"reflect"
"strconv"
"testing"
"strings"
restful "github.com/emicklei/go-restful"
"github.com/google/go-cmp/cmp"
routesv1 "github.com/openshift/api/route/v1"
"github.com/tektoncd/experimental/webhooks-extension/pkg/utils"
pipelinesv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
v1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
var server *httptest.Server
type testcase struct {
Webhook webhook
MonitorTriggerName string
expectedProvider string
expectedAPIURL string
}
// All event sources will be created in the "default" namespace because the INSTALLED_NAMESPACE env variable is not set
const installNs = "default"
const defaultRegistry = "default.docker.reg:8500/foo"
func setUpServer() *Resource {
wsContainer := restful.NewContainer()
resource := dummyResource()
resource.K8sClient.CoreV1().Namespaces().Delete(installNs, &metav1.DeleteOptions{})
resource.K8sClient.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: installNs}})
server = httptest.NewServer(wsContainer)
resource.RegisterExtensionWebService(wsContainer)
return resource
}
func TestGetNoServiceDashboardURL(t *testing.T) {
r := dummyResource()
dashboard := r.getDashboardURL(installNs)
if dashboard != "http://localhost:9097/" {
t.Errorf("Dashboard URL not http://localhost:9097/ when no dashboard service found. URL was %s", dashboard)
}
}
func TestGetServiceDashboardURL(t *testing.T) {
r := dummyResource()
svc := createDashboardService("fake-dashboard", "tekton-dashboard")
_, err := r.K8sClient.CoreV1().Services(installNs).Create(svc)
if err != nil {
t.Errorf("Error registering service")
}
dashboard := r.getDashboardURL(installNs)
if dashboard != "http://fake-dashboard:1234/v1/namespaces/default/endpoints" {
t.Errorf("Dashboard URL not http://fake-dashboard:1234/v1/namespaces/default/endpoints. URL was %s", dashboard)
}
}
func TestGetOpenshiftServiceDashboardURL(t *testing.T) {
r := dummyResource()
svc := createDashboardService("fake-openshift-dashboard", "tekton-dashboard-internal")
_, err := r.K8sClient.CoreV1().Services(installNs).Create(svc)
if err != nil {
t.Errorf("Error registering service")
}
os.Setenv("PLATFORM", "openshift")
dashboard := r.getDashboardURL(installNs)
if dashboard != "http://fake-openshift-dashboard:1234/v1/namespaces/default/endpoints" {
t.Errorf("Dashboard URL not http://fake-dashboard:1234/v1/namespaces/default/endpoints. URL was %s", dashboard)
}
}
func TestNewTrigger(t *testing.T) {
r := dummyResource()
trigger := r.newTrigger("myName", "myBindingName", "myTemplateName", "myRepoURL", "myEvent", "mySecretName", "foo1234")
expectedTrigger := v1alpha1.EventListenerTrigger{
Name: "myName",
Bindings: []*v1alpha1.EventListenerBinding{
{
Name: "myBindingName",
APIVersion: "v1alpha1",
},
{
Name: "foo1234",
APIVersion: "v1alpha1",
},
},
Template: v1alpha1.EventListenerTemplate{
Name: "myTemplateName",
APIVersion: "v1alpha1",
},
Interceptors: []*v1alpha1.EventInterceptor{
{
Webhook: &v1alpha1.WebhookInterceptor{
Header: []pipelinesv1alpha1.Param{
{Name: "Wext-Trigger-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "myName"}},
{Name: "Wext-Repository-Url", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "myRepoURL"}},
{Name: "Wext-Incoming-Event", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "myEvent"}},
{Name: "Wext-Secret-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "mySecretName"}}},
ObjectRef: &corev1.ObjectReference{
APIVersion: "v1",
Kind: "Service",
Name: "tekton-webhooks-extension-validator",
Namespace: r.Defaults.Namespace,
},
},
},
},
}
if !reflect.DeepEqual(trigger, expectedTrigger) {
t.Errorf("Eventlistener trigger did not match expectation")
t.Errorf("got: %+v", trigger)
t.Errorf("expected: %+v", expectedTrigger)
}
}
func TestGetParams(t *testing.T) {
var testcases = []testcase{
{
Webhook: webhook{
Name: "name1",
Namespace: installNs,
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token1",
Pipeline: "pipeline1",
DockerRegistry: "registry1",
HelmSecret: "helmsecret1",
ReleaseName: "releasename1",
PullTask: "pulltask1",
OnSuccessComment: "onsuccesscomment1",
OnFailureComment: "onfailurecomment1",
OnTimeoutComment: "ontimeoutcomment1",
OnMissingComment: "onmissingcomment1",
},
expectedProvider: "github",
expectedAPIURL: "https://api.github.com/",
},
{
Webhook: webhook{
Name: "name2",
Namespace: "foo",
GitRepositoryURL: "https://github.com/owner/repo2",
AccessTokenRef: "token2",
Pipeline: "pipeline2",
DockerRegistry: "registry2",
OnSuccessComment: "onsuccesscomment2",
OnFailureComment: "onfailurecomment2",
OnTimeoutComment: "ontimeoutcomment2",
OnMissingComment: "onmissingcomment2",
},
expectedProvider: "github",
expectedAPIURL: "https://api.github.com/",
},
{
Webhook: webhook{
Name: "name3",
Namespace: "foo2",
GitRepositoryURL: "https://github.com/owner/repo3",
AccessTokenRef: "token3",
Pipeline: "pipeline3",
ServiceAccount: "my-sa",
},
expectedProvider: "github",
expectedAPIURL: "https://api.github.com/",
},
{
Webhook: webhook{
Name: "name4",
Namespace: "foo2",
GitRepositoryURL: "https://gitlab.company.com/owner/repo3",
AccessTokenRef: "token3",
Pipeline: "pipeline3",
ServiceAccount: "my-sa",
},
expectedProvider: "gitlab",
expectedAPIURL: "https://gitlab.company.com/api/v4",
},
{
Webhook: webhook{
Name: "name5",
Namespace: "foo2",
GitRepositoryURL: "https://github.company.com/owner/repo3",
AccessTokenRef: "token3",
Pipeline: "pipeline3",
ServiceAccount: "my-sa",
},
expectedProvider: "github",
expectedAPIURL: "https://github.company.com/api/v3/",
},
}
r := dummyResource()
os.Setenv("SSL_VERIFICATION_ENABLED", "true")
for _, tt := range testcases {
hookParams, monitorParams := r.getParams(tt.Webhook)
expectedHookParams, expectedMonitorParams := getExpectedParams(tt.Webhook, r, tt.expectedProvider, tt.expectedAPIURL)
if !reflect.DeepEqual(hookParams, expectedHookParams) {
t.Error("The webhook params returned from r.getParams were not as expected")
t.Errorf("got hookParams: %+v", hookParams)
t.Errorf("expected: %+v", expectedHookParams)
}
if !reflect.DeepEqual(monitorParams, expectedMonitorParams) {
t.Error("The monitor params returned from r.getParams were not as expected")
t.Errorf("monitorParams: %+v", monitorParams)
t.Errorf("expected: %+v", expectedMonitorParams)
}
}
}
func TestCompareRepos(t *testing.T) {
type testcase struct {
url1 string
url2 string
expectedMatch bool
expectedError string
}
testcases := []testcase{
{
url1: "Http://GitHub.Com/foo/BAR",
url2: "http://github.com/foo/bar",
expectedMatch: true,
},
{
url1: "Http://GitHub.Com/foo/BAR",
url2: "http://github.com/foo/bar.git",
expectedMatch: true,
},
{
url1: "Http://github.com/foo/bar.git",
url2: "http://github.com/foo/bar",
expectedMatch: true,
},
{
url1: "http://gitlab.com/foo/bar",
url2: "http://github.com/foo/bar",
expectedMatch: false,
},
{
url1: "http://github.com/bar/bar",
url2: "http://github.com/foo/bar",
expectedMatch: false,
},
{
url1: "http://gitlab.com/foo/bar",
url2: "http://gitLAB.com/FoO/bar",
expectedMatch: true,
},
}
r := dummyResource()
for _, tt := range testcases {
match, err := r.compareGitRepoNames(tt.url1, tt.url2)
if tt.expectedMatch != match {
if err != nil {
t.Errorf("url mismatch with error %s", err.Error())
}
t.Errorf("url mismatch unexpected: %s, %s", tt.url1, tt.url2)
}
}
}
func TestGenerateMonitorTriggerName(t *testing.T) {
r := dummyResource()
var triggers []v1alpha1.EventListenerTrigger
triggersMap := make(map[string]v1alpha1.EventListenerTrigger)
for i := 0; i < 2000; i++ {
t := r.newTrigger("foo-"+strconv.Itoa(i), "foo", "foo", "https://foo.com/foo/bar", "foo", "foo", "foo")
triggers = append(triggers, t)
triggersMap["foo-"+strconv.Itoa(i)] = t
}
for j := 0; j < 5000; j++ {
name := r.generateMonitorTriggerName("foo-", triggers)
if _, ok := triggersMap[name]; ok {
t.Errorf("generateMonitorTriggerName did not provide a unique name")
}
}
}
func TestDoesMonitorExist(t *testing.T) {
type testcase struct {
Webhook webhook
TriggerNamePrefix string
Expected bool
}
testcases := []testcase{
{
Webhook: webhook{
Name: "name1",
Namespace: "foo1",
GitRepositoryURL: "https://github.com/owner/repo1",
AccessTokenRef: "token1",
Pipeline: "pipeline1",
ServiceAccount: "my-sa",
},
TriggerNamePrefix: "name1-",
Expected: true,
},
{
Webhook: webhook{
Name: "name2",
Namespace: "foo2",
GitRepositoryURL: "https://github.com/owner/repo2",
AccessTokenRef: "token2",
Pipeline: "pipeline2",
ServiceAccount: "my-sa",
},
TriggerNamePrefix: "name2-",
Expected: false,
},
}
r := dummyResource()
// Create some pre-existing triggers to pretend to be the monitor
// we will cheat and use webhook name as the prefix
// for the trigger name
var eventListenerTriggers []v1alpha1.EventListenerTrigger
for i, tt := range testcases {
if tt.Expected {
t := r.newTrigger(tt.Webhook.Name+"-"+strconv.Itoa(i), "foo", "foo", tt.Webhook.GitRepositoryURL, "foo", "foo", "foo")
eventListenerTriggers = append(eventListenerTriggers, t)
}
}
// Now test
for _, tt := range testcases {
found, _ := r.doesMonitorExist(tt.TriggerNamePrefix, tt.Webhook, eventListenerTriggers)
if tt.Expected != found {
t.Errorf("Unexpected result checking existence of trigger with monitorprefix %s", tt.TriggerNamePrefix)
}
}
}
func TestGetMonitorBindingName(t *testing.T) {
type testcase struct {
repoURL string
monitorTask string
expectedBindingName string
expectedError string
}
testcases := []testcase{
{
repoURL: "http://foo.github.com/wibble/fish",
monitorTask: "monitor-task",
expectedBindingName: "monitor-task-github-binding",
},
{
repoURL: "https://github.bob.com/foo/dog",
monitorTask: "wibble",
expectedBindingName: "wibble-binding",
},
{
repoURL: "http://foo.gitlab.com/wibble/fish",
monitorTask: "monitor-task",
expectedBindingName: "monitor-task-gitlab-binding",
},
{
repoURL: "",
monitorTask: "monitor-task",
expectedError: "no repository URL provided on call to GetGitProviderAndAPIURL",
},
{
repoURL: "http://foo.gitlab.com/wibble/fish",
monitorTask: "",
expectedBindingName: "",
expectedError: "no monitor task set on call to getMonitorBindingName",
},
{
repoURL: "https://hungry.dinosaur.com/wibble/fish",
monitorTask: "monitor-task",
expectedBindingName: "",
expectedError: "Git Provider for project URL: https://hungry.dinosaur.com/wibble/fish not recognized",
},
}
r := dummyResource()
for _, tt := range testcases {
name, err := r.getMonitorBindingName(tt.repoURL, tt.monitorTask)
if err != nil {
if tt.expectedError != err.Error() {
t.Errorf("unexpected error in TestGetMonitorBindingName: %s", err.Error())
}
}
if name != tt.expectedBindingName {
t.Errorf("mismatch in expected binding name, expected %s got %s", tt.expectedBindingName, name)
}
}
}
func TestCreateEventListener(t *testing.T) {
hook := webhook{
Name: "name1",
Namespace: installNs,
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token1",
Pipeline: "pipeline1",
DockerRegistry: "registry1",
HelmSecret: "helmsecret1",
ReleaseName: "releasename1",
PullTask: "pulltask1",
}
r := dummyResource()
createTriggerResources(hook, r)
_, owner, repo, _ := r.getGitValues(hook.GitRepositoryURL)
monitorTriggerNamePrefix := owner + "." + repo
GetTriggerBindingObjectMeta = FakeGetTriggerBindingObjectMeta
el, err := r.createEventListener(hook, r.Defaults.Namespace, monitorTriggerNamePrefix)
if err != nil {
t.Errorf("Error creating eventlistener: %s", err)
}
if el.Name != "tekton-webhooks-eventlistener" {
t.Errorf("Eventlistener name was: %s, expected: tekton-webhooks-eventlistener", el.Name)
}
if el.Namespace != r.Defaults.Namespace {
t.Errorf("Eventlistener namespace was: %s, expected: %s", el.Namespace, r.Defaults.Namespace)
}
if el.Spec.ServiceAccountName != "tekton-webhooks-extension-eventlistener" {
t.Errorf("Eventlistener service account was: %s, expected tekton-webhooks-extension-eventlistener", el.Spec.ServiceAccountName)
}
if len(el.Spec.Triggers) != 3 {
t.Errorf("Eventlistener had %d triggers, but expected 3", len(el.Spec.Triggers))
}
hooks, err := r.getHooksForRepo(hook.GitRepositoryURL)
if err != nil {
t.Errorf("Error occurred retrieving hook in getHooksForRepo: %s", err.Error())
}
if len(hooks) != 1 {
t.Errorf("Unexpected number of hooks returned from getHooksForRepo: %+v", hooks)
}
if !reflect.DeepEqual(hooks[0], hook) {
t.Errorf("Hook didn't match: Got %+v, Expected %+v", hooks[0], hook)
}
expectedTriggers := r.getExpectedPushAndPullRequestTriggersForWebhook(hook)
for _, trigger := range el.Spec.Triggers {
found := false
for _, t := range expectedTriggers {
if reflect.DeepEqual(t, trigger) {
found = true
break
}
}
if !found {
// Should be the monitor, can't deep equal monitor due to created name
if !strings.HasPrefix(trigger.Name, owner+"."+repo) {
t.Errorf("trigger %+v unexpected", trigger)
}
// Check params on monitor
os.Setenv("SSL_VERIFICATION_ENABLED", "true")
_, expectedMonitorParams := getExpectedParams(hook, r, "github", "https://api.github.com/")
wextMonitorBindingFound := false
for _, monitorBinding := range trigger.Bindings {
if strings.HasPrefix(monitorBinding.Name, "wext-") {
wextMonitorBindingFound = true
binding, err := r.TriggersClient.TektonV1alpha1().TriggerBindings(r.Defaults.Namespace).Get(monitorBinding.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("%s", err.Error())
}
if !reflect.DeepEqual(binding.Spec.Params, expectedMonitorParams) {
t.Error("The monitor params returned from r.getParams were not as expected")
t.Errorf("monitorParams: %+v", binding.Spec.Params)
t.Errorf("expected: %+v", expectedMonitorParams)
}
}
}
if !wextMonitorBindingFound {
t.Errorf("Did not find monitor bindings")
}
}
}
err = r.TriggersClient.TektonV1alpha1().EventListeners(r.Defaults.Namespace).Delete(el.Name, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("Error occurred deleting eventlistener: %s", err.Error())
}
err = r.deleteAllBindings()
if err != nil {
t.Errorf("Error occurred deleting triggerbindings: %s", err.Error())
}
}
func TestUpdateEventListener(t *testing.T) {
var testcases = []webhook{
{
Name: "name1",
Namespace: installNs,
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token1",
Pipeline: "pipeline1",
DockerRegistry: "registry1",
HelmSecret: "helmsecret1",
ReleaseName: "releasename1",
PullTask: "pulltask1",
OnSuccessComment: "onsuccesscomment1",
OnFailureComment: "onfailurecomment1",
OnTimeoutComment: "ontimeoutcomment1",
OnMissingComment: "onmissingcomment1",
},
{
Name: "name2",
Namespace: "foo",
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token2",
Pipeline: "pipeline2",
DockerRegistry: "registry2",
PullTask: "pulltask1",
OnSuccessComment: "onsuccesscomment2",
OnFailureComment: "onfailurecomment2",
OnTimeoutComment: "ontimeoutcomment2",
OnMissingComment: "onmissingcomment2",
},
{
Name: "name3",
Namespace: "foo2",
GitRepositoryURL: "https://github.com/owner/repo2",
AccessTokenRef: "token3",
Pipeline: "pipeline3",
ServiceAccount: "my-sa",
PullTask: "check-me",
},
}
r := dummyResource()
os.Setenv("SERVICE_ACCOUNT", "tekton-test-service-account")
GetTriggerBindingObjectMeta = FakeGetTriggerBindingObjectMeta
createTriggerResources(testcases[0], r)
_, owner, repo, _ := r.getGitValues(testcases[0].GitRepositoryURL)
monitorTriggerNamePrefix := owner + "." + repo
el, err := r.createEventListener(testcases[0], r.Defaults.Namespace, monitorTriggerNamePrefix)
if err != nil {
t.Errorf("Error creating eventlistener: %s", err)
}
_, owner, repo, _ = r.getGitValues(testcases[1].GitRepositoryURL)
monitorTriggerNamePrefix = owner + "." + repo
el, err = r.updateEventListener(el, testcases[1], monitorTriggerNamePrefix)
if err != nil {
t.Errorf("Error updating eventlistener - first time: %s", err)
}
_, owner, repo, _ = r.getGitValues(testcases[2].GitRepositoryURL)
monitorTriggerNamePrefix = owner + "." + repo
el, err = r.updateEventListener(el, testcases[2], monitorTriggerNamePrefix)
if err != nil {
t.Errorf("Error updating eventlistener - second time: %s", err)
}
// Two of the webhooks are on the same repo - therefore only one monitor trigger for these
if len(el.Spec.Triggers) != 8 {
t.Errorf("Eventlistener had %d triggers, but expected 8", len(el.Spec.Triggers))
}
err = r.TriggersClient.TektonV1alpha1().EventListeners(r.Defaults.Namespace).Delete(el.Name, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("Error occurred deleting eventlistener: %s", err.Error())
}
err = r.deleteAllBindings()
if err != nil {
t.Errorf("Error occurred deleting triggerbindings: %s", err.Error())
}
}
func TestDeleteFromEventListener(t *testing.T) {
var testcases = []testcase{
{
Webhook: webhook{
Name: "name1",
Namespace: installNs,
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token1",
Pipeline: "pipeline1",
DockerRegistry: "registry1",
HelmSecret: "helmsecret1",
ReleaseName: "releasename1",
PullTask: "pulltask1",
OnSuccessComment: "onsuccesscomment1",
OnFailureComment: "onfailurecomment1",
OnTimeoutComment: "ontimeoutcomment1",
OnMissingComment: "onmissingcomment1",
},
expectedProvider: "github",
expectedAPIURL: "https://api.github.com/",
},
{
Webhook: webhook{
Name: "name2",
Namespace: "foo",
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token2",
Pipeline: "pipeline2",
DockerRegistry: "registry2",
PullTask: "pulltask1",
OnSuccessComment: "onsuccesscomment2",
OnFailureComment: "onfailurecomment2",
OnTimeoutComment: "ontimeoutcomment2",
OnMissingComment: "onmissingcomment2",
},
expectedProvider: "github",
expectedAPIURL: "https://api.github.com/",
},
}
r := dummyResource()
GetTriggerBindingObjectMeta = FakeGetTriggerBindingObjectMeta
os.Setenv("SERVICE_ACCOUNT", "tekton-test-service-account")
_, owner, repo, _ := r.getGitValues(testcases[0].Webhook.GitRepositoryURL)
monitorTriggerNamePrefix := owner + "." + repo
el, err := r.createEventListener(testcases[0].Webhook, r.Defaults.Namespace, monitorTriggerNamePrefix)
if err != nil {
t.Errorf("Error creating eventlistener: %s", err)
}
_, owner, repo, _ = r.getGitValues(testcases[1].Webhook.GitRepositoryURL)
monitorTriggerNamePrefix = owner + "." + repo
el, err = r.updateEventListener(el, testcases[1].Webhook, monitorTriggerNamePrefix)
if err != nil {
t.Errorf("Error updating eventlistener: %s", err)
}
if len(el.Spec.Triggers) != 5 {
t.Errorf("Eventlistener had %d triggers, but expected 5", len(el.Spec.Triggers))
}
_, gitOwner, gitRepo, _ := r.getGitValues(testcases[1].Webhook.GitRepositoryURL)
monitorTriggerNamePrefix = gitOwner + "." + gitRepo
err = r.deleteFromEventListener(testcases[1].Webhook.Name+"-"+testcases[1].Webhook.Namespace, r.Defaults.Namespace, monitorTriggerNamePrefix, testcases[1].Webhook)
if err != nil {
t.Errorf("Error deleting entry from eventlistener: %s", err)
}
el, err = r.TriggersClient.TektonV1alpha1().EventListeners(r.Defaults.Namespace).Get("", metav1.GetOptions{})
if len(el.Spec.Triggers) != 3 {
t.Errorf("Eventlistener had %d triggers, but expected 3", len(el.Spec.Triggers))
}
}
func TestFailToCreateWebhookNoTriggerResources(t *testing.T) {
r := setUpServer()
os.Setenv("SERVICE_ACCOUNT", "tekton-test-service-account")
newDefaults := EnvDefaults{
Namespace: installNs,
DockerRegistry: defaultRegistry,
}
r = updateResourceDefaults(r, newDefaults)
hook := webhook{
Name: "name1",
Namespace: installNs,
GitRepositoryURL: "https://github.com/owner/repo",
AccessTokenRef: "token1",
Pipeline: "pipeline1",
DockerRegistry: "registry1",
HelmSecret: "helmsecret1",
ReleaseName: "releasename1",
OnSuccessComment: "onsuccesscomment1",
OnFailureComment: "onfailurecomment1",
OnTimeoutComment: "ontimeoutcomment1",
OnMissingComment: "onmissingcomment1",
}
resp := createWebhook(hook, r)
if resp.StatusCode() != 400 {
t.Errorf("Webhook creation succeeded for webhook %s but was expected to fail due to lack of triggertemplate and triggerbinding", hook.Name)
}
}
func TestDockerRegUnset(t *testing.T) {
r := dummyResource()
// Get the docker registry using the endpoint, expect ""
defaults := getEnvDefaults(r, t)
reg := defaults.DockerRegistry
if reg != "" {
t.Errorf("Incorrect defaultDockerRegistry, expected \"\" but was: %s", reg)
}
}
func TestDockerRegSet(t *testing.T) {
r := dummyResource()
newDefaults := EnvDefaults{
Namespace: installNs,
DockerRegistry: defaultRegistry,
}
r = updateResourceDefaults(r, newDefaults)
// Get the docker registry using the endpoint, expect ""
defaults := getEnvDefaults(r, t)
reg := defaults.DockerRegistry
if reg != "default.docker.reg:8500/foo" {
t.Errorf("Incorrect defaultDockerRegistry, expected default.docker.reg:8500/foo, but was: %s", reg)
}
}
func TestDeleteByNameNoName405(t *testing.T) {
setUpServer()
httpReq, _ := http.NewRequest(http.MethodDelete, server.URL+"/webhooks/?namespace=foo&repository=bar", nil)
response, _ := http.DefaultClient.Do(httpReq)
if response.StatusCode != 405 {
t.Errorf("Status code not set to 405 when deleting without a name, it's: %d", response.StatusCode)
}
}
func TestDeleteByNameNoNamespaceOrRepoBadRequest(t *testing.T) {
setUpServer()
httpReq, _ := http.NewRequest(http.MethodDelete, server.URL+"/webhooks/foo", nil)
response, _ := http.DefaultClient.Do(httpReq)
if response.StatusCode != 400 {
t.Errorf("Status code not set to 400 when deleting without a namespace, it's: %d", response.StatusCode)
}
}
func TestDeleteByNameNoNamespaceBadRequest(t *testing.T) {
setUpServer()
httpReq, _ := http.NewRequest(http.MethodDelete, server.URL+"/webhooks/foo?repository=bar", nil)
response, _ := http.DefaultClient.Do(httpReq)
if response.StatusCode != 400 {
t.Errorf("Status code not set to 400 when deleting without a namespace, it's: %d", response.StatusCode)
}
}
func TestDeleteByNameNoRepoBadRequest(t *testing.T) {
setUpServer()
httpReq, _ := http.NewRequest(http.MethodDelete, server.URL+"/webhooks/foo?namespace=foo", nil)
response, _ := http.DefaultClient.Do(httpReq)
if response.StatusCode != 400 {
t.Errorf("Status code not set to 400 when deleting without a repository, it's: %d", response.StatusCode)
}
}
// //------------------- UTILS -------------------//
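// createDashboardService builds a fake dashboard Service with the given name
// and "app" label so that dashboard URL discovery can be tested.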
func createDashboardService(name, labelValue string) *corev1.Service {
labels := make(map[string]string)
labels["app"] = labelValue
dashSVC := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: map[string]string{},
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
corev1.ServicePort{
Name: "http",
Protocol: "TCP",
Port: 1234,
NodePort: 5678,
TargetPort: intstr.FromInt(91011),
},
},
},
Status: corev1.ServiceStatus{},
}
return dashSVC
}
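// getExpectedParams derives the webhook and monitor binding params expected
// for the given webhook, mirroring the values produced by r.getParams.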
func getExpectedParams(hook webhook, r *Resource, expectedProvider, expectedAPIURL string) (expectedHookParams, expectedMonitorParams []pipelinesv1alpha1.Param) {
url := strings.TrimPrefix(hook.GitRepositoryURL, "https://")
url = strings.TrimPrefix(url, "http://")
server := url[0:strings.Index(url, "/")]
org := strings.TrimPrefix(url, server+"/")
org = org[0:strings.Index(org, "/")]
repo := url[strings.LastIndex(url, "/")+1:]
sslverify := os.Getenv("SSL_VERIFICATION_ENABLED")
insecureAsBool, _ := strconv.ParseBool(sslverify)
insecureAsString := strconv.FormatBool(!insecureAsBool)
expectedHookParams = []pipelinesv1alpha1.Param{}
if hook.ReleaseName != "" {
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-release-name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.ReleaseName}})
} else {
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-release-name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.GitRepositoryURL[strings.LastIndex(hook.GitRepositoryURL, "/")+1:]}})
}
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-target-namespace", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.Namespace}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-service-account", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.ServiceAccount}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-git-server", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: server}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-git-org", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: org}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-git-repo", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: repo}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-pull-task", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.PullTask}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-ssl-verify", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: sslverify}})
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-insecure-skip-tls-verify", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: insecureAsString}})
if hook.DockerRegistry != "" {
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-docker-registry", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.DockerRegistry}})
}
if hook.HelmSecret != "" {
expectedHookParams = append(expectedHookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-helm-secret", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.HelmSecret}})
}
expectedMonitorParams = []pipelinesv1alpha1.Param{}
if hook.OnSuccessComment != "" {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commentsuccess", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.OnSuccessComment}})
} else {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commentsuccess", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "Success"}})
}
if hook.OnFailureComment != "" {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commentfailure", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.OnFailureComment}})
} else {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commentfailure", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "Failed"}})
}
if hook.OnTimeoutComment != "" {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commenttimeout", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.OnTimeoutComment}})
} else {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commenttimeout", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "Unknown"}})
}
if hook.OnMissingComment != "" {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commentmissing", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.OnMissingComment}})
} else {
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "commentmissing", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "Missing"}})
}
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "gitsecretname", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: hook.AccessTokenRef}})
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "gitsecretkeyname", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "accessToken"}})
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "dashboardurl", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: r.getDashboardURL(r.Defaults.Namespace)}})
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "insecure-skip-tls-verify", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: insecureAsString}})
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "provider", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: expectedProvider}})
expectedMonitorParams = append(expectedMonitorParams, pipelinesv1alpha1.Param{Name: "apiurl", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: expectedAPIURL}})
return
}
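// deleteAllBindings removes every TriggerBinding in the install namespace so
// that each test starts from a clean state.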
func (r Resource) deleteAllBindings() error {
tbs, err := r.TriggersClient.TektonV1alpha1().TriggerBindings(r.Defaults.Namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
for _, tb := range tbs.Items {
err = r.TriggersClient.TektonV1alpha1().TriggerBindings(r.Defaults.Namespace).Delete(tb.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
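// getExpectedPushAndPullRequestTriggersForWebhook returns the push and
// pull request EventListener triggers expected for the given webhook.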
func (r Resource) getExpectedPushAndPullRequestTriggersForWebhook(webhook webhook) []v1alpha1.EventListenerTrigger {
triggers := []v1alpha1.EventListenerTrigger{
{
Name: webhook.Name + "-" + webhook.Namespace + "-push-event",
Bindings: []*v1alpha1.EventListenerBinding{
{
Name: webhook.Pipeline + "-push-binding",
APIVersion: "v1alpha1",
},
{
// This name is not as it would be in the product, as
// GenerateName is used.
Name: "wext-" + webhook.Name + "-",
APIVersion: "v1alpha1",
},
},
Template: v1alpha1.EventListenerTemplate{
Name: webhook.Pipeline + "-template",
APIVersion: "v1alpha1",
},
Interceptors: []*v1alpha1.EventInterceptor{
{
Webhook: &v1alpha1.WebhookInterceptor{
Header: []pipelinesv1alpha1.Param{
{Name: "Wext-Trigger-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.Name + "-" + webhook.Namespace + "-push-event"}},
{Name: "Wext-Repository-Url", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.GitRepositoryURL}},
{Name: "Wext-Incoming-Event", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "push, Push Hook, Tag Push Hook"}},
{Name: "Wext-Secret-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.AccessTokenRef}}},
ObjectRef: &corev1.ObjectReference{
APIVersion: "v1",
Kind: "Service",
Name: "tekton-webhooks-extension-validator",
Namespace: r.Defaults.Namespace,
},
},
},
},
},
{
Name: webhook.Name + "-" + webhook.Namespace + "-pullrequest-event",
Bindings: []*v1alpha1.EventListenerBinding{
{
Name: webhook.Pipeline + "-pullrequest-binding",
APIVersion: "v1alpha1",
},
{
// This name is not as it would be in the product, as
// GenerateName is used.
Name: "wext-" + webhook.Name + "-",
APIVersion: "v1alpha1",
},
},
Template: v1alpha1.EventListenerTemplate{
Name: webhook.Pipeline + "-template",
APIVersion: "v1alpha1",
},
Interceptors: []*v1alpha1.EventInterceptor{
{
Webhook: &v1alpha1.WebhookInterceptor{
Header: []pipelinesv1alpha1.Param{
{Name: "Wext-Trigger-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.Name + "-" + webhook.Namespace + "-pullrequest-event"}},
{Name: "Wext-Repository-Url", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.GitRepositoryURL}},
{Name: "Wext-Incoming-Event", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "pull_request, Merge Request Hook"}},
{Name: "Wext-Secret-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.AccessTokenRef}},
{Name: "Wext-Incoming-Actions", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "opened,reopened,synchronize"}}},
ObjectRef: &corev1.ObjectReference{
APIVersion: "v1",
Kind: "Service",
Name: "tekton-webhooks-extension-validator",
Namespace: r.Defaults.Namespace,
},
},
},
},
},
}
return triggers
}
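// getEnvDefaults calls the webhooks defaults endpoint and decodes the response
// into an EnvDefaults value.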
func getEnvDefaults(r *Resource, t *testing.T) EnvDefaults {
httpReq := dummyHTTPRequest("GET", "http://wwww.dummy.com:8080/webhooks/defaults", nil)
req := dummyRestfulRequest(httpReq, "")
httpWriter := httptest.NewRecorder()
resp := dummyRestfulResponse(httpWriter)
r.getDefaults(req, resp)
defaults := EnvDefaults{}
err := json.NewDecoder(httpWriter.Body).Decode(&defaults)
if err != nil {
t.Errorf("Error decoding result into defaults{}: %s", err.Error())
}
return defaults
}
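// FakeGetTriggerBindingObjectMeta returns a deterministic ObjectMeta name in
// place of GenerateName so generated bindings can be matched in tests.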
func FakeGetTriggerBindingObjectMeta(name string) metav1.ObjectMeta {
return metav1.ObjectMeta{
Name: "wext-" + name + "-",
}
}
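// createWebhook marshals the webhook and posts it to the create handler,
// returning the recorded response.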
func createWebhook(webhook webhook, r *Resource) (response *restful.Response) {
b, err := json.Marshal(webhook)
if err != nil {
fmt.Println(fmt.Errorf("Marshal error when creating webhook, data is: %s, error is: %s", b, err))
return nil
}
httpReq := dummyHTTPRequest("POST", "http://wwww.dummy.com:8080/webhooks/", bytes.NewBuffer(b))
req := dummyRestfulRequest(httpReq, "")
httpWriter := httptest.NewRecorder()
resp := dummyRestfulResponse(httpWriter)
r.createWebhook(req, resp)
return resp
}
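// testGetAllWebhooks fetches all webhooks through the REST handler and checks
// them against expectedWebhooks, applying the default registry and pull task
// where those fields are unset.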
func testGetAllWebhooks(expectedWebhooks []webhook, r *Resource, t *testing.T) {
httpReq := dummyHTTPRequest("GET", "http://wwww.dummy.com:8080/webhooks/", nil)
req := dummyRestfulRequest(httpReq, "")
httpWriter := httptest.NewRecorder()
resp := dummyRestfulResponse(httpWriter)
r.getAllWebhooks(req, resp)
actualWebhooks := []webhook{}
err := json.NewDecoder(httpWriter.Body).Decode(&actualWebhooks)
if err != nil {
t.Errorf("Error decoding result into []webhook{}: %s", err.Error())
return
}
fmt.Printf("%+v", actualWebhooks)
if len(expectedWebhooks) != len(actualWebhooks) {
t.Errorf("Incorrect length of result, expected %d, but was %d", len(expectedWebhooks), len(actualWebhooks))
return
}
// Now compare the arrays expectedWebhooks and actualWebhooks by turning them into maps
expected := map[webhook]bool{}
actual := map[webhook]bool{}
for i := range expectedWebhooks {
if expectedWebhooks[i].DockerRegistry == "" {
expectedWebhooks[i].DockerRegistry = defaultRegistry
}
if expectedWebhooks[i].PullTask == "" {
expectedWebhooks[i].PullTask = "monitor-task"
}
expected[expectedWebhooks[i]] = true
actual[actualWebhooks[i]] = true
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Webhook error: expected: \n%v \nbut received \n%v", expected, actual)
}
}
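// createTriggerResources seeds the fake clients with the TriggerTemplate,
// push and pullrequest TriggerBindings, and access token Secret that the
// webhook's pipeline requires.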
func createTriggerResources(hook webhook, r *Resource) {
template := v1alpha1.TriggerTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: hook.Pipeline + "-template",
Namespace: installNs,
},
}
pushBinding := v1alpha1.TriggerBinding{
ObjectMeta: metav1.ObjectMeta{
Name: hook.Pipeline + "-push-binding",
Namespace: installNs,
},
}
pullBinding := v1alpha1.TriggerBinding{
ObjectMeta: metav1.ObjectMeta{
Name: hook.Pipeline + "-pullrequest-binding",
Namespace: installNs,
},
}
secret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: hook.AccessTokenRef,
Namespace: installNs,
},
Data: map[string][]byte{
"accessToken": []byte("access"),
"secretToken": []byte("secret"),
},
}
_, err := r.TriggersClient.TektonV1alpha1().TriggerTemplates(installNs).Create(&template)
if err != nil {
fmt.Printf("Error creating fake triggertemplate %s", template.Name)
}
_, err = r.TriggersClient.TektonV1alpha1().TriggerBindings(installNs).Create(&pushBinding)
if err != nil {
fmt.Printf("Error creating fake triggerbinding %s", pushBinding.Name)
}
_, err = r.TriggersClient.TektonV1alpha1().TriggerBindings(installNs).Create(&pullBinding)
if err != nil {
fmt.Printf("Error creating fake triggerbinding %s", pullBinding.Name)
}
_, err = r.K8sClient.CoreV1().Secrets(installNs).Create(&secret)
if err != nil {
fmt.Printf("Error creating fake secret %s", secret.Name)
}
return
}
func Test_getWebhookSecretTokens(t *testing.T) {
// Access token is stored as 'accessToken' and secret as 'secretToken'
tests := []struct {
name string
secret *corev1.Secret
wantAccessToken string
wantSecretToken string
}{
{
name: "foo",
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Data: map[string][]byte{
"accessToken": []byte("myAccessToken"),
"secretToken": []byte("mySecretToken"),
},
},
wantAccessToken: "myAccessToken",
wantSecretToken: "mySecretToken",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Setup resources
r := dummyResource()
if _, err := r.K8sClient.CoreV1().Secrets(r.Defaults.Namespace).Create(tt.secret); err != nil {
t.Errorf("getWebhookSecretTokens() error creating secret: %s", err)
}
// Test
gotAccessToken, gotSecretToken, err := utils.GetWebhookSecretTokens(r.K8sClient, r.Defaults.Namespace, tt.name)
if err != nil {
t.Errorf("getWebhookSecretTokens() returned an error: %s", err)
}
if tt.wantAccessToken != gotAccessToken {
t.Errorf("getWebhookSecretTokens() accessToken = %s, want %s", gotAccessToken, tt.wantAccessToken)
}
if tt.wantSecretToken != gotSecretToken {
t.Errorf("getWebhookSecretTokens() secretToken = %s, want %s", gotSecretToken, tt.wantSecretToken)
}
})
}
}
func Test_getWebhookSecretTokens_error(t *testing.T) {
// Access token is stored as 'accessToken' and secret as 'secretToken'
tests := []struct {
name string
secret *corev1.Secret
}{
{
name: "namenotfound",
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Data: map[string][]byte{
"accessToken": []byte("myAccessToken"),
"secretToken": []byte("mySecretToken"),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Setup resources
r := dummyResource()
if _, err := r.K8sClient.CoreV1().Secrets(r.Defaults.Namespace).Create(tt.secret); err != nil {
t.Errorf("getWebhookSecretTokens() error creating secret: %s", err)
}
// Test
if _, _, err := utils.GetWebhookSecretTokens(r.K8sClient, r.Defaults.Namespace, tt.name); err == nil {
t.Errorf("getWebhookSecretTokens() did not return an error when expected")
}
})
}
}
func Test_createOAuth2Client(t *testing.T) {
// Create client
accessToken := "foo"
ctx := context.Background()
client := utils.CreateOAuth2Client(ctx, accessToken, true)
// Test
responseText := "my response"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
authHeader := r.Header.Get("Authorization")
if !strings.Contains(authHeader, accessToken) {
t.Errorf("createOAuth2Client() expected authHeader to contain: %s; authHeader is: %s", accessToken, authHeader)
}
_, err := w.Write([]byte(responseText))
if err != nil {
t.Errorf("createOAuth2Client() error writing response: %s", err)
}
}))
defer ts.Close()
resp, err := client.Get(ts.URL)
if err != nil {
t.Logf("createOAuth2Client() error sending request: %s", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Logf("createOAuth2Client() error reading response body")
}
if string(body) != responseText {
t.Logf("createOAuth2Client() expected response text %s; got: %s", responseText, body)
}
}
func Test_createOpenshiftRoute(t *testing.T) {
tests := []struct {
name string
serviceName string
route *routesv1.Route
hasErr bool
}{
{
name: "OpenShift Route",
serviceName: "route",
route: &routesv1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: "route",
					// Namespace in the dummy resource
Namespace: "default",
Annotations: map[string]string{"haproxy.router.openshift.io/timeout": "2m"},
},
Spec: routesv1.RouteSpec{
To: routesv1.RouteTargetReference{
Kind: "Service",
Name: "route",
},
TLS: &routesv1.TLSConfig{
Termination: "edge",
InsecureEdgeTerminationPolicy: "Redirect",
},
},
},
hasErr: false,
},
}
for i := range tests {
t.Run(tests[i].name, func(t *testing.T) {
r := dummyResource()
var hasErr bool
if err := r.createOpenshiftRoute(tests[i].serviceName); err != nil {
hasErr = true
}
if diff := cmp.Diff(tests[i].hasErr, hasErr); diff != "" {
t.Fatalf("Error mismatch (-want +got):\n%s", diff)
}
route, err := r.RoutesClient.RouteV1().Routes(r.Defaults.Namespace).Get(tests[i].serviceName, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(tests[i].route, route); diff != "" {
t.Errorf("Route mismatch (-want +got):\n%s", diff)
}
})
}
}
func Test_deleteOpenshiftRoute(t *testing.T) {
tests := []struct {
name string
routeName string
hasErr bool
}{
{
name: "OpenShift Route",
routeName: "route",
hasErr: false,
},
}
for i := range tests {
t.Run(tests[i].name, func(t *testing.T) {
r := dummyResource()
// Seed route for deletion
route := &routesv1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: tests[i].routeName,
},
}
if _, err := r.RoutesClient.RouteV1().Routes(r.Defaults.Namespace).Create(route); err != nil {
t.Fatal(err)
}
// Delete
var hasErr bool
if err := r.deleteOpenshiftRoute(tests[i].routeName); err != nil {
hasErr = true
}
if diff := cmp.Diff(tests[i].hasErr, hasErr); diff != "" {
t.Fatalf("Error mismatch (-want +got):\n%s", diff)
}
_, err := r.RoutesClient.RouteV1().Routes(r.Defaults.Namespace).Get(tests[i].routeName, metav1.GetOptions{})
if err == nil {
t.Errorf("Route not expected")
}
})
}
}
func TestCreateDeleteIngress(t *testing.T) {
r := dummyResource()
r.Defaults.CallbackURL = "http://wibble.com"
expectedHost := "wibble.com"
err := r.createDeleteIngress("create", r.Defaults.Namespace)
if err != nil {
t.Errorf("error creating ingress: %s", err.Error())
}
ingress, err := r.K8sClient.ExtensionsV1beta1().Ingresses(r.Defaults.Namespace).Get("el-tekton-webhooks-eventlistener", metav1.GetOptions{})
if err != nil {
t.Errorf("error getting ingress: %s", err.Error())
}
if ingress.Spec.Rules[0].Host != expectedHost {
t.Error("ingress Host did not match the callback URL")
}
err = r.createDeleteIngress("delete", r.Defaults.Namespace)
if err != nil {
t.Errorf("error deleting ingress: %s", err.Error())
}
}
| [
"\"SSL_VERIFICATION_ENABLED\""
]
| []
| [
"SSL_VERIFICATION_ENABLED"
]
| [] | ["SSL_VERIFICATION_ENABLED"] | go | 1 | 0 | |
qutip/tests/solve/test_sesolve.py | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy.testing import assert_, run_module_suite
# disable the progress bar
import os
from qutip import (
sigmax, sigmay, sigmaz, qeye, basis, expect, num, destroy, create,
Cubic_Spline, sesolve,
)
from qutip.solve import SolverOptions
os.environ['QUTIP_GRAPHICS'] = "NO"
class TestSESolve:
"""
A test class for the QuTiP Schrodinger Eq. solver
"""
def check_evolution(self, H, delta, psi0, tlist, analytic_func,
U0=None, td_args={}, tol=5e-3):
"""
Compare integrated evolution with analytical result
If U0 is not None then operator evo is checked
Otherwise state evo
"""
if U0 is None:
output = sesolve(H, psi0, tlist, [sigmax(), sigmay(), sigmaz()],
args=td_args)
sx, sy, sz = output.expect[0], output.expect[1], output.expect[2]
else:
output = sesolve(H, U0, tlist, args=td_args)
sx = [expect(sigmax(), U*psi0) for U in output.states]
sy = [expect(sigmay(), U*psi0) for U in output.states]
sz = [expect(sigmaz(), U*psi0) for U in output.states]
sx_analytic = np.zeros(np.shape(tlist))
sy_analytic = np.array([-np.sin(delta*analytic_func(t, td_args))
for t in tlist])
sz_analytic = np.array([np.cos(delta*analytic_func(t, td_args))
for t in tlist])
np.testing.assert_allclose(sx, sx_analytic, atol=tol)
np.testing.assert_allclose(sy, sy_analytic, atol=tol)
np.testing.assert_allclose(sz, sz_analytic, atol=tol)
def test_01_1_state_with_const_H(self):
"sesolve: state with const H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
analytic_func = lambda t, args: t
self.check_evolution(H1, delta, psi0, tlist, analytic_func)
def test_01_1_unitary_with_const_H(self):
"sesolve: unitary operator with const H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
U0 = qeye(2) # initial operator
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
analytic_func = lambda t, args: t
self.check_evolution(H1, delta, psi0, tlist, analytic_func, U0)
def test_02_1_state_with_func_H(self):
"sesolve: state with td func H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
h1_func = lambda t, args: H1*np.exp(-args['alpha']*t)
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(h1_func, delta, psi0, tlist, analytic_func,
td_args=td_args)
def test_02_2_unitary_with_func_H(self):
"sesolve: unitary operator with td func H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
U0 = qeye(2) # initial operator
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
h1_func = lambda t, args: H1*np.exp(-args['alpha']*t)
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(h1_func, delta, psi0, tlist, analytic_func, U0,
td_args=td_args)
def test_03_1_state_with_list_func_H(self):
"sesolve: state with td list func H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
h1_coeff = lambda t, args: np.exp(-args['alpha']*t)
H = [[H1, h1_coeff]]
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(H, delta, psi0, tlist, analytic_func,
td_args=td_args)
def test_03_2_unitary_with_list_func_H(self):
"sesolve: unitary operator with td list func H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
U0 = qeye(2) # initial operator
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
h1_coeff = lambda t, args: np.exp(-args['alpha']*t)
H = [[H1, h1_coeff]]
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(H, delta, psi0, tlist, analytic_func, U0,
td_args=td_args)
def test_04_1_state_with_list_str_H(self):
"sesolve: state with td list str H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
H = [[H1, 'exp(-alpha*t)']]
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(H, delta, psi0, tlist, analytic_func,
td_args=td_args)
def test_04_2_unitary_with_list_func_H(self):
"sesolve: unitary operator with td list str H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
U0 = qeye(2) # initial operator
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
H = [[H1, 'exp(-alpha*t)']]
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(H, delta, psi0, tlist, analytic_func, U0,
td_args=td_args)
def test_05_1_state_with_interp_H(self):
"sesolve: state with td interp H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
tcub = np.linspace(0, 20, 50)
S = Cubic_Spline(0, 20, np.exp(-alpha*tcub))
H = [[H1, S]]
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(H, delta, psi0, tlist, analytic_func,
td_args=td_args)
def test_05_2_unitary_with_interp_H(self):
"sesolve: unitary operator with td interp H"
delta = 1.0 * 2*np.pi # atom frequency
psi0 = basis(2, 0) # initial state
U0 = qeye(2) # initial operator
H1 = 0.5*delta*sigmax() # Hamiltonian operator
tlist = np.linspace(0, 20, 200)
alpha = 0.1
td_args = {'alpha':alpha}
tcub = np.linspace(0, 20, 50)
S = Cubic_Spline(0, 20, np.exp(-alpha*tcub))
H = [[H1, S]]
analytic_func = lambda t, args: ((1 - np.exp(-args['alpha']*t))
/args['alpha'])
self.check_evolution(H, delta, psi0, tlist, analytic_func, U0,
td_args=td_args)
def compare_evolution(self, H, psi0, tlist,
normalize=False, td_args={}, tol=5e-5):
"""
Compare integrated evolution of unitary operator with state evo
"""
U0 = qeye(2)
options = SolverOptions(store_states=True, normalize_output=normalize)
out_s = sesolve(H, psi0, tlist, [sigmax(), sigmay(), sigmaz()],
options=options, args=td_args)
xs, ys, zs = out_s.expect[0], out_s.expect[1], out_s.expect[2]
out_u = sesolve(H, U0, tlist, options=options, args=td_args)
xu = [expect(sigmax(), U*psi0) for U in out_u.states]
yu = [expect(sigmay(), U*psi0) for U in out_u.states]
zu = [expect(sigmaz(), U*psi0) for U in out_u.states]
if normalize:
msg_ext = ". (Normalized)"
else:
msg_ext = ". (Not normalized)"
assert_(max(abs(xs - xu)) < tol,
msg="expect X not matching" + msg_ext)
assert_(max(abs(ys - yu)) < tol,
msg="expect Y not matching" + msg_ext)
assert_(max(abs(zs - zu)) < tol,
msg="expect Z not matching" + msg_ext)
def test_06_1_compare_state_and_unitary_const(self):
"sesolve: compare state and unitary operator evo - const H"
eps = 0.2 * 2*np.pi
delta = 1.0 * 2*np.pi # atom frequency
w0 = 0.5*eps
w1 = 0.5*delta
H0 = w0*sigmaz()
H1 = w1*sigmax()
H = H0 + H1
psi0 = basis(2, 0) # initial state
tlist = np.linspace(0, 20, 200)
self.compare_evolution(H, psi0, tlist,
normalize=False, tol=5e-5)
self.compare_evolution(H, psi0, tlist,
normalize=True, tol=5e-5)
def test_06_2_compare_state_and_unitary_func(self):
"sesolve: compare state and unitary operator evo - func td"
eps = 0.2 * 2*np.pi
delta = 1.0 * 2*np.pi # atom frequency
w0 = 0.5*eps
w1 = 0.5*delta
H0 = w0*sigmaz()
H1 = w1*sigmax()
a = 0.1
alpha = 0.1
td_args = {'a':a, 'alpha':alpha}
H_func = lambda t, args: a*t*H0 + H1*np.exp(-alpha*t)
H = H_func
psi0 = basis(2, 0) # initial state
tlist = np.linspace(0, 20, 200)
self.compare_evolution(H, psi0, tlist,
normalize=False, td_args=td_args, tol=5e-5)
self.compare_evolution(H, psi0, tlist,
normalize=True, td_args=td_args, tol=5e-5)
def test_06_3_compare_state_and_unitary_list_func(self):
"sesolve: compare state and unitary operator evo - list func td"
eps = 0.2 * 2*np.pi
delta = 1.0 * 2*np.pi # atom frequency
w0 = 0.5*eps
w1 = 0.5*delta
H0 = w0*sigmaz()
H1 = w1*sigmax()
a = 0.1
w_a = w0
td_args = {'a':a, 'w_a':w_a}
h0_func = lambda t, args: a*t
h1_func = lambda t, args: np.cos(w_a*t)
H = [[H0, h0_func], [H1, h1_func]]
psi0 = basis(2, 0) # initial state
tlist = np.linspace(0, 20, 200)
self.compare_evolution(H, psi0, tlist,
normalize=False, td_args=td_args, tol=5e-5)
self.compare_evolution(H, psi0, tlist,
normalize=True, td_args=td_args, tol=5e-5)
def test_06_4_compare_state_and_unitary_list_str(self):
"sesolve: compare state and unitary operator evo - list str td"
eps = 0.2 * 2*np.pi
delta = 1.0 * 2*np.pi # atom frequency
w0 = 0.5*eps
w1 = 0.5*delta
H0 = w0*sigmaz()
H1 = w1*sigmax()
w_a = w0
td_args = {'w_a':w_a}
H = [H0, [H1, 'cos(w_a*t)']]
psi0 = basis(2, 0) # initial state
tlist = np.linspace(0, 20, 200)
self.compare_evolution(H, psi0, tlist,
normalize=False, td_args=td_args, tol=5e-5)
self.compare_evolution(H, psi0, tlist,
normalize=True, td_args=td_args, tol=5e-5)
if __name__ == "__main__":
run_module_suite()
| []
| []
| [
"QUTIP_GRAPHICS"
]
| [] | ["QUTIP_GRAPHICS"] | python | 1 | 0 | |
tests/test_modules/test_pmac/test_pmactrajectorypart.py | import pytest
from cothread import cothread
from mock import call
from malcolm.core import Process, TimeoutError
from malcolm.modules.builtin.controllers import ManagerController
from malcolm.modules.pmac.blocks import pmac_trajectory_block
from malcolm.modules.pmac.parts import PmacTrajectoryPart
from malcolm.testutil import ChildTestCase
SHOW_GRAPHS = False
# Uncomment this to show graphs when running under PyCharm
# SHOW_GRAPHS = "PYCHARM_HOSTED" in os.environ
class TestPMACTrajectoryPart(ChildTestCase):
def setUp(self):
self.process = Process("Process")
self.child = self.create_child_block(
pmac_trajectory_block, self.process, mri="PMAC:TRAJ", pv_prefix="PV:PRE"
)
c = ManagerController("PMAC", "/tmp")
self.o = PmacTrajectoryPart(name="pmac", mri="PMAC:TRAJ")
c.add_part(self.o)
self.process.add_controller(c)
self.process.start()
self.b = c.block_view()
self.set_attributes(self.child, trajectoryProgVersion=2)
def tearDown(self):
self.process.stop(timeout=1)
def test_init(self):
assert not self.b.pointsScanned.meta.writeable
assert list(self.b.writeProfile.meta.takes.elements) == [
"timeArray",
"csPort",
"velocityMode",
"userPrograms",
"a",
"b",
"c",
"u",
"v",
"w",
"x",
"y",
"z",
]
assert "executeProfile" in self.b
assert "abortProfile" in self.b
def test_write_profile_build(self):
self.b.writeProfile(
[1, 5, 2],
"BRICK2CS1",
velocityMode=[0, 1, 2],
userPrograms=[0, 8, 0],
x=[1, 2, 3],
z=[4, 4.1, 4.2],
)
assert self.child.handled_requests.mock_calls == [
call.put("numPoints", 4000000),
call.put("cs", "BRICK2CS1"),
call.put("useA", False),
call.put("useB", False),
call.put("useC", False),
call.put("useU", False),
call.put("useV", False),
call.put("useW", False),
call.put("useX", True),
call.put("useY", False),
call.put("useZ", True),
call.put("pointsToBuild", 3),
call.put("positionsX", [1, 2, 3]),
call.put("positionsZ", [4, 4.1, 4.2]),
call.put("timeArray", [1, 5, 2]),
call.put("userPrograms", [0, 8, 0]),
call.put("velocityMode", [0, 1, 2]),
call.post("buildProfile"),
]
def test_write_profile_append(self):
self.b.writeProfile([1, 5, 2], x=[11, 12, 13], z=[14, 14.1, 14.2])
assert self.child.handled_requests.mock_calls == [
call.put("pointsToBuild", 3),
call.put("positionsX", [11, 12, 13]),
call.put("positionsZ", [14, 14.1, 14.2]),
call.put("timeArray", [1, 5, 2]),
call.put("userPrograms", pytest.approx([0, 0, 0])),
call.put("velocityMode", pytest.approx([0, 0, 0])),
call.post("appendProfile"),
]
def test_execute_profile(self):
self.mock_when_value_matches(self.child)
self.b.executeProfile()
assert self.child.handled_requests.mock_calls == [
call.post("executeProfile"),
call.when_value_matches("pointsScanned", 0, None),
]
def test_execute_profile_not_enough(self):
def _handle_post(request):
cothread.Sleep(1)
return [request.return_response(1)]
self.child._handle_post = _handle_post
self.o.total_points = 2
future = self.b.executeProfile_async()
self.set_attributes(self.child, pointsScanned=1)
with self.assertRaises(TimeoutError) as cm:
future.result(timeout=2)
assert str(cm.exception) == (
"Timeout waiting for [When(PMAC:TRAJ.pointsScanned.value, equals_2, "
"last=1)]"
)
| []
| []
| []
| [] | [] | python | 0 | 0 |