| filename (string, length 4-198) | content (string, length 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, length 2-3.9k) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0-0, nullable) | sentence (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
backend/budbua/wsgi.py | """
WSGI config for budbua project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'budbua.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
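The `budbua` WSGI module above only exposes the `application` callable. As a quick local sanity check, a minimal sketch like the following could serve it with the standard library's `wsgiref` server; it assumes Django is installed and that it is run from the `backend/` directory so `budbua.settings` resolves (not a production setup).

```python
# Hedged local-serving sketch; assumes Django is installed and budbua.settings
# is importable (run from the backend/ directory). Not a production setup.
from wsgiref.simple_server import make_server

from budbua.wsgi import application

if __name__ == "__main__":
    with make_server("127.0.0.1", 8000, application) as httpd:
        print("Serving budbua on http://127.0.0.1:8000 ...")
        httpd.serve_forever()
```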
fastmri_recon/evaluate/scripts/updnet_sense_inference.py | import os
import click
import tensorflow as tf
from tqdm import tqdm
from fastmri_recon.config import *
from fastmri_recon.data.datasets.multicoil.fastmri_pyfunc import test_masked_kspace_dataset_from_indexable, test_filenames
from fastmri_recon.models.subclassed_models.updnet import UPDNet
from fastmri_recon.evaluate.utils.write_results import write_result
def updnet_sense_inference(
brain=False,
challenge=False,
run_id='updnet_sense_af4_1588609141',
exp_id='updnet',
n_epochs=200,
contrast=None,
scale_factor=1e6,
af=4,
n_iter=10,
n_layers=3,
base_n_filter=16,
non_linearity='relu',
channel_attention_kwargs=None,
refine_smaps=False,
n_samples=None,
cuda_visible_devices='0123',
):
if brain:
if challenge:
test_path = f'{FASTMRI_DATA_DIR}brain_multicoil_challenge/'
else:
test_path = f'{FASTMRI_DATA_DIR}brain_multicoil_test/'
else:
test_path = f'{FASTMRI_DATA_DIR}multicoil_test_v2/'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(cuda_visible_devices)
af = int(af)
run_params = {
'n_primal': 5,
'n_dual': 1,
'primal_only': True,
'multicoil': True,
'n_layers': n_layers,
'layers_n_channels': [base_n_filter * 2**i for i in range(n_layers)],
'non_linearity': non_linearity,
'n_iter': n_iter,
'channel_attention_kwargs': channel_attention_kwargs,
'refine_smaps': refine_smaps,
'output_shape_spec': brain,
}
test_set = test_masked_kspace_dataset_from_indexable(
test_path,
AF=af,
contrast=contrast,
scale_factor=scale_factor,
n_samples=n_samples,
output_shape_spec=brain,
)
test_set_filenames = test_filenames(
test_path,
AF=af,
contrast=contrast,
n_samples=n_samples,
)
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = UPDNet(**run_params)
fake_inputs = [
tf.zeros([1, 15, 640, 372, 1], dtype=tf.complex64),
tf.zeros([1, 15, 640, 372], dtype=tf.complex64),
tf.zeros([1, 15, 640, 372], dtype=tf.complex64),
]
if brain:
fake_inputs.append(tf.constant([[320, 320]]))
model(fake_inputs)
model.load_weights(f'{CHECKPOINTS_DIR}checkpoints/{run_id}-{n_epochs:02d}.hdf5')
if n_samples is None:
if not brain:
if contrast:
tqdm_total = n_volumes_test[af] // 2
else:
tqdm_total = n_volumes_test[af]
else:
if contrast:
tqdm_total = brain_volumes_per_contrast['test'][af][contrast]
else:
tqdm_total = brain_n_volumes_test[af]
else:
tqdm_total = n_samples
tqdm_desc = f'{exp_id}_{contrast}_{af}'
# TODO: change when the following issue has been dealt with
# https://github.com/tensorflow/tensorflow/issues/38561
@tf.function(experimental_relax_shapes=True)
def predict(t):
return model(t)
for data_example, filename in tqdm(zip(test_set, test_set_filenames), total=tqdm_total, desc=tqdm_desc):
res = predict(data_example)
write_result(
exp_id,
res.numpy(),
filename.numpy().decode('utf-8'),
scale_factor=scale_factor,
brain=brain,
challenge=challenge,
)
@click.command()
@click.option(
'af',
'-a',
type=int,
default=4,
help='The acceleration factor.'
)
@click.option(
'brain',
'-b',
is_flag=True,
help='Whether you want to consider brain data.'
)
@click.option(
'challenge',
'-ch',
is_flag=True,
help='Whether you want to consider challenge data (only for brain).'
)
@click.option(
'n_iter',
'-i',
default=10,
type=int,
help='The number of unrolled iterations of the model. Defaults to 10.',
)
@click.option(
'refine_smaps',
'-rfs',
is_flag=True,
help='Whether you want to use an smaps refiner.'
)
@click.option(
'n_epochs',
'-e',
type=int,
default=10,
help='The number of epochs used in the final training.'
)
@click.option(
'run_id',
'-r',
type=str,
default=None,
help='The run id of the final training.'
)
@click.option(
'exp_id',
'-x',
type=str,
default='updnet',
help='The experiment id.'
)
@click.option(
'contrast',
'-c',
type=str,
default=None,
help='The contrast to use for the training.'
)
def updnet_sense_inference_click(
af,
brain,
challenge,
n_iter,
refine_smaps,
n_epochs,
run_id,
exp_id,
contrast,
):
updnet_sense_inference(
af=af,
brain=brain,
challenge=challenge,
n_iter=n_iter,
refine_smaps=refine_smaps,
n_epochs=n_epochs,
run_id=run_id,
exp_id=exp_id,
contrast=contrast,
)
if __name__ == '__main__':
updnet_sense_inference_click()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
apigw-private-custom-domain-name/src/fn.py | import json
# helper functions
def build_response(code, body):
# headers for cors
headers = {
# "Access-Control-Allow-Origin": "amazonaws.com",
# "Access-Control-Allow-Credentials": True,
"Content-Type": "application/json"
}
# lambda proxy integration
response = {
"isBase64Encoded": False,
"statusCode": code,
"headers": headers,
"body": body
}
return response
def handler(event, context):
payload = {
"reqid": event["requestContext"]["requestId"],
"domain": event["requestContext"]["domainName"],
"api": event["requestContext"]["apiId"],
"host": event["headers"]["Host"],
"path": event["path"],
"resource": event["resource"],
"method": event["httpMethod"],
"x-amzn-vpce-id": event["headers"]["x-amzn-vpce-id"],
"x-forwarded-for": event["headers"]["X-Forwarded-For"]
}
output = build_response(200, json.dumps(payload))
print(json.dumps(output))
return output
| [] | [] | [] | [] | [] | python | null | null | |
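Because the handler only reads a handful of fields from the Lambda proxy event, it can be exercised locally with a stub event. The event below is a hypothetical, trimmed-down approximation of what API Gateway sends (all values are invented), assuming `fn.py` is importable from the current directory.

```python
import fn  # assumes the working directory is apigw-private-custom-domain-name/src/

stub_event = {
    "requestContext": {"requestId": "req-1", "domainName": "api.example.internal", "apiId": "abc123"},
    "headers": {
        "Host": "api.example.internal",
        "x-amzn-vpce-id": "vpce-0123456789abcdef0",
        "X-Forwarded-For": "10.0.0.10",
    },
    "path": "/demo",
    "resource": "/demo",
    "httpMethod": "GET",
}

print(fn.handler(stub_event, context=None))  # context is unused by the handler
```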
test/pkg/suite_init/k8s_docker_registry_data.go | package suite_init
import (
"fmt"
"os"
"github.com/onsi/ginkgo"
"github.com/prashantv/gostub"
)
type K8sDockerRegistryData struct {
K8sDockerRegistryRepo string
}
func NewK8sDockerRegistryData(projectNameData *ProjectNameData, stubsData *StubsData) *K8sDockerRegistryData {
data := &K8sDockerRegistryData{}
SetupK8sDockerRegistryRepo(&data.K8sDockerRegistryRepo, &projectNameData.ProjectName, stubsData.Stubs)
return data
}
func SetupK8sDockerRegistryRepo(repo *string, projectName *string, stubs *gostub.Stubs) bool {
return ginkgo.BeforeEach(func() {
*repo = fmt.Sprintf("%s/%s", os.Getenv("WERF_TEST_K8S_DOCKER_REGISTRY"), *projectName)
stubs.SetEnv("WERF_REPO", *repo)
})
}
| ["\"WERF_TEST_K8S_DOCKER_REGISTRY\""] | [] | ["WERF_TEST_K8S_DOCKER_REGISTRY"] | [] | ["WERF_TEST_K8S_DOCKER_REGISTRY"] | go | 1 | 0 | |
codechef/codechef/asgi.py | """
ASGI config for codechef project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codechef.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
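As with the WSGI module earlier, the ASGI module only exposes `application`. A sketch of serving it with uvicorn; uvicorn is an assumption here (this snippet does not declare an ASGI server), and it would be run from the `codechef/` project root so that `codechef.settings` resolves.

```python
# Hedged sketch only: uvicorn is assumed to be installed.
import uvicorn

from codechef.asgi import application

if __name__ == "__main__":
    uvicorn.run(application, host="127.0.0.1", port=8000)
```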
scheduler/noxfile.py | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=79",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=80")
session.run("coverage", "erase")
| [] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 | |
aws_saml_auth/tests/test_amazon.py | #!/usr/bin/env python
import unittest
import mock
from datetime import datetime
from aws_saml_auth import amazon
from aws_saml_auth import configuration
from os import path
import os
class TestAmazon(unittest.TestCase):
@property
def valid_config(self):
return configuration.Configuration(
idp_id="IDPID",
sp_id="SPID",
username="[email protected]",
password="hunter2",
)
def read_local_file(self, filename):
here = path.abspath(path.dirname(__file__))
with open(path.join(here, filename)) as fp:
return fp.read().encode("utf-8")
def test_sts_client(self):
a = amazon.Amazon(self.valid_config, "dummy-encoded-saml")
self.assertEqual(str(a.sts_client.__class__), "<class 'botocore.client.STS'>")
def test_role_extraction(self):
saml_xml = self.read_local_file("valid-response.xml")
a = amazon.Amazon(self.valid_config, saml_xml)
self.assertIsInstance(a.roles, dict)
list_of_testing_roles = [
"arn:aws:iam::123456789012:role/admin",
"arn:aws:iam::123456789012:role/read-only",
"arn:aws:iam::123456789012:role/test",
]
self.assertEqual(sorted(list(a.roles.keys())), sorted(list_of_testing_roles))
def test_role_extraction_too_many_commas(self):
# See https://github.com/cevoaustralia/aws-google-auth/issues/12
saml_xml = self.read_local_file("too-many-commas.xml")
a = amazon.Amazon(self.valid_config, saml_xml)
self.assertIsInstance(a.roles, dict)
list_of_testing_roles = [
"arn:aws:iam::123456789012:role/admin",
"arn:aws:iam::123456789012:role/read-only",
"arn:aws:iam::123456789012:role/test",
]
self.assertEqual(sorted(list(a.roles.keys())), sorted(list_of_testing_roles))
def test_invalid_saml_too_soon(self):
saml_xml = self.read_local_file("saml-response-too-soon.xml")
self.assertFalse(amazon.Amazon.is_valid_saml_assertion(saml_xml))
def test_invalid_saml_too_late(self):
saml_xml = self.read_local_file("saml-response-too-late.xml")
self.assertFalse(amazon.Amazon.is_valid_saml_assertion(saml_xml))
def test_invalid_saml_expired_before_valid(self):
saml_xml = self.read_local_file("saml-response-expired-before-valid.xml")
self.assertFalse(amazon.Amazon.is_valid_saml_assertion(saml_xml))
def test_invalid_saml_bad_input(self):
self.assertFalse(amazon.Amazon.is_valid_saml_assertion(None))
self.assertFalse(amazon.Amazon.is_valid_saml_assertion("Malformed Base64"))
self.assertFalse(amazon.Amazon.is_valid_saml_assertion(123456))
self.assertFalse(amazon.Amazon.is_valid_saml_assertion(""))
self.assertFalse(
amazon.Amazon.is_valid_saml_assertion("QmFkIFhNTA==")
) # Bad XML
def test_valid_saml(self):
saml_xml = self.read_local_file("saml-response-no-expire.xml")
self.assertTrue(amazon.Amazon.is_valid_saml_assertion(saml_xml))
@mock.patch.dict(
os.environ, {"AWS_PROFILE": "xxx-xxxx", "DEFAULT_AWS_PROFILE": "blart"}
)
def test_sts_client_with_invalid_profile(self):
a = amazon.Amazon(self.valid_config, "dummy-encoded-saml")
self.assertIsNotNone(a.sts_client)
self.assertEqual("xxx-xxxx", os.environ["AWS_PROFILE"])
self.assertEqual("blart", os.environ["DEFAULT_AWS_PROFILE"])
@mock.patch.object(amazon.Amazon, "token", new_callable=mock.PropertyMock)
def test_print_credential_process(self, mock_token):
mock_token.return_value = {
"Credentials": {
"Expiration": datetime.now(),
"AccessKeyId": "some_id",
"SecretAccessKey": "some_secret",
"SessionToken": "some_token",
}
}
a = amazon.Amazon(self.valid_config, "dummy-encoded-saml")
a.print_credential_process()
| [] | [] | ["AWS_PROFILE", "DEFAULT_AWS_PROFILE"] | [] | ["AWS_PROFILE", "DEFAULT_AWS_PROFILE"] | python | 2 | 0 | |
metricbeat/module/elasticsearch/elasticsearch_integration_test.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build integration
package elasticsearch_test
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/tests/compose"
"github.com/elastic/beats/metricbeat/helper/elastic"
mbtest "github.com/elastic/beats/metricbeat/mb/testing"
"github.com/elastic/beats/metricbeat/module/elasticsearch"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/ccr"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/index"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/index_recovery"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/index_summary"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/ml_job"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/node"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/node_stats"
_ "github.com/elastic/beats/metricbeat/module/elasticsearch/shard"
)
var metricSets = []string{
"ccr",
"cluster_stats",
"index",
"index_recovery",
"index_summary",
"ml_job",
"node",
"node_stats",
"shard",
}
func TestFetch(t *testing.T) {
compose.EnsureUp(t, "elasticsearch")
host := net.JoinHostPort(getEnvHost(), getEnvPort())
err := createIndex(host)
assert.NoError(t, err)
version, err := getElasticsearchVersion(host)
if err != nil {
t.Fatal("getting elasticsearch version", err)
}
err = enableTrialLicense(host, version)
assert.NoError(t, err)
err = createMLJob(host, version)
assert.NoError(t, err)
err = createCCRStats(host)
assert.NoError(t, err)
for _, metricSet := range metricSets {
checkSkip(t, metricSet, version)
t.Run(metricSet, func(t *testing.T) {
f := mbtest.NewReportingMetricSetV2Error(t, getConfig(metricSet))
events, errs := mbtest.ReportingFetchV2Error(f)
assert.Empty(t, errs)
if !assert.NotEmpty(t, events) {
t.FailNow()
}
t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(),
events[0].BeatEvent("elasticsearch", metricSet).Fields.StringToPrint())
})
}
}
func TestData(t *testing.T) {
compose.EnsureUp(t, "elasticsearch")
host := net.JoinHostPort(getEnvHost(), getEnvPort())
version, err := getElasticsearchVersion(host)
if err != nil {
t.Fatal("getting elasticsearch version", err)
}
for _, metricSet := range metricSets {
checkSkip(t, metricSet, version)
t.Run(metricSet, func(t *testing.T) {
f := mbtest.NewReportingMetricSetV2Error(t, getConfig(metricSet))
err := mbtest.WriteEventsReporterV2Error(f, t, metricSet)
if err != nil {
t.Fatal("write", err)
}
})
}
}
// GetEnvHost returns host for Elasticsearch
func getEnvHost() string {
host := os.Getenv("ES_HOST")
if len(host) == 0 {
host = "127.0.0.1"
}
return host
}
// GetEnvPort returns port for Elasticsearch
func getEnvPort() string {
port := os.Getenv("ES_PORT")
if len(port) == 0 {
port = "9200"
}
return port
}
// GetConfig returns config for elasticsearch module
func getConfig(metricset string) map[string]interface{} {
return map[string]interface{}{
"module": elasticsearch.ModuleName,
"metricsets": []string{metricset},
"hosts": []string{getEnvHost() + ":" + getEnvPort()},
"index_recovery.active_only": false,
}
}
// createIndex creates an elasticsearch index in case it does not exist yet
func createIndex(host string) error {
client := &http.Client{}
if checkExists("http://" + host + "/testindex") {
return nil
}
req, err := http.NewRequest("PUT", "http://"+host+"/testindex", nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("HTTP error %d: %s", resp.StatusCode, resp.Status)
}
return nil
}
// enableTrialLicense enables a trial license in case it is not enabled yet
func enableTrialLicense(host string, version *common.Version) error {
client := &http.Client{}
enabled, err := checkTrialLicenseEnabled(host, version)
if err != nil {
return err
}
if enabled {
return nil
}
var enableXPackURL string
if version.Major < 7 {
enableXPackURL = "/_xpack/license/start_trial?acknowledge=true"
} else {
enableXPackURL = "/_license/start_trial?acknowledge=true"
}
req, err := http.NewRequest("POST", "http://"+host+enableXPackURL, nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return fmt.Errorf("could not enable trial license, response = %v", string(body))
}
return nil
}
// checkTrialLicenseEnabled checks whether a trial license is already enabled
func checkTrialLicenseEnabled(host string, version *common.Version) (bool, error) {
var licenseURL string
if version.Major < 7 {
licenseURL = "/_xpack/license"
} else {
licenseURL = "/_license"
}
resp, err := http.Get("http://" + host + licenseURL)
if err != nil {
return false, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false, err
}
var data struct {
License struct {
Status string `json:"status"`
Type string `json:"type"`
} `json:"license"`
}
err = json.Unmarshal(body, &data)
if err != nil {
return false, err
}
active := data.License.Status == "active"
isTrial := data.License.Type == "trial"
return active && isTrial, nil
}
func createMLJob(host string, version *common.Version) error {
mlJob, err := ioutil.ReadFile("ml_job/_meta/test/test_job.json")
if err != nil {
return err
}
var jobURL string
if version.Major < 7 {
jobURL = "/_xpack/ml/anomaly_detectors/total-requests"
} else {
jobURL = "/_ml/anomaly_detectors/total-requests"
}
if checkExists("http://" + host + jobURL) {
return nil
}
body, resp, err := httpPutJSON(host, jobURL, mlJob)
if err != nil {
return errors.Wrap(err, "error doing PUT request when creating ML job")
}
if resp.StatusCode != 200 {
return fmt.Errorf("HTTP error loading ml job %d: %s, %s", resp.StatusCode, resp.Status, string(body))
}
return nil
}
func createCCRStats(host string) error {
err := setupCCRRemote(host)
if err != nil {
return errors.Wrap(err, "error setup CCR remote settings")
}
err = createCCRLeaderIndex(host)
if err != nil {
return errors.Wrap(err, "error creating CCR leader index")
}
err = createCCRFollowerIndex(host)
if err != nil {
return errors.Wrap(err, "error creating CCR follower index")
}
// Give ES sufficient time to do the replication and produce stats
checkCCRStats := func() (bool, error) {
return checkCCRStatsExists(host)
}
exists, err := waitForSuccess(checkCCRStats, 300, 5)
if err != nil {
return errors.Wrap(err, "error checking if CCR stats exist")
}
if !exists {
return fmt.Errorf("expected to find CCR stats but not found")
}
return nil
}
func checkCCRStatsExists(host string) (bool, error) {
resp, err := http.Get("http://" + host + "/_ccr/stats")
if err != nil {
return false, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false, err
}
var data struct {
FollowStats struct {
Indices []map[string]interface{} `json:"indices"`
} `json:"follow_stats"`
}
err = json.Unmarshal(body, &data)
if err != nil {
return false, err
}
return len(data.FollowStats.Indices) > 0, nil
}
func setupCCRRemote(host string) error {
remoteSettings, err := ioutil.ReadFile("ccr/_meta/test/test_remote_settings.json")
if err != nil {
return err
}
settingsURL := "/_cluster/settings"
_, _, err = httpPutJSON(host, settingsURL, remoteSettings)
return err
}
func createCCRLeaderIndex(host string) error {
leaderIndex, err := ioutil.ReadFile("ccr/_meta/test/test_leader_index.json")
if err != nil {
return err
}
indexURL := "/pied_piper"
_, _, err = httpPutJSON(host, indexURL, leaderIndex)
return err
}
func createCCRFollowerIndex(host string) error {
followerIndex, err := ioutil.ReadFile("ccr/_meta/test/test_follower_index.json")
if err != nil {
return err
}
followURL := "/rats/_ccr/follow"
_, _, err = httpPutJSON(host, followURL, followerIndex)
return err
}
func checkExists(url string) bool {
resp, err := http.Get(url)
if err != nil {
return false
}
resp.Body.Close()
// Entry exists
if resp.StatusCode == 200 {
return true
}
return false
}
func checkSkip(t *testing.T, metricset string, version *common.Version) {
if metricset != "ccr" {
return
}
isCCRStatsAPIAvailable := elastic.IsFeatureAvailable(version, elasticsearch.CCRStatsAPIAvailableVersion)
if !isCCRStatsAPIAvailable {
t.Skip("elasticsearch CCR stats API is not available until " + elasticsearch.CCRStatsAPIAvailableVersion.String())
}
}
func getElasticsearchVersion(elasticsearchHostPort string) (*common.Version, error) {
resp, err := http.Get("http://" + elasticsearchHostPort + "/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var data common.MapStr
err = json.Unmarshal(body, &data)
if err != nil {
return nil, err
}
version, err := data.GetValue("version.number")
if err != nil {
return nil, err
}
return common.NewVersion(version.(string))
}
func httpPutJSON(host, path string, body []byte) ([]byte, *http.Response, error) {
req, err := http.NewRequest("PUT", "http://"+host+path, bytes.NewReader(body))
if err != nil {
return nil, nil, err
}
req.Header.Add("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
return body, resp, nil
}
type checkSuccessFunction func() (bool, error)
func waitForSuccess(f checkSuccessFunction, retryIntervalMs time.Duration, numAttempts int) (bool, error) {
for numAttempts > 0 {
success, err := f()
if err != nil {
return false, err
}
if success {
return success, nil
}
time.Sleep(retryIntervalMs * time.Millisecond)
numAttempts--
}
return false, nil
}
| ["\"ES_HOST\"", "\"ES_PORT\""] | [] | ["ES_PORT", "ES_HOST"] | [] | ["ES_PORT", "ES_HOST"] | go | 2 | 0 | |
gunicorn/config.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# Please remember to run "make -C docs html" after update "desc" attributes.
import argparse
import copy
import grp
import inspect
import os
import pwd
import re
import shlex
import ssl
import sys
import textwrap
from gunicorn import __version__, util
from gunicorn.errors import ConfigError
from gunicorn.reloader import reloader_engines
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def make_settings(ignore=None):
settings = {}
ignore = ignore or ()
for s in KNOWN_SETTINGS:
setting = s()
if setting.name in ignore:
continue
settings[setting.name] = setting.copy()
return settings
def auto_int(_, x):
# for compatible with octal numbers in python3
if re.match(r'0(\d)', x, re.IGNORECASE):
x = x.replace('0', '0o', 1)
return int(x, 0)
class Config(object):
def __init__(self, usage=None, prog=None):
self.settings = make_settings()
self.usage = usage
self.prog = prog or os.path.basename(sys.argv[0])
self.env_orig = os.environ.copy()
def __str__(self):
lines = []
kmax = max(len(k) for k in self.settings)
for k in sorted(self.settings):
v = self.settings[k].value
if callable(v):
v = "<{}()>".format(v.__qualname__)
lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax))
return "\n".join(lines)
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super().__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def get_cmd_args_from_env(self):
if 'GUNICORN_CMD_ARGS' in self.env_orig:
return shlex.split(self.env_orig['GUNICORN_CMD_ARGS'])
return []
def parser(self):
kwargs = {
"usage": self.usage,
"prog": self.prog
}
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument("-v", "--version",
action="version", default=argparse.SUPPRESS,
version="%(prog)s (version " + __version__ + ")\n",
help="show program's version number and exit")
parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
keys = sorted(self.settings, key=self.settings.__getitem__)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class_str(self):
uri = self.settings['worker_class'].get()
# are we using a threaded worker?
is_sync = uri.endswith('SyncWorker') or uri == 'sync'
if is_sync and self.threads > 1:
return "gthread"
return uri
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
# are we using a threaded worker?
is_sync = uri.endswith('SyncWorker') or uri == 'sync'
if is_sync and self.threads > 1:
uri = "gunicorn.workers.gthread.ThreadWorker"
worker_class = util.load_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def address(self):
s = self.settings['bind'].get()
return [util.parse_address(util.bytes_to_str(bind)) for bind in s]
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
@property
def logger_class(self):
uri = self.settings['logger_class'].get()
if uri == "simple":
# support the default
uri = LoggerClass.default
# if default logger is in use, and statsd is on, automagically switch
# to the statsd logger
if uri == LoggerClass.default:
if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None:
uri = "gunicorn.instrument.statsd.Statsd"
logger_class = util.load_class(
uri,
default="gunicorn.glogging.Logger",
section="gunicorn.loggers")
if hasattr(logger_class, "install"):
logger_class.install()
return logger_class
@property
def is_ssl(self):
return self.certfile or self.keyfile
@property
def ssl_options(self):
opts = {}
for name, value in self.settings.items():
if value.section == 'SSL':
opts[name] = value.get()
return opts
@property
def env(self):
raw_env = self.settings['raw_env'].get()
env = {}
if not raw_env:
return env
for e in raw_env:
s = util.bytes_to_str(e)
try:
k, v = s.split('=', 1)
except ValueError:
raise RuntimeError("environment setting %r invalid" % s)
env[k] = v
return env
@property
def sendfile(self):
if self.settings['sendfile'].get() is not None:
return False
if 'SENDFILE' in os.environ:
sendfile = os.environ['SENDFILE'].lower()
return sendfile in ['y', '1', 'yes', 'true']
return True
@property
def reuse_port(self):
return self.settings['reuse_port'].get()
@property
def paste_global_conf(self):
raw_global_conf = self.settings['raw_paste_global_conf'].get()
if raw_global_conf is None:
return None
global_conf = {}
for e in raw_global_conf:
s = util.bytes_to_str(e)
try:
k, v = re.split(r'(?<!\\)=', s, 1)
except ValueError:
raise RuntimeError("environment setting %r invalid" % s)
k = k.replace('\\=', '=')
v = v.replace('\\=', '=')
global_conf[k] = v
return global_conf
class SettingMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super().__new__
parents = [b for b in bases if isinstance(b, SettingMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
attrs["order"] = len(KNOWN_SETTINGS)
attrs["validator"] = staticmethod(attrs["validator"])
new_class = super_new(cls, name, bases, attrs)
new_class.fmt_desc(attrs.get("desc", ""))
KNOWN_SETTINGS.append(new_class)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
setattr(cls, "desc", desc)
setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
name = None
value = None
section = None
cli = None
validator = None
type = None
meta = None
action = None
default = None
short = None
desc = None
nargs = None
const = None
def __init__(self):
if self.default is not None:
self.set(self.default)
def add_option(self, parser):
if not self.cli:
return
args = tuple(self.cli)
help_txt = "%s [%s]" % (self.short, self.default)
help_txt = help_txt.replace("%", "%%")
kwargs = {
"dest": self.name,
"action": self.action or "store",
"type": self.type or str,
"default": None,
"help": help_txt
}
if self.meta is not None:
kwargs['metavar'] = self.meta
if kwargs["action"] != "store":
kwargs.pop("type")
if self.nargs is not None:
kwargs["nargs"] = self.nargs
if self.const is not None:
kwargs["const"] = self.const
parser.add_argument(*args, **kwargs)
def copy(self):
return copy.copy(self)
def get(self):
return self.value
def set(self, val):
if not callable(self.validator):
raise TypeError('Invalid validator: %s' % self.name)
self.value = self.validator(val)
def __lt__(self, other):
return (self.section == other.section and
self.order < other.order)
__cmp__ = __lt__
def __repr__(self):
return "<%s.%s object at %x with value %r>" % (
self.__class__.__module__,
self.__class__.__name__,
id(self),
self.value,
)
Setting = SettingMeta('Setting', (Setting,), {})
def validate_bool(val):
if val is None:
return
if isinstance(val, bool):
return val
if not isinstance(val, str):
raise TypeError("Invalid type for casting: %s" % val)
if val.lower().strip() == "true":
return True
elif val.lower().strip() == "false":
return False
else:
raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
if not isinstance(val, dict):
raise TypeError("Value is not a dictionary: %s " % val)
return val
def validate_pos_int(val):
if not isinstance(val, int):
val = int(val, 0)
else:
# Booleans are ints!
val = int(val)
if val < 0:
raise ValueError("Value must be positive: %s" % val)
return val
def validate_ssl_version(val):
ssl_versions = {}
for protocol in [p for p in dir(ssl) if p.startswith("PROTOCOL_")]:
ssl_versions[protocol[9:]] = getattr(ssl, protocol)
if val in ssl_versions:
# string matching PROTOCOL_...
return ssl_versions[val]
try:
intval = validate_pos_int(val)
if intval in ssl_versions.values():
# positive int matching a protocol int constant
return intval
except (ValueError, TypeError):
# negative integer or not an integer
# drop this in favour of the more descriptive ValueError below
pass
raise ValueError("Invalid ssl_version: %s. Valid options: %s"
% (val, ', '.join(ssl_versions)))
def validate_string(val):
if val is None:
return None
if not isinstance(val, str):
raise TypeError("Not a string: %s" % val)
return val.strip()
def validate_file_exists(val):
if val is None:
return None
if not os.path.exists(val):
raise ValueError("File %s does not exists." % val)
return val
def validate_list_string(val):
if not val:
return []
# legacy syntax
if isinstance(val, str):
val = [val]
return [validate_string(v) for v in val]
def validate_list_of_existing_files(val):
return [validate_file_exists(v) for v in validate_list_string(val)]
def validate_string_to_list(val):
val = validate_string(val)
if not val:
return []
return [v.strip() for v in val.split(",") if v]
def validate_class(val):
if inspect.isfunction(val) or inspect.ismethod(val):
val = val()
if inspect.isclass(val):
return val
return validate_string(val)
def validate_callable(arity):
def _validate_callable(val):
if isinstance(val, str):
try:
mod_name, obj_name = val.rsplit(".", 1)
except ValueError:
raise TypeError("Value '%s' is not import string. "
"Format: module[.submodules...].object" % val)
try:
mod = __import__(mod_name, fromlist=[obj_name])
val = getattr(mod, obj_name)
except ImportError as e:
raise TypeError(str(e))
except AttributeError:
raise TypeError("Can not load '%s' from '%s'"
"" % (obj_name, mod_name))
if not callable(val):
raise TypeError("Value is not callable: %s" % val)
if arity != -1 and arity != util.get_arity(val):
raise TypeError("Value must have an arity of: %s" % arity)
return val
return _validate_callable
def validate_user(val):
if val is None:
return os.geteuid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return pwd.getpwnam(val).pw_uid
except KeyError:
raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
if val is None:
return os.getegid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return grp.getgrnam(val).gr_gid
except KeyError:
raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
val = validate_callable(-1)(val)
largs = util.get_arity(val)
if largs == 4:
return val
elif largs == 3:
return lambda worker, req, env, _r: val(worker, req, env)
elif largs == 2:
return lambda worker, req, _e, _r: val(worker, req)
else:
raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("can't chdir to %r" % val)
return path
def validate_hostport(val):
val = validate_string(val)
if val is None:
return None
elements = val.split(":")
if len(elements) == 2:
return (elements[0], int(elements[1]))
else:
raise TypeError("Value must consist of: hostname:port")
def validate_reload_engine(val):
if val not in reloader_engines:
raise ConfigError("Invalid reload_engine: %r" % val)
return val
def get_default_config_file():
config_path = os.path.join(os.path.abspath(os.getcwd()),
'gunicorn.conf.py')
if os.path.exists(config_path):
return config_path
return None
class ConfigFile(Setting):
name = "config"
section = "Config File"
cli = ["-c", "--config"]
meta = "CONFIG"
validator = validate_string
default = "./gunicorn.conf.py"
desc = """\
The Gunicorn config file.
A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``.
Only has an effect when specified on the command line or as part of an
application specific configuration.
By default, a file named ``gunicorn.conf.py`` will be read from the same
directory where gunicorn is being run.
.. versionchanged:: 19.4
Loading the config from a Python module requires the ``python:``
prefix.
"""
class WSGIApp(Setting):
name = "wsgi_app"
section = "Config File"
meta = "STRING"
validator = validate_string
default = None
desc = """\
A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``.
.. versionadded:: 20.1.0
"""
class Bind(Setting):
name = "bind"
action = "append"
section = "Server Socket"
cli = ["-b", "--bind"]
meta = "ADDRESS"
validator = validate_list_string
if 'PORT' in os.environ:
default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
else:
default = ['127.0.0.1:8000']
desc = """\
The socket to bind.
A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``,
``fd://FD``. An IP is a valid ``HOST``.
.. versionchanged:: 20.0
Support for ``fd://FD`` got added.
Multiple addresses can be bound. ex.::
$ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
will bind the `test:app` application on localhost both on ipv6
and ipv4 interfaces.
If the ``PORT`` environment variable is defined, the default
is ``['0.0.0.0:$PORT']``. If it is not defined, the default
is ``['127.0.0.1:8000']``.
"""
class Backlog(Setting):
name = "backlog"
section = "Server Socket"
cli = ["--backlog"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2048
desc = """\
The maximum number of pending connections.
This refers to the number of clients that can be waiting to be served.
Exceeding this number results in the client getting an error when
attempting to connect. It should only affect servers under significant
load.
Must be a positive integer. Generally set in the 64-2048 range.
"""
class Workers(Setting):
name = "workers"
section = "Worker Processes"
cli = ["-w", "--workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = int(os.environ.get("WEB_CONCURRENCY", 1))
desc = """\
The number of worker processes for handling requests.
A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
You'll want to vary this a bit to find the best for your particular
application's work load.
By default, the value of the ``WEB_CONCURRENCY`` environment variable,
which is set by some Platform-as-a-Service providers such as Heroku. If
it is not defined, the default is ``1``.
"""
class WorkerClass(Setting):
name = "worker_class"
section = "Worker Processes"
cli = ["-k", "--worker-class"]
meta = "STRING"
validator = validate_class
default = "sync"
desc = """\
The type of workers to use.
The default class (``sync``) should handle most "normal" types of
workloads. You'll want to read :doc:`design` for information on when
you might want to choose one of the other worker classes. Required
libraries may be installed using setuptools' ``extras_require`` feature.
A string referring to one of the following bundled classes:
* ``sync``
* ``eventlet`` - Requires eventlet >= 0.24.1 (or install it via
``pip install gunicorn[eventlet]``)
* ``gevent`` - Requires gevent >= 1.4 (or install it via
``pip install gunicorn[gevent]``)
* ``tornado`` - Requires tornado >= 0.2 (or install it via
``pip install gunicorn[tornado]``)
* ``gthread`` - Python 2 requires the futures package to be installed
(or install it via ``pip install gunicorn[gthread]``)
Optionally, you can provide your own worker by giving Gunicorn a
Python path to a subclass of ``gunicorn.workers.base.Worker``.
This alternative syntax will load the gevent class:
``gunicorn.workers.ggevent.GeventWorker``.
"""
class WorkerThreads(Setting):
name = "threads"
section = "Worker Processes"
cli = ["--threads"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1
desc = """\
The number of worker threads for handling requests.
Run each worker with the specified number of threads.
A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
You'll want to vary this a bit to find the best for your particular
application's work load.
If it is not defined, the default is ``1``.
This setting only affects the Gthread worker type.
.. note::
If you try to use the ``sync`` worker type and set the ``threads``
setting to more than 1, the ``gthread`` worker type will be used
instead.
"""
class WorkerConnections(Setting):
name = "worker_connections"
section = "Worker Processes"
cli = ["--worker-connections"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1000
desc = """\
The maximum number of simultaneous clients.
This setting only affects the Eventlet and Gevent worker types.
"""
class MaxRequests(Setting):
name = "max_requests"
section = "Worker Processes"
cli = ["--max-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of requests a worker will process before restarting.
Any value greater than zero will limit the number of requests a worker
will process before automatically restarting. This is a simple method
to help limit the damage of memory leaks.
If this is set to zero (the default) then the automatic worker
restarts are disabled.
"""
class MaxRequestsJitter(Setting):
name = "max_requests_jitter"
section = "Worker Processes"
cli = ["--max-requests-jitter"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum jitter to add to the *max_requests* setting.
The jitter causes the restart per worker to be randomized by
``randint(0, max_requests_jitter)``. This is intended to stagger worker
restarts to avoid all workers restarting at the same time.
.. versionadded:: 19.2
"""
class WaitForNewWorkers(Setting):
name = "wait_for_new_workers"
section = "Worker Processes"
cli = ["--wait-for-new-workers"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Wait for a new worker to become ready before killing an old worker.
"""
class MaxRestartingWorkers(Setting):
name = "max_restarting_workers"
section = "Worker Processes"
cli = ["--max-restarting-workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of workers which can be restarted at the same time.
"""
class WarmupRequests(Setting):
name = "warmup_requests"
section = "Worker Processes"
cli = ["--warmup-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The number of requests a new worker needs to handle until the old worker can be killed.
"""
class Timeout(Setting):
name = "timeout"
section = "Worker Processes"
cli = ["-t", "--timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Workers silent for more than this many seconds are killed and restarted.
Value is a positive number or 0. Setting it to 0 has the effect of
infinite timeouts by disabling timeouts for all workers entirely.
Generally, the default of thirty seconds should suffice. Only set this
noticeably higher if you're sure of the repercussions for sync workers.
For the non sync workers it just means that the worker process is still
communicating and is not tied to the length of time required to handle a
single request.
"""
class GracefulTimeout(Setting):
name = "graceful_timeout"
section = "Worker Processes"
cli = ["--graceful-timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Timeout for graceful workers restart.
After receiving a restart signal, workers have this much time to finish
serving requests. Workers still alive after the timeout (starting from
the receipt of the restart signal) are force killed.
"""
class Keepalive(Setting):
name = "keepalive"
section = "Worker Processes"
cli = ["--keep-alive"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2
desc = """\
The number of seconds to wait for requests on a Keep-Alive connection.
Generally set in the 1-5 seconds range for servers with direct connection
to the client (e.g. when you don't have separate load balancer). When
Gunicorn is deployed behind a load balancer, it often makes sense to
set this to a higher value.
.. note::
``sync`` worker does not support persistent connections and will
ignore this option.
"""
class LimitRequestLine(Setting):
name = "limit_request_line"
section = "Security"
cli = ["--limit-request-line"]
meta = "INT"
validator = validate_pos_int
type = int
default = 4094
desc = """\
The maximum size of HTTP request line in bytes.
This parameter is used to limit the allowed size of a client's
HTTP request-line. Since the request-line consists of the HTTP
method, URI, and protocol version, this directive places a
restriction on the length of a request-URI allowed for a request
on the server. A server needs this value to be large enough to
hold any of its resource names, including any information that
might be passed in the query part of a GET request. Value is a number
from 0 (unlimited) to 8190.
This parameter can be used to prevent any DDOS attack.
"""
class LimitRequestFields(Setting):
name = "limit_request_fields"
section = "Security"
cli = ["--limit-request-fields"]
meta = "INT"
validator = validate_pos_int
type = int
default = 100
desc = """\
Limit the number of HTTP headers fields in a request.
This parameter is used to limit the number of headers in a request to
prevent DDOS attack. Used with the *limit_request_field_size* it allows
more safety. By default this value is 100 and can't be larger than
32768.
"""
class LimitRequestFieldSize(Setting):
name = "limit_request_field_size"
section = "Security"
cli = ["--limit-request-field_size"]
meta = "INT"
validator = validate_pos_int
type = int
default = 8190
desc = """\
Limit the allowed size of an HTTP request header field.
Value is a positive number or 0. Setting it to 0 will allow unlimited
header field sizes.
.. warning::
Setting this parameter to a very high or unlimited value can open
up for DDOS attacks.
"""
class EnrichResponse(Setting):
name = "enrich_response"
section = 'Debugging'
cli = ['--enrich-response']
validator = validate_bool
action = 'store_true'
default = False
desc = '''\
Add extra information in the http response body. Works only for sync worker type.
While handling a request, a few timestamps are taken (in microseconds, since 1st of January, 1970):
* ``spawning time`` - when worker object is initialized (this is before forking the new process)
* ``time 1`` - immediately after entering "handle_request"
* ``time 2`` - just before getting the response
* ``time 3`` - immediately after getting the response
The following information is inserted into the response body:
* ``spawn``: spawning time
* ``t1``: time1
* ``d1``: time2 - time1
* ``d2``: time3 - time2
* ``pid``: the pid of the worker handling the request
* ``nr``: number of requests handled by this worker so far
* ``max``: number of requests planned for this worker (this can be exceeded a little bit because of the rolling restarting strategy)
The new response is a json with two keys:
"res" contains the original response
"info" contains the extra information
'''
class Reload(Setting):
name = "reload"
section = 'Debugging'
cli = ['--reload']
validator = validate_bool
action = 'store_true'
default = False
desc = '''\
Restart workers when code changes.
This setting is intended for development. It will cause workers to be
restarted whenever application code changes.
The reloader is incompatible with application preloading. When using a
paste configuration be sure that the server block does not import any
application code or the reload will not work as designed.
The default behavior is to attempt inotify with a fallback to file
system polling. Generally, inotify should be preferred if available
because it consumes less system resources.
.. note::
In order to use the inotify reloader, you must have the ``inotify``
package installed.
'''
class ReloadEngine(Setting):
name = "reload_engine"
section = "Debugging"
cli = ["--reload-engine"]
meta = "STRING"
validator = validate_reload_engine
default = "auto"
desc = """\
The implementation that should be used to power :ref:`reload`.
Valid engines are:
* ``'auto'``
* ``'poll'``
* ``'inotify'`` (requires inotify)
.. versionadded:: 19.7
"""
class ReloadExtraFiles(Setting):
name = "reload_extra_files"
action = "append"
section = "Debugging"
cli = ["--reload-extra-file"]
meta = "FILES"
validator = validate_list_of_existing_files
default = []
desc = """\
Extends :ref:`reload` option to also watch and reload on additional files
(e.g., templates, configurations, specifications, etc.).
.. versionadded:: 19.8
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class ConfigCheck(Setting):
name = "check_config"
section = "Debugging"
cli = ["--check-config"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Check the configuration and exit. The exit status is 0 if the
configuration is correct, and 1 if the configuration is incorrect.
"""
class PrintConfig(Setting):
name = "print_config"
section = "Debugging"
cli = ["--print-config"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Print the configuration settings as fully resolved. Implies :ref:`check-config`.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
speed up server boot times. Although, if you defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Sendfile(Setting):
name = "sendfile"
section = "Server Mechanics"
cli = ["--no-sendfile"]
validator = validate_bool
action = "store_const"
const = False
desc = """\
Disables the use of ``sendfile()``.
If not set, the value of the ``SENDFILE`` environment variable is used
to enable or disable its usage.
.. versionadded:: 19.2
.. versionchanged:: 19.4
Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow
disabling.
.. versionchanged:: 19.6
added support for the ``SENDFILE`` environment variable
"""
class ReusePort(Setting):
name = "reuse_port"
section = "Server Mechanics"
cli = ["--reuse-port"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Set the ``SO_REUSEPORT`` flag on the listening socket.
.. versionadded:: 19.8
"""
class Chdir(Setting):
name = "chdir"
section = "Server Mechanics"
cli = ["--chdir"]
validator = validate_chdir
default = util.getcwd()
desc = """\
Change directory to specified directory before loading apps.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Env(Setting):
name = "raw_env"
action = "append"
section = "Server Mechanics"
cli = ["-e", "--env"]
meta = "ENV"
validator = validate_list_string
default = []
desc = """\
Set environment variables in the execution environment.
Should be a list of strings in the ``key=value`` format.
For example on the command line:
.. code-block:: console
$ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
Or in the configuration file:
.. code-block:: python
raw_env = ["FOO=1"]
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class WorkerTmpDir(Setting):
name = "worker_tmp_dir"
section = "Server Mechanics"
cli = ["--worker-tmp-dir"]
meta = "DIR"
validator = validate_string
default = None
desc = """\
A directory to use for the worker heartbeat temporary file.
If not set, the default temporary directory will be used.
.. note::
The current heartbeat system involves calling ``os.fchmod`` on
temporary file handlers and may block a worker for arbitrary time
if the directory is on a disk-backed filesystem.
See :ref:`blocking-os-fchmod` for more detailed information
and a solution for avoiding this problem.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not
change the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
Switch worker process to run as this group.
A valid group id (as an integer) or the name of a group that can be
retrieved with a call to ``grp.getgrnam(value)`` or ``None`` to not
change the worker processes group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = auto_int
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the ``os.umask(mode)`` call or a string compatible
with ``int(value, 0)`` (``0`` means Python guesses the base, so values
like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal
representations)
"""
class Initgroups(Setting):
name = "initgroups"
section = "Server Mechanics"
cli = ["--initgroups"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
If true, set the worker process's group access list with all of the
groups of which the specified username is a member, plus the specified
group id.
.. versionadded:: 19.7
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-PROTO": "https",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. If the source IP is permitted by
``forwarded-allow-ips`` (below), *and* at least one request header matches
a key-value pair listed in this dictionary, then Gunicorn will set
``wsgi.url_scheme`` to ``https``, so your application can tell that the
request is secure.
If the other headers listed in this dictionary are not present in the request, they will be ignored,
but if the other headers are present and do not match the provided values, then
the request will fail to parse. See the note below for more detailed examples of this behaviour.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class ForwardedAllowIPS(Setting):
name = "forwarded_allow_ips"
section = "Server Mechanics"
cli = ["--forwarded-allow-ips"]
meta = "STRING"
validator = validate_string_to_list
default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
desc = """\
Front-end's IPs from which Gunicorn is allowed to handle the secure
scheme headers (comma separated).
Set to ``*`` to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment).
By default, the value of the ``FORWARDED_ALLOW_IPS`` environment
variable. If it is not defined, the default is ``"127.0.0.1"``.
.. note::
The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of
``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we
have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``:
.. code::
secure_scheme_headers = {
'X-FORWARDED-PROTOCOL': 'ssl',
'X-FORWARDED-PROTO': 'https',
'X-FORWARDED-SSL': 'on'
}
.. list-table::
:header-rows: 1
:align: center
:widths: auto
* - ``forwarded-allow-ips``
- Secure Request Headers
- Result
- Explanation
* - .. code::
["127.0.0.1"]
- .. code::
X-Forwarded-Proto: https
- .. code::
wsgi.url_scheme = "http"
- IP address was not allowed
* - .. code::
"*"
- <none>
- .. code::
wsgi.url_scheme = "http"
- IP address allowed, but no secure headers provided
* - .. code::
"*"
- .. code::
X-Forwarded-Proto: https
- .. code::
wsgi.url_scheme = "https"
- IP address allowed, one request header matched
* - .. code::
["134.213.44.18"]
- .. code::
X-Forwarded-Ssl: on
X-Forwarded-Proto: http
- ``InvalidSchemeHeaders()`` raised
- IP address allowed, but the two secure headers disagreed on if HTTPS was used
"""
class AccessLog(Setting):
name = "accesslog"
section = "Logging"
cli = ["--access-logfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The Access log file to write to.
``'-'`` means log to stdout.
"""
class DisableRedirectAccessToSyslog(Setting):
name = "disable_redirect_access_to_syslog"
section = "Logging"
cli = ["--disable-redirect-access-to-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Disable redirecting access logs to syslog.
.. versionadded:: 19.8
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
The access log format.
=========== ===========
Identifier Description
=========== ===========
h remote address
l ``'-'``
u user name
t date of the request
r status line (e.g. ``GET / HTTP/1.1``)
m request method
U URL path without query string
q query string
H protocol
s status
B response length
b response length or ``'-'`` (CLF format)
f referer
a user agent
T request time in seconds
M request time in milliseconds
D request time in microseconds
L request time in decimal seconds
p process ID
{header}i request header
{header}o response header
{variable}e environment variable
=========== ===========
Use lowercase for header and environment variable names, and put
``{...}x`` names inside ``%(...)s``. For example::
%({x-forwarded-for}i)s
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = '-'
desc = """\
The Error log file to write to.
Using ``'-'`` for FILE makes gunicorn log to stderr.
.. versionchanged:: 19.2
Log to stderr by default.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* ``'debug'``
* ``'info'``
* ``'warning'``
* ``'error'``
* ``'critical'``
"""
class CaptureOutput(Setting):
name = "capture_output"
section = "Logging"
cli = ["--capture-output"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Redirect stdout/stderr to specified file in :ref:`errorlog`.
.. versionadded:: 19.6
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "gunicorn.glogging.Logger"
desc = """\
The logger you want to use to log events in Gunicorn.
The default class (``gunicorn.glogging.Logger``) handles most
normal usages in logging. It provides error and access logging.
You can provide your own logger by giving Gunicorn a Python path to a
class that quacks like ``gunicorn.glogging.Logger``.
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class LogConfigDict(Setting):
name = "logconfig_dict"
section = "Logging"
validator = validate_dict
default = {}
desc = """\
The log config dictionary to use, using the standard Python
logging module's dictionary configuration format. This option
takes precedence over the :ref:`logconfig` option, which uses the
older file configuration format.
Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig
.. versionadded:: 19.8
"""
class SyslogTo(Setting):
name = "syslog_addr"
section = "Logging"
cli = ["--log-syslog-to"]
meta = "SYSLOG_ADDR"
validator = validate_string
if PLATFORM == "darwin":
default = "unix:///var/run/syslog"
elif PLATFORM in ('freebsd', 'dragonfly', ):
default = "unix:///var/run/log"
elif PLATFORM == "openbsd":
default = "unix:///dev/log"
else:
default = "udp://localhost:514"
desc = """\
Address to send syslog messages to.
Address is a string of the form:
* ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream``
for the stream driver or ``dgram`` for the dgram driver.
``stream`` is the default.
* ``udp://HOST:PORT`` : for UDP sockets
* ``tcp://HOST:PORT`` : for TCP sockets
"""
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Send *Gunicorn* logs to syslog.
.. versionchanged:: 19.8
You can now disable sending access logs by using the
:ref:`disable-redirect-access-to-syslog` setting.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
Makes Gunicorn use the parameter as program-name in the syslog entries.
All entries will be prefixed by ``gunicorn.<prefix>``. By default the
program name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable stdio inheritance.
Enable inheritance for stdio file descriptors in daemon mode.
Note: To disable Python stdout buffering, you can set the
environment variable ``PYTHONUNBUFFERED``.
"""
# statsD monitoring
class StatsdHost(Setting):
name = "statsd_host"
section = "Logging"
cli = ["--statsd-host"]
meta = "STATSD_ADDR"
default = None
validator = validate_hostport
desc = """\
``host:port`` of the statsd server to log to.
.. versionadded:: 19.1
"""
# Datadog Statsd (dogstatsd) tags. https://docs.datadoghq.com/developers/dogstatsd/
class DogstatsdTags(Setting):
name = "dogstatsd_tags"
section = "Logging"
cli = ["--dogstatsd-tags"]
meta = "DOGSTATSD_TAGS"
default = ""
validator = validate_string
desc = """\
A comma-delimited list of datadog statsd (dogstatsd) tags to append to
statsd metrics.
.. versionadded:: 20
"""
class StatsdPrefix(Setting):
name = "statsd_prefix"
section = "Logging"
cli = ["--statsd-prefix"]
meta = "STATSD_PREFIX"
default = ""
validator = validate_string
desc = """\
Prefix to use when emitting statsd metrics (a trailing ``.`` is added,
if not provided).
.. versionadded:: 19.2
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
If not set, the *default_proc_name* setting will be used.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A comma-separated list of directories to add to the Python path.
e.g.
``'/home/djangoprojects/myproject,/home/python/mylibrary'``.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paste", "--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a PasteDeploy config file. The argument may contain a ``#``
symbol followed by the name of an app section from the config file,
e.g. ``production.ini#admin``.
At this time, using alternate server blocks is not supported. Use the
command line arguments to control server configuration instead.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerInt(Setting):
name = "worker_int"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_int(worker):
pass
default = staticmethod(worker_int)
desc = """\
Called just after a worker exited on SIGINT or SIGQUIT.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerAbort(Setting):
name = "worker_abort"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_abort(worker):
pass
default = staticmethod(worker_abort)
desc = """\
Called when a worker received the SIGABRT signal.
This call generally happens on timeout.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_post_request
type = callable
def post_request(worker, req, environ, resp):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
The callable needs to accept four instance variables: the Worker, the
Request, the WSGI ``environ`` dictionary, and the Response.
"""
class ChildExit(Setting):
name = "child_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def child_exit(server, worker):
pass
default = staticmethod(child_exit)
desc = """\
Called just after a worker has exited, in the master process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
.. versionadded:: 19.7
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
Called just after a worker has exited, in the worker process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after *num_workers* has been changed.
The callable needs to accept an instance variable of the Arbiter and
two integers: the number of workers after and before the change.
If the number of workers is set for the first time, *old_value* would
be ``None``.
"""
class OnExit(Setting):
name = "on_exit"
section = "Server Hooks"
validator = validate_callable(1)
def on_exit(server):
pass
default = staticmethod(on_exit)
desc = """\
Called just before exiting Gunicorn.
The callable needs to accept a single instance variable for the Arbiter.
"""
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable detection of the PROXY protocol (PROXY mode).
Allows using HTTP and the PROXY protocol together. It may be useful when
working with stunnel as an HTTPS frontend and Gunicorn as the HTTP server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
Front-end's IPs from which proxy requests are accepted (comma separated).
Set to ``*`` to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment)
"""
class KeyFile(Setting):
name = "keyfile"
section = "SSL"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "SSL"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
class SSLVersion(Setting):
name = "ssl_version"
section = "SSL"
cli = ["--ssl-version"]
validator = validate_ssl_version
if hasattr(ssl, "PROTOCOL_TLS"):
default = ssl.PROTOCOL_TLS
else:
default = ssl.PROTOCOL_SSLv23
desc = """\
SSL version to use (see stdlib ssl module's)
.. versionchanged:: 20.0.1
The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to
``ssl.PROTOCOL_TLS`` when Python >= 3.6 .
"""
default = ssl.PROTOCOL_SSLv23
desc = """\
SSL version to use.
============= ============
--ssl-version Description
============= ============
SSLv3 SSLv3 is not-secure and is strongly discouraged.
SSLv23 Alias for TLS. Deprecated in Python 3.6, use TLS.
TLS Negotiate highest possible version between client/server.
Can yield SSL. (Python 3.6+)
TLSv1 TLS 1.0
TLSv1_1 TLS 1.1 (Python 3.4+)
TLSv1_2 TLS 1.2 (Python 3.4+)
TLS_SERVER Auto-negotiate the highest protocol version like TLS,
but only support server-side SSLSocket connections.
(Python 3.6+)
============= ============
.. versionchanged:: 19.7
The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to
``ssl.PROTOCOL_SSLv23``.
.. versionchanged:: 20.0
This setting now accepts string names based on ``ssl.PROTOCOL_``
constants.
"""
class CertReqs(Setting):
name = "cert_reqs"
section = "SSL"
cli = ["--cert-reqs"]
validator = validate_pos_int
default = ssl.CERT_NONE
desc = """\
Whether client certificate is required (see the stdlib ``ssl`` module)
"""
class CACerts(Setting):
name = "ca_certs"
section = "SSL"
cli = ["--ca-certs"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
CA certificates file
"""
class SuppressRaggedEOFs(Setting):
name = "suppress_ragged_eofs"
section = "SSL"
cli = ["--suppress-ragged-eofs"]
action = "store_true"
default = True
validator = validate_bool
desc = """\
Suppress ragged EOFs (see the stdlib ``ssl`` module)
"""
class DoHandshakeOnConnect(Setting):
name = "do_handshake_on_connect"
section = "SSL"
cli = ["--do-handshake-on-connect"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Whether to perform SSL handshake on socket connect (see the stdlib ``ssl`` module)
"""
class Ciphers(Setting):
name = "ciphers"
section = "SSL"
cli = ["--ciphers"]
validator = validate_string
default = None
desc = """\
SSL Cipher suite to use, in the format of an OpenSSL cipher list.
By default we use the default cipher list from Python's ``ssl`` module,
which contains ciphers considered strong at the time of each Python
release.
As a recommended alternative, the Open Web App Security Project (OWASP)
offers `a vetted set of strong cipher strings rated A+ to C-
<https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet>`_.
OWASP provides details on user-agent compatibility at each security level.
See the `OpenSSL Cipher List Format Documentation
<https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`_
for details on the format of an OpenSSL cipher list.
"""
class PasteGlobalConf(Setting):
name = "raw_paste_global_conf"
action = "append"
section = "Server Mechanics"
cli = ["--paste-global"]
meta = "CONF"
validator = validate_list_string
default = []
desc = """\
Set a PasteDeploy global config variable in ``key=value`` form.
The option can be specified multiple times.
The variables are passed to the PasteDeploy entrypoint. Example::
$ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2
.. versionadded:: 19.7
"""
class StripHeaderSpaces(Setting):
name = "strip_header_spaces"
section = "Server Mechanics"
cli = ["--strip-header-spaces"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Strip spaces present between the header name and the ``:``.
This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard.
See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
Use with care and only if necessary.
"""
| []
| []
| [
"SENDFILE",
"PORT",
"FORWARDED_ALLOW_IPS",
"WEB_CONCURRENCY"
]
| [] | ["SENDFILE", "PORT", "FORWARDED_ALLOW_IPS", "WEB_CONCURRENCY"] | python | 4 | 0 | |
helm.go | package main
import (
"os"
"github.com/golang/glog"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/kube"
)
func actionConfigInit(namespace string) (*action.Configuration, error) {
actionConfig := new(action.Configuration)
clientConfig := kube.GetConfig(settings.KubeConfig, settings.KubeContext, namespace)
if settings.KubeToken != "" {
clientConfig.BearerToken = &settings.KubeToken
}
if settings.KubeAPIServer != "" {
clientConfig.APIServer = &settings.KubeAPIServer
}
err := actionConfig.Init(clientConfig, namespace, os.Getenv("HELM_DRIVER"), glog.Infof)
if err != nil {
glog.Errorf("%+v", err)
return nil, err
}
return actionConfig, nil
}
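// Hypothetical usage sketch (not part of the original file): the returned
// configuration can be passed to any Helm action, e.g. listing releases in
// the "default" namespace.
//
//	cfg, err := actionConfigInit("default")
//	if err != nil {
//		glog.Fatalf("init failed: %+v", err)
//	}
//	releases, err := action.NewList(cfg).Run()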
| [
"\"HELM_DRIVER\""
]
| []
| [
"HELM_DRIVER"
]
| [] | ["HELM_DRIVER"] | go | 1 | 0 | |
murmur/web/utils.py | """Utility functions."""
import json
import os
import threading
import datetime
from pytz import timezone
from queue import Queue
from requests_oauthlib import OAuth1Session
def merge_two_dicts(a, b):
"""Merge 2 dictionaries."""
c = a.copy()
c.update(b)
return c
class TwitterClient:
"""Client to get data from Twitter."""
def __init__(self, user_social=None):
"""Return client instance with tokens."""
if user_social:
self.AT = user_social.access_token['oauth_token']
self.AS = user_social.access_token['oauth_token_secret']
else:
self.AT = os.environ['tw_at']
self.AS = os.environ['tw_as']
self.CK = os.environ['SOCIAL_AUTH_TWITTER_KEY']
self.CS = os.environ['SOCIAL_AUTH_TWITTER_SECRET']
self.session = OAuth1Session(self.CK, self.CS, self.AT, self.AS)
self.urls = {
'timeline':
'https://api.twitter.com/1.1/statuses/home_timeline.json',
'favlist': 'https://api.twitter.com/1.1/favorites/list.json',
'user': 'https://api.twitter.com/1.1/users/show.json',
'oembed': 'https://publish.twitter.com/oembed',
'request_token': 'https://twitter.com/oauth/request_token',
'access_token': 'https://twitter.com/oauth/access_token',
'authorize': 'https://twitter.com/oauth/authorize',
'account_verified':
'https://api.twitter.com/1.1/account/verify_credentials.json',
'tweet': 'https://api.twitter.com/1.1/statuses/show.json',
'update': 'https://api.twitter.com/1.1/statuses/update.json'
}
def timeline(self):
"""Show self timeline."""
res = self.session.get(self.urls['timeline'], params={})
if res.status_code != 200:
raise Exception()
return json.loads(res.text)
def favlist(self, user_id, page=1, count=100):
"""Show someone's favorite list."""
params = {
'user_id': user_id,
'count': count,
'page': page,
}
res = self.session.get(self.urls['favlist'], params=params)
if res.status_code != 200:
raise Exception()
return json.loads(res.text)
def user_from_screen_name(self, screen_name):
"""Show user's profile from screen_name."""
params = {
'screen_name': screen_name,
}
res = self.session.get(self.urls['user'], params=params)
if res.status_code != 200:
raise Exception()
return json.loads(res.text)
def show_tweets(self, tweets):
"""Print given tweets."""
for item in tweets:
print(item['text'])
def show_user(self, user):
"""Print given user's profile."""
print('User ID: {}'.format(user['id_str']))
print('Screen Name: {}'.format(user['screen_name']))
print('Name: {}'.format(user['name']))
def user_id_from_screen_name(self, screen_name):
"""Show user's id from screen_name."""
try:
user = self.user_from_screen_name(screen_name)
except:
raise Exception()
return user['id_str']
def html_embedded(self, tweet, q):
"""Add HTML data for Twitter widget on single tweet."""
# Remove private account
if tweet['user']['protected']:
q.put({})
return
url = 'https://twitter.com/{screen_name}/status/{tweet_id}'.format(
screen_name=tweet['user']['screen_name'], tweet_id=tweet['id_str'])
params = {
'url': url,
'maxwidth': 300,
}
res = self.session.get(self.urls['oembed'], params=params)
if res.status_code != 200:
# Put a placeholder so the consumer in add_htmls_embedded is not left
# blocking forever on q.get() when the oembed request fails.
q.put('')
return
q.put(json.loads(res.text)['html'])
def add_htmls_embedded(self, tweets):
"""Add HTML data for Twitter widget on tweets."""
threads = []
queues = []
for tweet in tweets:
q = Queue()
queues.append(q)
th = threading.Thread(target=self.html_embedded, args=(tweet, q))
th.start()
threads.append(th)
tweets_add = []
for th, q, tweet in zip(threads, queues, tweets):
th.join()
if tweet['user']['protected']:
continue
tweet_add = merge_two_dicts(tweet, {'html_embedded': q.get()})
tweets_add.append(tweet_add)
return tweets_add
def tweet_from_id(self, tweet_id):
"""Get tweet from id_str."""
params = {
'id': tweet_id,
}
res = self.session.get(self.urls['tweet'], params=params)
if res.status_code != 200:
raise Exception()
return json.loads(res.text)
def status_update(self, text):
"""Update status."""
params = {"status": text}
res = self.session.post(self.urls['update'], params=params)
print(res)
if res.status_code != 200:
raise Exception()
return True
def is_pc(request):
"""Whether user agent is pc or not."""
from user_agents import parse
ua_string = request.META['HTTP_USER_AGENT']
user_agent = parse(ua_string)
return not user_agent.is_mobile
# return True
def ignore_exceptions(func, items):
"""Ignore exceptions with multi-thread."""
def carry_out(func, item, q):
"""For each execusion."""
try:
q.put(func(item))
except:
q.put(None)
threads = []
queues = []
for item in items:
q = Queue()
queues.append(q)
th = threading.Thread(target=carry_out, args=(func, item, q))
th.start()
threads.append(th)
result = []
for th, q, item in zip(threads, queues, items):
th.join()
res = q.get()
if res:
result.append(res)
print(len(items))
return result
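# Usage sketch (hypothetical id list): fetch many tweets concurrently while
# silently dropping the ones that raise, e.g.
#   tweets = ignore_exceptions(TwitterClient().tweet_from_id, ['123', '456'])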
def parse_datetime(string):
"""Convert string to datetime object."""
dt = datetime.datetime.strptime(string, '%a %b %d %H:%M:%S +0000 %Y')
return dt.astimezone(timezone('Asia/Tokyo'))
if __name__ == '__main__':
user_id = '1212759744'
screen_name = 'kemomimi_oukoku'
twitter = TwitterClient()
# user = twitter.user_from_screen_name(screen_name)
# user_id = user['id_str']
# twitter.show_user(user)
# tweets = twitter.timeline()
tweets = twitter.favlist(user_id)
# twitter.show_tweets(tweets)
# tweets = twitter.add_htmls_embedded(tweets)
print(tweets[0]["favorite_count"])
# print(twitter.issue_request_url())
| []
| []
| [
"SOCIAL_AUTH_TWITTER_KEY",
"tw_at",
"tw_as",
"SOCIAL_AUTH_TWITTER_SECRET"
]
| [] | ["SOCIAL_AUTH_TWITTER_KEY", "tw_at", "tw_as", "SOCIAL_AUTH_TWITTER_SECRET"] | python | 4 | 0 | |
node/config/def.go | package config
import (
"encoding"
"os"
"strconv"
"time"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
)
const (
// RetrievalPricingDefaultMode configures the node to use the default retrieval pricing policy.
RetrievalPricingDefaultMode = "default"
// RetrievalPricingExternalMode configures the node to use the external retrieval pricing script
// configured by the user.
RetrievalPricingExternalMode = "external"
)
// MaxTraversalLinks configures the maximum number of links to traverse in a DAG while calculating
// CommP and traversing a DAG with graphsync; invokes a budget on DAG depth and density.
var MaxTraversalLinks uint64 = 32 * (1 << 20)
func init() {
if envMaxTraversal, err := strconv.ParseUint(os.Getenv("LOTUS_MAX_TRAVERSAL_LINKS"), 10, 64); err == nil {
MaxTraversalLinks = envMaxTraversal
}
}
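// Illustrative example (assumed shell invocation, arbitrary value): the link
// budget can be lowered for memory-constrained nodes before starting the
// daemon, e.g.
//
//	LOTUS_MAX_TRAVERSAL_LINKS=1048576 lotus daemon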
func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
}
func defCommon() Common {
return Common{
API: API{
ListenAddress: "/ip4/127.0.0.1/tcp/1234/http",
Timeout: Duration(30 * time.Second),
},
Libp2p: Libp2p{
ListenAddresses: []string{
"/ip4/0.0.0.0/tcp/0",
"/ip6/::/tcp/0",
},
AnnounceAddresses: []string{},
NoAnnounceAddresses: []string{},
ConnMgrLow: 150,
ConnMgrHigh: 180,
ConnMgrGrace: Duration(20 * time.Second),
},
Pubsub: Pubsub{
Bootstrapper: false,
DirectPeers: nil,
},
}
}
var DefaultDefaultMaxFee = types.MustParseFIL("0.07")
var DefaultSimultaneousTransfers = uint64(20)
// DefaultFullNode returns the default config
func DefaultFullNode() *FullNode {
return &FullNode{
Common: defCommon(),
Fees: FeeConfig{
DefaultMaxFee: DefaultDefaultMaxFee,
},
Client: Client{
SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
},
Chainstore: Chainstore{
EnableSplitstore: false,
Splitstore: Splitstore{
ColdStoreType: "universal",
HotStoreType: "badger",
MarkSetType: "badger",
HotStoreFullGCFrequency: 20,
},
},
}
}
func DefaultStorageMiner() *StorageMiner {
cfg := &StorageMiner{
Common: defCommon(),
Sealing: SealingConfig{
MaxWaitDealsSectors: 2, // 64G with 32G sectors
MaxSealingSectors: 0,
MaxSealingSectorsForDeals: 0,
WaitDealsDelay: Duration(time.Hour * 6),
AlwaysKeepUnsealedCopy: true,
FinalizeEarly: false,
CollateralFromMinerBalance: false,
AvailableBalanceBuffer: types.FIL(big.Zero()),
DisableCollateralFallback: false,
BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
// XXX snap deals wait deals slack if first
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)),
AggregateCommits: true,
MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs
MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days
CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
BatchPreCommitAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
AggregateAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(320))), // 0.32 nFIL
TerminateBatchMin: 1,
TerminateBatchMax: 100,
TerminateBatchWait: Duration(5 * time.Minute),
},
Storage: sectorstorage.SealerConfig{
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
AllowReplicaUpdate: true,
AllowProveReplicaUpdate2: true,
AllowRegenSectorKey: true,
// Default to 10 - tcp should still be able to figure this out, and
// it's the ratio between 10gbit / 1gbit
ParallelFetchLimit: 10,
// By default use the hardware resource filtering strategy.
ResourceFiltering: sectorstorage.ResourceFilteringHardware,
},
Dealmaking: DealmakingConfig{
ConsiderOnlineStorageDeals: true,
ConsiderOfflineStorageDeals: true,
ConsiderOnlineRetrievalDeals: true,
ConsiderOfflineRetrievalDeals: true,
ConsiderVerifiedStorageDeals: true,
ConsiderUnverifiedStorageDeals: true,
PieceCidBlocklist: []cid.Cid{},
MakeNewSectorForDeals: true,
// TODO: It'd be nice to set this based on sector size
MaxDealStartDelay: Duration(time.Hour * 24 * 14),
ExpectedSealDuration: Duration(time.Hour * 24),
PublishMsgPeriod: Duration(time.Hour),
MaxDealsPerPublishMsg: 8,
MaxProviderCollateralMultiplier: 2,
SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
SimultaneousTransfersForStoragePerClient: 0,
SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed
RetrievalPricing: &RetrievalPricing{
Strategy: RetrievalPricingDefaultMode,
Default: &RetrievalPricingDefault{
VerifiedDealsFreeTransfer: true,
},
External: &RetrievalPricingExternal{
Path: "",
},
},
},
Subsystems: MinerSubsystemConfig{
EnableMining: true,
EnableSealing: true,
EnableSectorStorage: true,
EnableMarkets: true,
},
Fees: MinerFeeConfig{
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
MaxCommitGasFee: types.MustParseFIL("0.05"),
MaxPreCommitBatchGasFee: BatchFeeConfig{
Base: types.MustParseFIL("0"),
PerSector: types.MustParseFIL("0.02"),
},
MaxCommitBatchGasFee: BatchFeeConfig{
Base: types.MustParseFIL("0"),
PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee
},
MaxTerminateGasFee: types.MustParseFIL("0.5"),
MaxWindowPoStGasFee: types.MustParseFIL("5"),
MaxPublishDealsFee: types.MustParseFIL("0.05"),
MaxMarketBalanceAddFee: types.MustParseFIL("0.007"),
},
Addresses: MinerAddressConfig{
PreCommitControl: []string{},
CommitControl: []string{},
TerminateControl: []string{},
DealPublishControl: []string{},
},
DAGStore: DAGStoreConfig{
MaxConcurrentIndex: 5,
MaxConcurrencyStorageCalls: 100,
MaxConcurrentUnseals: 5,
GCInterval: Duration(1 * time.Minute),
},
}
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
cfg.Common.API.RemoteListenAddress = "127.0.0.1:2345"
return cfg
}
var _ encoding.TextMarshaler = (*Duration)(nil)
var _ encoding.TextUnmarshaler = (*Duration)(nil)
// Duration is a wrapper type for time.Duration
// for decoding and encoding from/to TOML
type Duration time.Duration
// UnmarshalText implements interface for TOML decoding
func (dur *Duration) UnmarshalText(text []byte) error {
d, err := time.ParseDuration(string(text))
if err != nil {
return err
}
*dur = Duration(d)
return err
}
func (dur Duration) MarshalText() ([]byte, error) {
d := time.Duration(dur)
return []byte(d.String()), nil
}
| [
"\"LOTUS_MAX_TRAVERSAL_LINKS\""
]
| []
| [
"LOTUS_MAX_TRAVERSAL_LINKS"
]
| [] | ["LOTUS_MAX_TRAVERSAL_LINKS"] | go | 1 | 0 | |
lib/bitcoin.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import os
import re
import hmac
import version
from util import print_error, InvalidPassword
import ecdsa
import pyaes
import x11_hash
# zaap and bip32, bip44 constants
# https://github.com/zaapnetwork/zaap/blob/master/src/chainparams.cpp
# https://github.com/zaapnetwork/zaap/ 0.11.0 Release notes for drkp/drkv/DRKP/DRKV
# https://github.com/satoshilabs/slips/blob/master/slip-0044.md
TESTNET = False
ADDRTYPE_P2PKH = 76
ADDRTYPE_P2SH = 16
WIF = 204
XPRV_HEADER = 0x0488ade4
XPUB_HEADER = 0x0488b21e
DRKP_HEADER = 0x02fe52cc
DRKV_HEADER = 0x02fe52f8
HEADERS_URL = '' # TODO headers bootstrap
GENESIS = '00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6'
def set_testnet():
global ADDRTYPE_P2PKH, ADDRTYPE_P2SH, WIF
global XPRV_HEADER, XPUB_HEADER
global TESTNET, HEADERS_URL
global GENESIS, DRKP_HEADER, DRKV_HEADER
TESTNET = True
ADDRTYPE_P2PKH = 140
ADDRTYPE_P2SH = 19
WIF = 239
XPRV_HEADER = 0x04358394
XPUB_HEADER = 0x043587cf
DRKP_HEADER = 0x3a805837
DRKV_HEADER = 0x3a8061a0
HEADERS_URL = '' # TODO headers bootstrap
GENESIS = '0000' + \
'0bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
################################## transactions
MAX_FEE_RATE = 10000
FEE_TARGETS = [25, 10, 5, 2]
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
try:
from Cryptodome.Cipher import AES
except:
AES = None
def aes_encrypt_with_iv(key, iv, data):
if AES:
padlen = 16 - (len(data) % 16)
if padlen == 0:
padlen = 16
data += chr(padlen) * padlen
e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
return e
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc)
e = aes.feed(data) + aes.feed() # empty aes.feed() appends pkcs padding
return e
def aes_decrypt_with_iv(key, iv, data):
if AES:
cipher = AES.new(key, AES.MODE_CBC, iv)
data = cipher.decrypt(data)
padlen = ord(data[-1])
for i in data[-padlen:]:
if ord(i) != padlen:
raise InvalidPassword()
return data[0:-padlen]
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc)
s = aes.feed(data) + aes.feed() # empty aes.feed() strips pkcs padding
return s
def EncodeAES(secret, s):
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, s.encode("utf8"))
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = DecodeAES(secret, s).decode("utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
def rev_hex(s):
return s.decode('hex')[::-1].encode('hex')
def int_to_hex(i, length=1):
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
# https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
def sha256(x):
return hashlib.sha256(x).digest()
def Hash(x):
if type(x) is unicode: x=x.encode('utf-8')
return sha256(sha256(x))
def PoWHash(x):
if type(x) is unicode: x=x.encode('utf-8')
return x11_hash.getPoWHash(x)
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
hmac_sha_512 = lambda x,y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
import mnemonic
x = mnemonic.normalize_text(x)
s = hmac_sha_512("Seed version", x.encode('utf8')).encode('hex')
return s.startswith(prefix)
def is_old_seed(seed):
import old_mnemonic
words = seed.strip().split()
try:
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed.decode('hex')
is_hex = (len(seed) == 32 or len(seed) == 64)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
if is_old_seed(x):
return 'old'
elif is_new_seed(x):
return 'standard'
return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return key.decode('hex')
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
if 'ANDROID_DATA' in os.environ:
from Crypto.Hash import RIPEMD
md = RIPEMD.new()
else:
md = hashlib.new('ripemd')
md.update(sha256(public_key))
return md.digest()
def hash_160_to_bc_address(h160, addrtype):
s = chr(addrtype)
s += h160
return base_encode(s+Hash(s)[0:4], base=58)
def bc_address_to_hash_160(addr):
bytes = base_decode(addr, 25, base=58)
return ord(bytes[0]), bytes[1:21]
def hash160_to_p2pkh(h160):
return hash_160_to_bc_address(h160, ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
return hash_160_to_bc_address(h160, ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key):
return hash160_to_p2pkh(hash_160(public_key))
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
""" encode v, which is a string of bytes, to base58."""
if base == 58:
chars = __b58chars
elif base == 43:
chars = __b43chars
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= base:
div, mod = divmod(long_value, base)
result = chars[mod] + result
long_value = div
result = chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (chars[0]*nPad) + result
def base_decode(v, length, base):
""" decode v into a string of len bytes."""
if base == 58:
chars = __b58chars
elif base == 43:
chars = __b43chars
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(c) * (base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
def PrivKeyToSecret(privkey):
return privkey[9:9+32]
def SecretToASecret(secret, compressed=False):
vchIn = chr(WIF) + secret
if compressed: vchIn += '\01'
return EncodeBase58Check(vchIn)
def ASecretToSecret(key):
vch = DecodeBase58Check(key)
if vch and vch[0] == chr(WIF):
return vch[1:]
elif is_minikey(key):
return minikey_to_private_key(key)
else:
return False
def regenerate_key(sec):
b = ASecretToSecret(sec)
if not b:
return False
b = b[0:32]
return EC_KEY(b)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return ('%064x' % pkey.secret).decode('hex')
def is_compressed(sec):
b = ASecretToSecret(sec)
return len(b) == 33
def public_key_from_private_key(sec):
# rebuild public key from private key, compressed or uncompressed
pkey = regenerate_key(sec)
assert pkey
compressed = is_compressed(sec)
public_key = GetPubKey(pkey.pubkey, compressed)
return public_key.encode('hex')
def address_from_private_key(sec):
public_key = public_key_from_private_key(sec)
address = public_key_to_p2pkh(public_key.decode('hex'))
return address
def is_valid(addr):
return is_address(addr)
def is_address(addr):
try:
addrtype, h = bc_address_to_hash_160(addr)
except Exception:
return False
if addrtype not in [ADDRTYPE_P2PKH, ADDRTYPE_P2SH]:
return False
return addr == hash_160_to_bc_address(h, addrtype)
def is_p2pkh(addr):
if is_address(addr):
addrtype, h = bc_address_to_hash_160(addr)
return addrtype == ADDRTYPE_P2PKH
def is_p2sh(addr):
if is_address(addr):
addrtype, h = bc_address_to_hash_160(addr)
return addrtype == ADDRTYPE_P2SH
def is_private_key(key):
try:
k = ASecretToSecret(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(c in __b58chars for c in text)
and ord(sha256(text + '?')[0]) == 0)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
varint = var_int(len(message))
encoded_varint = "".join([chr(int(varint[i:i+2], 16)) for i in xrange(0, len(varint), 2)])
return "\x19DarkCoin Signed Message:\n" + encoded_varint + message
def verify_message(address, sig, message):
try:
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key using the address
pubkey = point_to_ser(public_key.pubkey.point, compressed)
addr = public_key_to_p2pkh(pubkey)
if address != addr:
raise Exception("Bad signature for %s, sig is for %s" % (address,
addr))
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
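# Sign/verify round-trip sketch (hypothetical WIF secret and message text):
#   ec = regenerate_key(wif_secret)
#   sig = ec.sign_message("hello", is_compressed(wif_secret))
#   assert verify_message(address_from_private_key(wif_secret), sig, "hello")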
def encrypt_message(message, pubkey):
return EC_KEY.encrypt_message(message, pubkey.decode('hex'))
def chunks(l, n):
return [l[i:i+n] for i in xrange(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p+1)/4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return ( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) ).decode('hex')
return ( '04'+('%064x'%P.x())+('%064x'%P.y()) ).decode('hex')
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in ['\x02','\x03','\x04']
if Aser[0] == '\x04':
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0]=='\x03')[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid/2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = ord(sig[0])
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1), compressed
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order/2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return point_to_ser(self.pubkey.point, compressed).encode('hex')
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, is_compressed):
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = chr(27 + i + (4 if is_compressed else 0)) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
print_error('Error for verifying with "%s": %s' % (
chr(27 + i + (4 if is_compressed else 0)), str(e)))
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message):
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True).decode('hex')
encrypted = 'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != 'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError, e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, rev_hex(int_to_hex(n,4)).decode('hex'), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = chr(0) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
if n & BIP32_PRIME: raise
return _CKD_pub(cK, c, rev_hex(int_to_hex(n,4)).decode('hex'))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
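# Rough usage sketch (hypothetical 32-byte k, c and compressed pubkey cK):
#   k1, c1 = CKD_priv(k, c, 0 | BIP32_PRIME)  # hardened child: needs private key
#   cK2, c2 = CKD_pub(cK, c, 0)               # non-hardened child: public key only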
def xprv_header(xtype):
return ("%08x"%(XPRV_HEADER + xtype)).decode('hex')
def xpub_header(xtype):
return ("%08x"%(XPUB_HEADER + xtype)).decode('hex')
def serialize_xprv(xtype, c, k, depth=0, fingerprint=chr(0)*4, child_number=chr(0)*4):
xprv = xprv_header(xtype) + chr(depth) + fingerprint + child_number + c + chr(0) + k
return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=chr(0)*4, child_number=chr(0)*4):
xpub = xpub_header(xtype) + chr(depth) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def deserialize_xkey(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = ord(xkey[4])
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = XPRV_HEADER if prv else XPUB_HEADER
xtype = int('0x' + xkey[0:4].encode('hex'), 16) - header
if xtype not in ([0, 1] if TESTNET else [0]):
raise BaseException('Invalid header')
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_drk(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = ord(xkey[4])
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = DRKV_HEADER if prv else DRKP_HEADER
xtype = int('0x' + xkey[0:4].encode('hex'), 16) - header
if xtype != 0:
raise BaseException('Invalid header')
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
return deserialize_xkey(xkey, True)
def deserialize_drkp(xkey):
return deserialize_drk(xkey, False)
def deserialize_drkv(xkey):
return deserialize_drk(xkey, True)
def is_xpub(text):
try:
deserialize_xpub(text)
return True
except:
return False
def is_xprv(text):
try:
deserialize_xprv(text)
return True
except:
return False
def is_drkp(text):
try:
deserialize_drkp(text)
return True
except:
return False
def is_drkv(text):
try:
deserialize_drkv(text)
return True
except:
return False
def xpub_from_xprv(xprv):
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
K, cK = get_pubkeys_from_secret(k)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
I = hmac.new("Bitcoin seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = serialize_xprv(xtype, master_c, master_k)
xpub = serialize_xpub(xtype, master_c, cK)
return xprv, xpub
def xpub_from_pubkey(xtype, cK):
assert cK[0] in ['\x02','\x03']
return serialize_xpub(xtype, chr(0)*32, cK)
def bip32_derivation(s):
assert s.startswith('m/')
s = s[2:]
for n in s.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
yield i
def is_bip32_derivation(x):
try:
[ i for i in bip32_derivation(x)]
return True
except:
return False
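# Example (sketch, not from the original source): bip32_derivation("m/44'/0'/1")
# yields the indices 44 + BIP32_PRIME, 0 + BIP32_PRIME, 1 - an apostrophe marks a
# hardened index, a plain number a non-hardened one. is_bip32_derivation simply
# reports whether such a path string parses.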
def bip32_private_derivation(xprv, branch, sequence):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv)
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = ("%08X"%i).decode('hex')
K, cK = get_pubkeys_from_secret(k)
xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = ("%08X"%i).decode('hex')
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return SecretToASecret(k, True)
def xkeys_from_seed(seed, passphrase, derivation):
from mnemonic import Mnemonic
xprv, xpub = bip32_root(Mnemonic.mnemonic_to_seed(seed, passphrase), 0)
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/frontend/internal/cli/serve_cmd.go | package cli
import (
"context"
"database/sql"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/cockroachdb/errors"
"github.com/graph-gophers/graphql-go"
"github.com/inconshreveable/log15"
"github.com/keegancsmith/tmpfriend"
"github.com/throttled/throttled/v2/store/redigostore"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise"
"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/ui"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/updatecheck"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/bg"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/cli/loghandlers"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/siteid"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/vfsutil"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/conf/deploy"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/connections"
"github.com/sourcegraph/sourcegraph/internal/debugserver"
"github.com/sourcegraph/sourcegraph/internal/encryption/keyring"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/httpserver"
"github.com/sourcegraph/sourcegraph/internal/logging"
"github.com/sourcegraph/sourcegraph/internal/oobmigration"
"github.com/sourcegraph/sourcegraph/internal/profiler"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/sentry"
"github.com/sourcegraph/sourcegraph/internal/sysreq"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/tracer"
"github.com/sourcegraph/sourcegraph/internal/version"
)
var (
traceFields = env.Get("SRC_LOG_TRACE", "HTTP", "space separated list of trace logs to show. Options: all, HTTP, build, github")
traceThreshold = env.Get("SRC_LOG_TRACE_THRESHOLD", "", "show traces that take longer than this")
printLogo, _ = strconv.ParseBool(env.Get("LOGO", "false", "print Sourcegraph logo upon startup"))
httpAddr = env.Get("SRC_HTTP_ADDR", ":3080", "HTTP listen address for app and HTTP API")
httpAddrInternal = envvar.HTTPAddrInternal
nginxAddr = env.Get("SRC_NGINX_HTTP_ADDR", "", "HTTP listen address for nginx reverse proxy to SRC_HTTP_ADDR. Has preference over SRC_HTTP_ADDR for ExternalURL.")
// dev browser extension ID. You can find this by going to chrome://extensions
devExtension = "chrome-extension://bmfbcejdknlknpncfpeloejonjoledha"
// production browser extension ID. This is found by viewing our extension in the chrome store.
prodExtension = "chrome-extension://dgjhfomjieaadpoljlnidmbgkdffpack"
)
func init() {
// If CACHE_DIR is specified, use that
cacheDir := env.Get("CACHE_DIR", "/tmp", "directory to store cached archives.")
vfsutil.ArchiveCacheDir = filepath.Join(cacheDir, "frontend-archive-cache")
}
// defaultExternalURL returns the default external URL of the application.
func defaultExternalURL(nginxAddr, httpAddr string) *url.URL {
addr := nginxAddr
if addr == "" {
addr = httpAddr
}
var hostPort string
if strings.HasPrefix(addr, ":") {
// Prepend localhost if HTTP listen addr is just a port.
hostPort = "127.0.0.1" + addr
} else {
hostPort = addr
}
return &url.URL{Scheme: "http", Host: hostPort}
}
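// For example (illustrative): with SRC_NGINX_HTTP_ADDR unset and SRC_HTTP_ADDR=":3080",
// defaultExternalURL("", ":3080") returns http://127.0.0.1:3080.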
// InitDB initializes and returns the global database connection and sets the
// version of the frontend in our versions table.
func InitDB() (*sql.DB, error) {
sqlDB, err := connections.NewFrontendDB("", "frontend", true)
if err != nil {
return nil, errors.Errorf("failed to connect to frontend database: %s", err)
}
if err := backend.UpdateServiceVersion(context.Background(), database.NewDB(sqlDB), "frontend", version.Version()); err != nil {
return nil, err
}
return sqlDB, nil
}
// Main is the main entrypoint for the frontend server program.
func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable, outOfBandMigrationRunner *oobmigration.Runner) enterprise.Services) error {
ctx := context.Background()
log.SetFlags(0)
log.SetPrefix("")
if err := profiler.Init(); err != nil {
log.Fatalf("failed to initialize profiling: %v", err)
}
ready := make(chan struct{})
go debugserver.NewServerRoutine(ready).Start()
sqlDB, err := InitDB()
if err != nil {
log.Fatalf("ERROR: %v", err)
}
db := database.NewDB(sqlDB)
// override site config first
if err := overrideSiteConfig(ctx, db); err != nil {
log.Fatalf("failed to apply site config overrides: %v", err)
}
globals.ConfigurationServerFrontendOnly = conf.InitConfigurationServerFrontendOnly(&configurationSource{db: db})
conf.Init()
conf.MustValidateDefaults()
// now we can init the keyring, as it depends on site config
if err := keyring.Init(ctx); err != nil {
log.Fatalf("failed to initialize encryption keyring: %v", err)
}
if err := overrideGlobalSettings(ctx, db); err != nil {
log.Fatalf("failed to override global settings: %v", err)
}
// now the keyring is configured it's safe to override the rest of the config
// and that config can access the keyring
if err := overrideExtSvcConfig(ctx, db); err != nil {
log.Fatalf("failed to override external service config: %v", err)
}
// Filter trace logs
d, _ := time.ParseDuration(traceThreshold)
logging.Init(logging.Filter(loghandlers.Trace(strings.Fields(traceFields), d)))
tracer.Init(conf.DefaultClient())
sentry.Init(conf.DefaultClient())
trace.Init()
// Create an out-of-band migration runner onto which each enterprise init function
// can register migration routines to run in the background while they still have
// work remaining.
outOfBandMigrationRunner := newOutOfBandMigrationRunner(ctx, db)
// Run a background job to handle encryption of external service configuration.
extsvcMigrator := oobmigration.NewExternalServiceConfigMigratorWithDB(db)
extsvcMigrator.AllowDecrypt = os.Getenv("ALLOW_DECRYPT_MIGRATION") == "true"
if err := outOfBandMigrationRunner.Register(extsvcMigrator.ID(), extsvcMigrator, oobmigration.MigratorOptions{Interval: 3 * time.Second}); err != nil {
log.Fatalf("failed to run external service encryption job: %v", err)
}
// Run a background job to handle encryption of external service configuration.
extAccMigrator := oobmigration.NewExternalAccountsMigratorWithDB(db)
extAccMigrator.AllowDecrypt = os.Getenv("ALLOW_DECRYPT_MIGRATION") == "true"
if err := outOfBandMigrationRunner.Register(extAccMigrator.ID(), extAccMigrator, oobmigration.MigratorOptions{Interval: 3 * time.Second}); err != nil {
log.Fatalf("failed to run user external account encryption job: %v", err)
}
// Run a background job to calculate the has_webhooks field on external
// service records.
webhookMigrator := oobmigration.NewExternalServiceWebhookMigratorWithDB(db)
if err := outOfBandMigrationRunner.Register(webhookMigrator.ID(), webhookMigrator, oobmigration.MigratorOptions{Interval: 3 * time.Second}); err != nil {
log.Fatalf("failed to run external service webhook job: %v", err)
}
// Run enterprise setup hook
enterprise := enterpriseSetupHook(db, conf.DefaultClient(), outOfBandMigrationRunner)
ui.InitRouter(db, enterprise.CodeIntelResolver)
if len(os.Args) >= 2 {
switch os.Args[1] {
case "help", "-h", "--help":
log.Printf("Version: %s", version.Version())
log.Print()
env.PrintHelp()
log.Print()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
for _, st := range sysreq.Check(ctx, skippedSysReqs()) {
log.Printf("%s:", st.Name)
if st.OK() {
log.Print("\tOK")
continue
}
if st.Skipped {
log.Print("\tSkipped")
continue
}
if st.Problem != "" {
log.Print("\t" + st.Problem)
}
if st.Err != nil {
log.Printf("\tError: %s", st.Err)
}
if st.Fix != "" {
log.Printf("\tPossible fix: %s", st.Fix)
}
}
return nil
}
}
printConfigValidation()
cleanup := tmpfriend.SetupOrNOOP()
defer cleanup()
// Don't proceed if system requirements are missing, to avoid
// presenting users with a half-working experience.
if err := checkSysReqs(context.Background(), os.Stderr); err != nil {
return err
}
siteid.Init(db)
globals.WatchExternalURL(defaultExternalURL(nginxAddr, httpAddr))
globals.WatchPermissionsUserMapping()
goroutine.Go(func() { bg.CheckRedisCacheEvictionPolicy() })
goroutine.Go(func() { bg.DeleteOldCacheDataInRedis() })
goroutine.Go(func() { bg.DeleteOldEventLogsInPostgres(context.Background(), db) })
goroutine.Go(func() { bg.DeleteOldSecurityEventLogsInPostgres(context.Background(), db) })
goroutine.Go(func() { updatecheck.Start(db) })
schema, err := graphqlbackend.NewSchema(db,
enterprise.BatchChangesResolver,
enterprise.CodeIntelResolver,
enterprise.InsightsResolver,
enterprise.AuthzResolver,
enterprise.CodeMonitorsResolver,
enterprise.LicenseResolver,
enterprise.DotcomResolver,
enterprise.SearchContextsResolver,
enterprise.OrgRepositoryResolver,
)
if err != nil {
return err
}
rateLimitWatcher, err := makeRateLimitWatcher()
if err != nil {
return err
}
server, err := makeExternalAPI(db, schema, enterprise, rateLimitWatcher)
if err != nil {
return err
}
internalAPI, err := makeInternalAPI(schema, db, enterprise, rateLimitWatcher)
if err != nil {
return err
}
routines := []goroutine.BackgroundRoutine{
server,
outOfBandMigrationRunner,
}
if internalAPI != nil {
routines = append(routines, internalAPI)
}
if printLogo {
fmt.Println(" ")
fmt.Println(logoColor)
fmt.Println(" ")
}
fmt.Printf("✱ Sourcegraph is ready at: %s\n", globals.ExternalURL())
close(ready)
goroutine.MonitorBackgroundRoutines(context.Background(), routines...)
return nil
}
func makeExternalAPI(db database.DB, schema *graphql.Schema, enterprise enterprise.Services, rateLimiter graphqlbackend.LimitWatcher) (goroutine.BackgroundRoutine, error) {
listener, err := httpserver.NewListener(httpAddr)
if err != nil {
return nil, err
}
// Create the external HTTP handler.
externalHandler, err := newExternalHTTPHandler(
db,
schema,
enterprise.GitHubWebhook,
enterprise.GitLabWebhook,
enterprise.BitbucketServerWebhook,
enterprise.NewCodeIntelUploadHandler,
enterprise.NewExecutorProxyHandler,
rateLimiter,
)
if err != nil {
return nil, err
}
httpServer := &http.Server{
Handler: externalHandler,
ReadTimeout: 75 * time.Second,
WriteTimeout: 10 * time.Minute,
}
server := httpserver.New(listener, httpServer, makeServerOptions()...)
log15.Debug("HTTP running", "on", httpAddr)
return server, nil
}
func makeInternalAPI(schema *graphql.Schema, db database.DB, enterprise enterprise.Services, rateLimiter graphqlbackend.LimitWatcher) (goroutine.BackgroundRoutine, error) {
if httpAddrInternal == "" {
return nil, nil
}
listener, err := httpserver.NewListener(httpAddrInternal)
if err != nil {
return nil, err
}
// The internal HTTP handler does not include the auth handlers.
internalHandler := newInternalHTTPHandler(
schema,
db,
enterprise.NewCodeIntelUploadHandler,
rateLimiter,
)
httpServer := &http.Server{
Handler: internalHandler,
ReadTimeout: 75 * time.Second,
// Higher than the external server, since internal RPCs can have large
// responses (eg git archive). Should match the timeout used for git archive
// in gitserver.
WriteTimeout: time.Hour,
}
server := httpserver.New(listener, httpServer, makeServerOptions()...)
log15.Debug("HTTP (internal) running", "on", httpAddrInternal)
return server, nil
}
func makeServerOptions() (options []httpserver.ServerOptions) {
if deploy.Type() == deploy.Kubernetes {
// On kubernetes, we want to wait an additional 5 seconds after we receive a
// shutdown request to give some additional time for the endpoint changes
// to propagate to services talking to this server like the LB or ingress
// controller. We only do this in frontend and not on all services, because
// frontend is the only publicly exposed service where we don't control
// retries on connection failures (see httpcli.InternalClient).
options = append(options, httpserver.WithPreShutdownPause(time.Second*5))
}
return options
}
func isAllowedOrigin(origin string, allowedOrigins []string) bool {
for _, o := range allowedOrigins {
if o == "*" || o == origin {
return true
}
}
return false
}
func makeRateLimitWatcher() (*graphqlbackend.BasicLimitWatcher, error) {
ratelimitStore, err := redigostore.New(redispool.Cache, "gql:rl:", 0)
if err != nil {
return nil, err
}
return graphqlbackend.NewBasicLimitWatcher(ratelimitStore), nil
}
| [
"\"ALLOW_DECRYPT_MIGRATION\"",
"\"ALLOW_DECRYPT_MIGRATION\""
]
| []
| [
"ALLOW_DECRYPT_MIGRATION"
]
| [] | ["ALLOW_DECRYPT_MIGRATION"] | go | 1 | 0 | |
pkg/structure/structure.go | package structure
import (
"fmt"
"io/ioutil"
"os"
"strings"
toml "github.com/pelletier/go-toml"
)
type Root struct {
Title string
Description string
Path string `toml:",omitempty"`
Version string `toml:",omitempty"`
BasePath string
SubProject []SubProject
Library []Library
}
type Library struct {
Title string
Description string
Path string `toml:",omitempty"`
Version string `toml:",omitempty"`
Type string `toml:",omitempty"`
}
type SubProject struct {
Title string
Description string
Path string `toml:",omitempty"`
Version string `toml:",omitempty"`
Type string `toml:",omitempty"`
}
func LoadStructure(path string) (*Root, error) {
var root Root
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
if err = toml.Unmarshal(data, &root); err != nil {
return nil, err
}
return &root, nil
}
const (
Glide = "glide.yaml"
Dep = "Gopkg.toml"
GoMod = "go.mod"
)
func (r *Root) ParseType() {
files := []string{Glide, Dep, GoMod}
for i := range r.SubProject {
for _, file := range files {
path := strings.Join([]string{os.Getenv("GOPATH"), "src", r.BasePath, r.SubProject[i].Title, file}, "/")
fmt.Println(path)
if _, err := os.Stat(path); err == nil {
r.SubProject[i].Type = file
}
}
}
for i := range r.Library {
for _, file := range files {
path := strings.Join([]string{os.Getenv("GOPATH"), "src", r.BasePath, r.Library[i].Title, file}, "/")
fmt.Println(path)
if _, err := os.Stat(path); err == nil {
r.Library[i].Type = file
}
}
}
}
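// Usage sketch (illustrative, assuming a TOML file matching Root exists; the filename is hypothetical):
//
//	root, err := LoadStructure("structure.toml")
//	if err != nil {
//		// handle error
//	}
//	root.ParseType() // sets SubProject[i].Type / Library[i].Type to "glide.yaml",
//	                 // "Gopkg.toml" or "go.mod" when found under $GOPATH/src/<BasePath>/<Title>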
| [
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
app/configuration/diskcache.py | # Copyright 2018-2020 Institute of Neuroscience and Medicine (INM-1),
# Forschungszentrum Jülich GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import siibra
import os
import logging
logger = logging.getLogger(__name__)
CACHEDIR = siibra.retrieval.CACHE.folder
if os.environ.get('SIIBRA_API_DISABLE_CACHE'):
logger.warning('Not using caching')
def memoize(**kwargs):
def wrapper(func):
return func
return wrapper
else:
from diskcache import FanoutCache
logger.warning('Using diskcache.FanoutCache')
def memoize(**kwargs):
cache = FanoutCache(CACHEDIR)
return cache.memoize(**kwargs)
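# Usage sketch (illustrative, names are hypothetical): decorate an expensive lookup so
# repeated calls are served from the on-disk cache unless SIIBRA_API_DISABLE_CACHE is set.
#   @memoize()
#   def expensive_lookup(key):
#       ...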
| []
| []
| [
"SIIBRA_API_DISABLE_CACHE"
]
| [] | ["SIIBRA_API_DISABLE_CACHE"] | python | 1 | 0 | |
internal/helmutil/version.go | package helmutil
import (
"os"
"os/exec"
)
// IsHelm3 returns true if helm is version 3+.
func IsHelm3() bool {
if os.Getenv("TILLER_HOST") != "" {
return false
}
return helm3Detected()
}
// helm3Detected returns true if helm is v3.
var helm3Detected func() bool
func helmEnvCommand() bool {
cmd := exec.Command("helm", "env")
return cmd.Run() == nil
}
// setupHelmVersionDetection sets up the command used to detect helm version.
func setupHelmVersionDetection() {
helm3Detected = helmEnvCommand
}
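// Usage sketch (illustrative): call setupHelmVersionDetection() once during start-up,
// then IsHelm3() reports false whenever TILLER_HOST is set (a Helm 2 / Tiller setup)
// and otherwise falls back to probing `helm env`, a subcommand that only succeeds on Helm 3.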
| [
"\"TILLER_HOST\""
]
| []
| [
"TILLER_HOST"
]
| [] | ["TILLER_HOST"] | go | 1 | 0 | |
config.py | import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOADED_PHOTOS_DEST = 'app/static/photos'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
@staticmethod
def init_app(app):
pass
class TestConfig(Config):
pass
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
if SQLALCHEMY_DATABASE_URI and SQLALCHEMY_DATABASE_URI.startswith("postgres://"):
SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("postgres://", "postgresql://", 1)
pass
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:Anna123!@localhost/blogapp1'
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
}
| []
| []
| [
"MAIL_PASSWORD",
"SECRET_KEY",
"DATABASE_URL",
"MAIL_USERNAME"
]
| [] | ["MAIL_PASSWORD", "SECRET_KEY", "DATABASE_URL", "MAIL_USERNAME"] | python | 4 | 0 | |
examples/forecast/main.go | package main
import (
"fmt"
"log"
"os"
"time"
"github.com/apixu/apixu-go/v2"
)
func main() {
config := apixu.Config{
APIKey: os.Getenv("APIXUKEY"),
}
a, err := apixu.New(config)
if err != nil {
log.Fatal(err)
}
q := "Rome"
days := 5
// Hourly weather forecast is available for paid licenses only.
// Pass nil with a free license or if you need all hours.
hour := 17
forecast, err := a.Forecast(q, days, &hour)
if err != nil {
if e, ok := err.(*apixu.Error); ok {
log.Fatal(e.Error(), e.Response().Code, e.Response().Message)
}
log.Fatal(err)
}
loc := forecast.Location
fmt.Println("Location")
fmt.Println("\tName:", loc.Name)
fmt.Println("\tRegion:", loc.Region)
fmt.Println("\tCountry:", loc.Country)
fmt.Println("\tLat:", loc.Lat)
fmt.Println("\tLon:", loc.Lon)
fmt.Println("\tTimezone:", loc.Timezone)
fmt.Println("\tLocaltimeEpoch:", loc.LocalTimeEpoch)
fmt.Println("\tLocaltime:", time.Time(loc.LocalTime).String())
curr := forecast.Current
fmt.Println("Current")
fmt.Println("\tLastUpdatedEpoch:", curr.LastUpdatedEpoch)
fmt.Println("\tLastUpdated:", time.Time(curr.LastUpdated).String())
fmt.Println("\tTempCelsius:", curr.TempCelsius)
fmt.Println("\tTempFahrenheit:", curr.TempFahrenheit)
fmt.Println("\tIsDay:", curr.IsDay)
fmt.Println("\tCondition")
fmt.Println("\t\tText:", curr.Condition.Text)
fmt.Println("\t\tIcon:", curr.Condition.Icon)
fmt.Println("\t\tCode:", curr.Condition.Code)
fmt.Println("\tWindMPH:", curr.WindMPH)
fmt.Println("\tWindKPH:", curr.WindKPH)
fmt.Println("\tWindDegree:", curr.WindDegree)
fmt.Println("\tWindDirection:", curr.WindDirection)
fmt.Println("\tPressureMB:", curr.PressureMB)
fmt.Println("\tPressureIN:", curr.PressureIN)
fmt.Println("\tPrecipMM:", curr.PrecipMM)
fmt.Println("\tPrecipIN:", curr.PrecipIN)
fmt.Println("\tHumidity:", curr.Humidity)
fmt.Println("\tCloud:", curr.Cloud)
fmt.Println("\tFeelsLikeCelsius:", curr.FeelsLikeCelsius)
fmt.Println("\tFeelsLikeFahrenheit:", curr.FeelsLikeFahrenheit)
fmt.Println("\tVisKM:", curr.VisKM)
fmt.Println("\tVisMiles:", curr.VisMiles)
fmt.Println("\tUV:", curr.UV)
fmt.Println("\tGustMPH:", curr.GustMPH)
fmt.Println("\tGustKPH:", curr.GustKPH)
fcast := forecast.Forecast.ForecastDay
fmt.Println("Forecast Day")
for _, fc := range fcast {
fmt.Println("\tDate:", time.Time(fc.Date).String())
fmt.Println("\tDateEpoch:", fc.DateEpoch)
fmt.Println("\tDay")
fmt.Println("\t\tMaxTempCelsius:", fc.Day.MaxTempCelsius)
fmt.Println("\t\tMaxTempFahrenheit:", fc.Day.MaxTempFahrenheit)
fmt.Println("\t\tMinTempCelsius:", fc.Day.MinTempCelsius)
fmt.Println("\t\tMinTempFahrenheit:", fc.Day.MinTempFahrenheit)
fmt.Println("\t\tAvgTempCelsius:", fc.Day.AvgTempCelsius)
fmt.Println("\t\tAvgTempFahrenheit:", fc.Day.AvgTempFahrenheit)
fmt.Println("\t\tMaxWindMPH:", fc.Day.MaxWindMPH)
fmt.Println("\t\tMaxWindKPH:", fc.Day.MaxWindKPH)
fmt.Println("\t\tTotalPrecipMM:", fc.Day.TotalPrecipMM)
fmt.Println("\t\tTotalPrecipIN:", fc.Day.TotalPrecipIN)
fmt.Println("\t\tVisKM:", fc.Day.AvgVisKM)
fmt.Println("\t\tVisMiles:", fc.Day.AvgVisMiles)
fmt.Println("\t\tAvgHumidity:", fc.Day.AvgHumidity)
fmt.Println("\t\tCondition")
fmt.Println("\t\t\tText:", fc.Day.Condition.Text)
fmt.Println("\t\t\tIcon:", fc.Day.Condition.Icon)
fmt.Println("\t\t\tCode:", fc.Day.Condition.Code)
fmt.Println("\t\tUV:", fc.Day.UV)
fmt.Println("\tAstro")
fmt.Println("\t\tSunrise:", fc.Astro.Sunrise)
fmt.Println("\t\tSunset:", fc.Astro.Sunset)
fmt.Println("\t\tMoonrise:", fc.Astro.Moonrise)
fmt.Println("\t\tMoonset:", fc.Astro.Moonset)
fmt.Println("\tHourly")
for _, hour := range fc.Hour {
fmt.Println("\t\tCondition")
fmt.Println("\t\t\tText:", hour.Condition.Text)
fmt.Println("\t\t\tIcon:", hour.Condition.Icon)
fmt.Println("\t\t\tCode:", hour.Condition.Code)
fmt.Println("\t\tWindMPH:", hour.WindMPH)
fmt.Println("\t\tWindKPH:", hour.WindKPH)
fmt.Println("\t\tWindDegree:", hour.WindDegree)
fmt.Println("\t\tGustMPH:", hour.GustMPH)
fmt.Println("\t\tGustKPH:", hour.GustKPH)
fmt.Println("\t\tTime:", time.Time(hour.Time))
}
fmt.Println()
}
}
| [
"\"APIXUKEY\""
]
| []
| [
"APIXUKEY"
]
| [] | ["APIXUKEY"] | go | 1 | 0 | |
localization/pipeline/ArmInstLoc.py | #! /usr/local/msl/bin/python
#******************************************************************************
# ArmInstLoc.py <image.IMG>
#
# Project: Fixed Instrument Localization
# Purpose: Localizations for fixed rover instruments on planetary surface
# Note: This script processes only MAHLI and APXS at this time.
#
# Author: Hallie Gengl
#
# Updated: 8/24/18 Corrine Rojas ([email protected])
# Updated: 6/26/19 Hallie Gengl
#
#******************************************************************************
import os
import sys
import netrc
import requests
import math
import subprocess
import argparse
import re
import glob
import ArmInFile
import point2xyz
import parseVicarLabel
import msl.placesTranslation as places
def InstPos(filen,site,drive,oLBL,oDAT):
print "Entering InstPos.ArmInstLoc with: ", filen, site,drive,oLBL,oDAT
#filen = os.path.basename(filen)
print filen,oLBL,oDAT
InFile = ArmInFile.ArmWriteInFile(filen,oDAT,oLBL)
print '[InstPos.ArmInstLoc] INFILE: ', InFile
OutFile = InFile.split('.')[0] + '.out'
print '[InstPos.ArmInstLoc]OutFile: ' + OutFile
#pos,dem_type,InFile,OutFile,sid,dem_status = Intersect(filen,site,drive)
sid = "NA"
dem_status = "NA"
dem_type = "NA"
#temporary override: skip the DEM intersection call and read the position straight from the InFile
InText = open(InFile,"r")
for line in InText:
x = (line.split())[2]
y = (line.split())[3]
z = (line.split())[4]
pos = x,y,z
return pos,dem_type,InFile,OutFile,sid,dem_status
def Intersect(filen,site,drive):
hirise_dem = 'HiRISE_%s_%s_150m.VIC' %(site,drive)
print hirise_dem
working_dir = 'test_data/'
searchStr = 'N_L000_' + '*' + 'ZZZ' + '*' + site + '*' + drive + '*' + '5RNGM' + '*' + '.VIC'
searchStr = working_dir + searchStr
print searchStr
navcam_dem = str(glob.glob(searchStr)[-1])
print navcam_dem
if os.path.exists(navcam_dem) == True:
dem_type= 'site'
dem = navcam_dem
z_offset = 0 # z offset needs to be corrected
elif os.path.exists(hirise_dem) == True:
dem_type= 'global'
dem = hirise_dem
z_offset = places.getLocoSiteTranslation('ops','0,0,0',site,drive,'rover2orbital')[3]
print "InstPos.ArmInstLoc z_offset: ",z_offset
else:
raise SystemExit('no DEM available')
point2xyz.marsp2xyz(filen,dem,InFile,OutFile,z_offset,'arm')
sid = dem
x,y,z,dem_status = parseXyz(OutFile)
pos = x,y,z
print "Leaving InstPos.ArmInstLoc and returning pos, dem_type, inFile, Outfile, sid, dem_status: ",pos,dem_type,InFile,OutFile,sid,dem_status
return pos,dem_type,InFile,OutFile,sid,dem_status
def parseXyz(OutFile):
print "Entering parseXyz.ArmInstLoc"
line = open(OutFile).read()
dem_status = line.split()[1]
x=line.split()[3]
y=line.split()[4]
z=line.split()[5]
#theta=line.split()[7]
#range=line.split()[9]
print "[parseXyz.ArmInstLoc] Leaving after parsing XYZ coordinate and returning: ",x,y,z
return x,y,z,dem_status #,theta,range
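# Note (reconstructed from the token indices used above, not authoritative): the .out
# file written by point2xyz is read as a single whitespace-separated line in which
# token 1 carries the DEM hit/miss status and tokens 3-5 carry the X, Y, Z intersection.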
def allLoc(filen,loco):
print "Entering allLoc.ArmInstLoc.py"
site = parseVicarLabel.getSite(filen)
drive = parseVicarLabel.getDrive(filen)
pos,sid,dem_status = str(InstPos(filen,site,drive))
x,y,z = places.getLocoSiteTranslation("ops",pos,site,drive,loco)
print "Leaving allLoc.ArmInstLoc.py and returning x,y,z,sid,dem_status: ",x,y,z,sid,dem_status
return x,y,z,sid,dem_status
def ArmInstLoc(pds,oLBL,oDAT):
print "Entering ArmInstLoc.ArmInstLoc.py"
venue = 'ops'
#print "PDS: ", pds
base = os.path.basename(pds)
#print "Base: ", base
core = os.path.splitext(base)[0]
#print "Core: ",core
filen = core + '.VIC'
#print "filename:", filen
print "inputs", pds, oLBL, oDAT
ext = os.path.splitext(base)[1]
if ext == '.VIC':
filen = core + '.VIC'
if ext == '.IMG':
filen = pds
print "[ArmInstLoc.ArmInstLoc] filen before the rest: ", filen
site = parseVicarLabel.getSite(filen)
print "[ArmInstLoc.ArmInstLoc] Site from filen: ",site
drive = parseVicarLabel.getDrive(filen)
print "[ArmInstLoc.ArmInstLoc] Drive from filen: ",drive
pos,dem_type,InFile,OutFile,sid,dem_status = InstPos(filen,site,drive,oLBL,oDAT)
#print pos
pos = str(pos)
pos = pos.replace("'","")
pos = pos.replace(" ","")
#print venue,pos,site,drive,'rover2orbital'
x,y,z = places.getLocoSiteTranslation(venue,pos,site,drive,'rover2orbital')
print "[ArmInstLoc.ArmInstLoc] Leaving function and returning x,y,z,sid,dem_status : ",x,y,z,sid,dem_status
return x,y,z,sid,dem_status
def main():
print "Entering Main loop [ArmInstLoc]"
try:
os.environ['R2LIB']
except KeyError as e:
print "%s is not set, run select" % (e)
raise SystemExit
usage = "%(prog)s <FILENAME>"
parser = argparse.ArgumentParser(usage=usage,formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("filen",metavar="filen",help="OBSERVATION FILENAME")
args = parser.parse_args()
venue = 'ops' #temporary override
filen = args.filen
site = parseVicarLabel.getSite(filen)
drive = parseVicarLabel.getDrive(filen)
pos = InstPos(filen,site,drive,oLBL,oDAT)
parseXyz(out_file)
if dem_type == 'site':
x,y,z = places.getLocoSiteTranslation(venue,pos,site,drive)
elif dem_type== 'global':
x,y,z = pos[0],pos[1],pos[2]
print "Leaving Main loop [ArmInstLoc] and returing x,y,z: ",x,y,z
return x,y,z
if (__name__ == "__main__"):
print
main()
print
| []
| []
| [
"R2LIB"
]
| [] | ["R2LIB"] | python | 1 | 0 | |
StaticBlogConsole/conf/conf.go | package conf
import (
"os"
"sbc/model"
"sbc/tasks"
"github.com/joho/godotenv"
)
// Init initializes the application configuration
func Init() {
// Load environment variables from the local .env file
godotenv.Load()
// Load the locale/translation file
if err := LoadLocales("conf/locales/zh-cn.yaml"); err != nil {
panic(err)
}
// Connect to the database
model.Database(os.Getenv("MYSQL_DSN"))
// Start the scheduled (cron) jobs
tasks.CronJob()
}
| [
"\"MYSQL_DSN\""
]
| []
| [
"MYSQL_DSN"
]
| [] | ["MYSQL_DSN"] | go | 1 | 0 | |
mlrun/runtimes/pod.py | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import typing
import uuid
from enum import Enum
from kfp.dsl import ContainerOp, _container_op
from kubernetes import client
import mlrun.errors
import mlrun.utils.regex
from ..config import config as mlconf
from ..utils import logger, normalize_name, update_in, verify_field_regex
from .base import BaseRuntime, FunctionSpec
from .utils import (
apply_kfp,
generate_resources,
get_item_name,
get_resource_labels,
set_named_item,
)
class KubeResourceSpec(FunctionSpec):
def __init__(
self,
command=None,
args=None,
image=None,
mode=None,
volumes=None,
volume_mounts=None,
env=None,
resources=None,
default_handler=None,
pythonpath=None,
entry_points=None,
description=None,
workdir=None,
replicas=None,
image_pull_policy=None,
service_account=None,
build=None,
image_pull_secret=None,
node_name=None,
node_selector=None,
affinity=None,
mount_applied=False,
priority_class_name=None,
):
super().__init__(
command=command,
args=args,
image=image,
mode=mode,
build=build,
entry_points=entry_points,
description=description,
workdir=workdir,
default_handler=default_handler,
pythonpath=pythonpath,
mount_applied=mount_applied,
)
self._volumes = {}
self._volume_mounts = {}
self.volumes = volumes or []
self.volume_mounts = volume_mounts or []
self.env = env or []
self.resources = resources or {}
self.replicas = replicas
self.image_pull_policy = image_pull_policy
self.service_account = service_account
self.image_pull_secret = image_pull_secret
self.node_name = node_name
self.node_selector = (
node_selector or mlrun.mlconf.get_default_function_node_selector()
)
self._affinity = affinity
self.priority_class_name = (
priority_class_name or mlrun.mlconf.default_function_priority_class_name
)
@property
def volumes(self) -> list:
return list(self._volumes.values())
@volumes.setter
def volumes(self, volumes):
self._volumes = {}
if volumes:
for vol in volumes:
set_named_item(self._volumes, vol)
@property
def volume_mounts(self) -> list:
return list(self._volume_mounts.values())
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
self._volume_mounts = {}
if volume_mounts:
for volume_mount in volume_mounts:
self._set_volume_mount(volume_mount)
@property
def affinity(self) -> client.V1Affinity:
return self._affinity
@affinity.setter
def affinity(self, affinity):
self._affinity = self._transform_affinity_to_k8s_class_instance(affinity)
def to_dict(self, fields=None, exclude=None):
struct = super().to_dict(fields, exclude=["affinity"])
api = client.ApiClient()
struct["affinity"] = api.sanitize_for_serialization(self.affinity)
return struct
def update_vols_and_mounts(self, volumes, volume_mounts):
if volumes:
for vol in volumes:
set_named_item(self._volumes, vol)
if volume_mounts:
for volume_mount in volume_mounts:
self._set_volume_mount(volume_mount)
def _get_affinity_as_k8s_class_instance(self):
pass
def _transform_affinity_to_k8s_class_instance(self, affinity):
if not affinity:
return None
if isinstance(affinity, dict):
api = client.ApiClient()
# not ideal to use their private method, but looks like that's the only option
# Taken from https://github.com/kubernetes-client/python/issues/977
affinity = api._ApiClient__deserialize(affinity, "V1Affinity")
return affinity
def _get_sanitized_affinity(self):
"""
When using methods like to_dict() on kubernetes class instances we're getting the attributes in snake_case
Which is ok if we're using the kubernetes python package but not if for example we're creating CRDs that we
apply directly. For that we need the sanitized (CamelCase) version.
"""
if not self.affinity:
return {}
if isinstance(self.affinity, dict):
# heuristic - if node_affinity is part of the dict it means to_dict() was performed on the kubernetes object,
# there's nothing we can do at that point to transform it to the sanitized version
if "node_affinity" in self.affinity:
raise mlrun.errors.MLRunInvalidArgumentError(
"Affinity must be instance of kubernetes' V1Affinity class"
)
elif "nodeAffinity" in self.affinity:
# then it's already the sanitized version
return self.affinity
api = client.ApiClient()
return api.sanitize_for_serialization(self.affinity)
def _set_volume_mount(self, volume_mount):
# calculate volume mount hash
volume_name = get_item_name(volume_mount, "name")
volume_sub_path = get_item_name(volume_mount, "subPath")
volume_mount_path = get_item_name(volume_mount, "mountPath")
volume_mount_key = hash(f"{volume_name}-{volume_sub_path}-{volume_mount_path}")
self._volume_mounts[volume_mount_key] = volume_mount
class AutoMountType(str, Enum):
none = "none"
auto = "auto"
v3io_credentials = "v3io_credentials"
v3io_fuse = "v3io_fuse"
pvc = "pvc"
@classmethod
def _missing_(cls, value):
return AutoMountType.default()
@staticmethod
def default():
return AutoMountType.auto
# Any modifier that configures a mount on a runtime should be included here. These modifiers, if applied to the
# runtime, will suppress the auto-mount functionality.
@classmethod
def all_mount_modifiers(cls):
return [
mlrun.v3io_cred.__name__,
mlrun.mount_v3io.__name__,
mlrun.platforms.other.mount_pvc.__name__,
mlrun.auto_mount.__name__,
]
@staticmethod
def _get_auto_modifier():
# If we're running on Iguazio - use v3io_cred
if mlconf.igz_version != "":
return mlrun.v3io_cred
# Else, either pvc mount if it's configured or do nothing otherwise
pvc_configured = (
"MLRUN_PVC_MOUNT" in os.environ
or "pvc_name" in mlconf.get_storage_auto_mount_params()
)
return mlrun.platforms.other.mount_pvc if pvc_configured else None
def get_modifier(self):
return {
AutoMountType.none: None,
AutoMountType.v3io_credentials: mlrun.v3io_cred,
AutoMountType.v3io_fuse: mlrun.mount_v3io,
AutoMountType.pvc: mlrun.platforms.other.mount_pvc,
AutoMountType.auto: self._get_auto_modifier(),
}[self]
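# Illustrative sketch (not part of the original module): resolving the configured
# auto-mount behaviour to a runtime modifier, mirroring try_auto_mount_based_on_config()
# defined on KubeResource below:
#   mount_type = AutoMountType(mlconf.storage.auto_mount_type)  # e.g. "auto", "pvc", "none"
#   modifier = mount_type.get_modifier()
#   if modifier:
#       fn.apply(modifier(**mlconf.get_storage_auto_mount_params()))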
class KubeResource(BaseRuntime):
kind = "job"
_is_nested = True
def __init__(self, spec=None, metadata=None):
super().__init__(metadata, spec)
self.verbose = False
@property
def spec(self) -> KubeResourceSpec:
return self._spec
@spec.setter
def spec(self, spec):
self._spec = self._verify_dict(spec, "spec", KubeResourceSpec)
def to_dict(self, fields=None, exclude=None, strip=False):
struct = super().to_dict(fields, exclude, strip=strip)
api = client.ApiClient()
struct = api.sanitize_for_serialization(struct)
if strip:
spec = struct["spec"]
for attr in ["volumes", "volume_mounts"]:
if attr in spec:
del spec[attr]
if "env" in spec and spec["env"]:
for ev in spec["env"]:
if ev["name"].startswith("V3IO_"):
ev["value"] = ""
return struct
def apply(self, modify):
# Kubeflow pipeline have a hook to add the component to the DAG on ContainerOp init
# we remove the hook to suppress kubeflow op registration and return it after the apply()
old_op_handler = _container_op._register_op_handler
_container_op._register_op_handler = lambda x: self.metadata.name
cop = ContainerOp("name", "image")
_container_op._register_op_handler = old_op_handler
return apply_kfp(modify, cop, self)
def set_env_from_secret(self, name, secret=None, secret_key=None):
"""set pod environment var from secret"""
secret_key = secret_key or name
value_from = client.V1EnvVarSource(
secret_key_ref=client.V1SecretKeySelector(name=secret, key=secret_key)
)
return self._set_env(name, value_from=value_from)
def set_env(self, name, value):
"""set pod environment var from value"""
return self._set_env(name, value=str(value))
def is_env_exists(self, name):
"""Check whether there is an environment variable define for the given key"""
for env_var in self.spec.env:
if get_item_name(env_var) == name:
return True
return False
def _set_env(self, name, value=None, value_from=None):
new_var = client.V1EnvVar(name=name, value=value, value_from=value_from)
i = 0
for v in self.spec.env:
if get_item_name(v) == name:
self.spec.env[i] = new_var
return self
i += 1
self.spec.env.append(new_var)
return self
def set_envs(self, env_vars):
"""set pod environment var key/value dict"""
for name, value in env_vars.items():
self.set_env(name, value)
return self
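# Usage sketch (illustrative): configuring pod environment variables on a function object,
#   fn.set_env("MY_FLAG", "1")
#   fn.set_envs({"LOG_LEVEL": "debug", "WORKERS": "4"})
#   fn.set_env_from_secret("TOKEN", secret="my-k8s-secret", secret_key="token")
# where fn is any KubeResource-derived runtime and the variable/secret names shown are hypothetical.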
def gpus(self, gpus, gpu_type="nvidia.com/gpu"):
update_in(self.spec.resources, ["limits", gpu_type], gpus)
def with_limits(self, mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"):
"""set pod cpu/memory/gpu limits"""
self._verify_and_set_limits("resources", mem, cpu, gpus, gpu_type)
def with_requests(self, mem=None, cpu=None):
"""set requested (desired) pod cpu/memory resources"""
self._verify_and_set_requests("resources", mem, cpu)
def with_node_selection(
self,
node_name: typing.Optional[str] = None,
node_selector: typing.Optional[typing.Dict[str, str]] = None,
affinity: typing.Optional[client.V1Affinity] = None,
):
"""
Enables to control on which k8s node the job will run
:param node_name: The name of the k8s node
:param node_selector: Label selector, only nodes with matching labels will be eligible to be picked
:param affinity: Expands the types of constraints you can express - see
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
for details
"""
if node_name:
self.spec.node_name = node_name
if node_selector:
self.spec.node_selector = node_selector
if affinity:
self.spec.affinity = affinity
def with_priority_class(self, name: typing.Optional[str] = None):
"""
Enables to control the priority of the pod
If not passed - will default to mlrun.mlconf.default_function_priority_class_name
:param name: The name of the priority class
"""
if name is None:
name = mlconf.default_function_priority_class_name
valid_priority_class_names = self.list_valid_and_default_priority_class_names()[
"valid_function_priority_class_names"
]
if name not in valid_priority_class_names:
message = "Priority class name not in available priority class names"
logger.warning(
message,
priority_class_name=name,
valid_priority_class_names=valid_priority_class_names,
)
raise mlrun.errors.MLRunInvalidArgumentError(message)
self.spec.priority_class_name = name
def list_valid_and_default_priority_class_names(self):
return {
"default_function_priority_class_name": mlconf.default_function_priority_class_name,
"valid_function_priority_class_names": mlconf.get_valid_function_priority_class_names(),
}
def _verify_and_set_limits(
self,
resources_field_name,
mem=None,
cpu=None,
gpus=None,
gpu_type="nvidia.com/gpu",
):
if mem:
verify_field_regex(
f"function.spec.{resources_field_name}.limits.memory",
mem,
mlrun.utils.regex.k8s_resource_quantity_regex,
)
if cpu:
verify_field_regex(
f"function.spec.{resources_field_name}.limits.cpu",
cpu,
mlrun.utils.regex.k8s_resource_quantity_regex,
)
if gpus:
verify_field_regex(
f"function.spec.{resources_field_name}.limits.gpus",
gpus,
mlrun.utils.regex.k8s_resource_quantity_regex,
)
update_in(
getattr(self.spec, resources_field_name),
"limits",
generate_resources(mem=mem, cpu=cpu, gpus=gpus, gpu_type=gpu_type),
)
def _verify_and_set_requests(self, resources_field_name, mem=None, cpu=None):
if mem:
verify_field_regex(
f"function.spec.{resources_field_name}.requests.memory",
mem,
mlrun.utils.regex.k8s_resource_quantity_regex,
)
if cpu:
verify_field_regex(
f"function.spec.{resources_field_name}.requests.cpu",
cpu,
mlrun.utils.regex.k8s_resource_quantity_regex,
)
update_in(
getattr(self.spec, resources_field_name),
"requests",
generate_resources(mem=mem, cpu=cpu),
)
def _get_meta(self, runobj, unique=False):
namespace = self._get_k8s().resolve_namespace()
labels = get_resource_labels(self, runobj, runobj.spec.scrape_metrics)
new_meta = client.V1ObjectMeta(namespace=namespace, labels=labels)
name = runobj.metadata.name or "mlrun"
norm_name = f"{normalize_name(name)}-"
if unique:
norm_name += uuid.uuid4().hex[:8]
new_meta.name = norm_name
runobj.set_label("mlrun/job", norm_name)
else:
new_meta.generate_name = norm_name
return new_meta
def _add_azure_vault_params_to_spec(self, k8s_secret_name=None):
secret_name = (
k8s_secret_name or mlconf.secret_stores.azure_vault.default_secret_name
)
if not secret_name:
logger.warning(
"No k8s secret provided. Azure key vault will not be available"
)
return
# We cannot use expanduser() here, since the user in question is the user running in the pod
# itself (which is root) and not where this code is running. That's why this hacky replacement is needed.
secret_path = mlconf.secret_stores.azure_vault.secret_path.replace("~", "/root")
volumes = [
{
"name": "azure-vault-secret",
"secret": {"defaultMode": 420, "secretName": secret_name},
}
]
volume_mounts = [{"name": "azure-vault-secret", "mountPath": secret_path}]
self.spec.update_vols_and_mounts(volumes, volume_mounts)
def _add_project_k8s_secrets_to_spec(self, secrets, runobj=None, project=None):
project_name = project or runobj.metadata.project
if project_name is None:
logger.warning("No project provided. Cannot add k8s secrets")
return
secret_name = self._get_k8s().get_project_secret_name(project_name)
existing_secret_keys = (
self._get_k8s().get_project_secret_keys(project_name) or {}
)
# If no secrets were passed, we need all existing keys
if not secrets:
secrets = {
key: self._secrets.k8s_env_variable_name_for_secret(key)
for key in existing_secret_keys
}
for key, env_var_name in secrets.items():
if key in existing_secret_keys:
self.set_env_from_secret(env_var_name, secret_name, key)
def _add_vault_params_to_spec(self, runobj=None, project=None):
project_name = project or runobj.metadata.project
if project_name is None:
logger.warning("No project provided. Cannot add vault parameters")
return
service_account_name = mlconf.secret_stores.vault.project_service_account_name.format(
project=project_name
)
project_vault_secret_name = self._get_k8s().get_project_vault_secret_name(
project_name, service_account_name
)
if project_vault_secret_name is None:
logger.info(f"No vault secret associated with project {project_name}")
return
volumes = [
{
"name": "vault-secret",
"secret": {"defaultMode": 420, "secretName": project_vault_secret_name},
}
]
# We cannot use expanduser() here, since the user in question is the user running in the pod
# itself (which is root) and not where this code is running. That's why this hacky replacement is needed.
token_path = mlconf.secret_stores.vault.token_path.replace("~", "/root")
volume_mounts = [{"name": "vault-secret", "mountPath": token_path}]
self.spec.update_vols_and_mounts(volumes, volume_mounts)
self.spec.env.append(
{
"name": "MLRUN_SECRET_STORES__VAULT__ROLE",
"value": f"project:{project_name}",
}
)
# In case remote URL is different than local URL, use it. Else, use the local URL
vault_url = mlconf.secret_stores.vault.remote_url
if vault_url == "":
vault_url = mlconf.secret_stores.vault.url
self.spec.env.append(
{"name": "MLRUN_SECRET_STORES__VAULT__URL", "value": vault_url}
)
def try_auto_mount_based_on_config(self):
if self.spec.mount_applied:
logger.debug("Mount already applied - not performing auto-mount")
return
auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
modifier = auto_mount_type.get_modifier()
if not modifier:
logger.debug("Auto mount disabled due to user selection")
return
mount_params_dict = mlconf.get_storage_auto_mount_params()
self.apply(modifier(**mount_params_dict))
def kube_resource_spec_to_pod_spec(
kube_resource_spec: KubeResourceSpec, container: client.V1Container
):
return client.V1PodSpec(
containers=[container],
restart_policy="Never",
volumes=kube_resource_spec.volumes,
service_account=kube_resource_spec.service_account,
node_name=kube_resource_spec.node_name,
node_selector=kube_resource_spec.node_selector,
affinity=kube_resource_spec.affinity,
priority_class_name=kube_resource_spec.priority_class_name
if len(mlconf.get_valid_function_priority_class_names())
else None,
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
rq/worker.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import errno
import logging
import os
import random
import signal
import socket
import sys
import time
import traceback
import warnings
from datetime import timedelta
from distutils.version import StrictVersion
from enum import Enum
from uuid import uuid4
from random import shuffle
try:
from signal import SIGKILL
except ImportError:
from signal import SIGTERM as SIGKILL
import redis.exceptions
from . import worker_registration
from .command import parse_payload, PUBSUB_CHANNEL_TEMPLATE, handle_command
from .compat import as_text, string_types, text_type
from .connections import get_current_connection, push_connection, pop_connection
from .defaults import (CALLBACK_TIMEOUT, DEFAULT_RESULT_TTL,
DEFAULT_WORKER_TTL, DEFAULT_JOB_MONITORING_INTERVAL,
DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT)
from .exceptions import DeserializationError, DequeueTimeout, ShutDownImminentException
from .job import Job, JobStatus
from .logutils import setup_loghandlers
from .queue import Queue
from .registry import FailedJobRegistry, StartedJobRegistry, clean_registries
from .scheduler import RQScheduler
from .suspension import is_suspended
from .timeouts import JobTimeoutException, HorseMonitorTimeoutException, UnixSignalDeathPenalty
from .utils import (backend_class, ensure_list, get_version,
make_colorizer, utcformat, utcnow, utcparse)
from .version import VERSION
from .worker_registration import clean_worker_registry, get_keys
from .serializers import resolve_serializer
try:
from setproctitle import setproctitle as setprocname
except ImportError:
def setprocname(*args, **kwargs): # noqa
pass
green = make_colorizer('darkgreen')
yellow = make_colorizer('darkyellow')
blue = make_colorizer('darkblue')
logger = logging.getLogger(__name__)
class StopRequested(Exception):
pass
def compact(l):
return [x for x in l if x is not None]
_signames = dict((getattr(signal, signame), signame)
for signame in dir(signal)
if signame.startswith('SIG') and '_' not in signame)
def signal_name(signum):
try:
if sys.version_info[:2] >= (3, 5):
return signal.Signals(signum).name
else:
return _signames[signum]
except KeyError:
return 'SIG_UNKNOWN'
except ValueError:
return 'SIG_UNKNOWN'
class WorkerStatus(str, Enum):
STARTED = 'started'
SUSPENDED = 'suspended'
BUSY = 'busy'
IDLE = 'idle'
class Worker:
redis_worker_namespace_prefix = 'rq:worker:'
redis_workers_keys = worker_registration.REDIS_WORKER_KEYS
death_penalty_class = UnixSignalDeathPenalty
queue_class = Queue
job_class = Job
# `log_result_lifespan` controls whether "Result is kept for XXX seconds"
# messages are logged after every job, by default they are.
log_result_lifespan = True
# `log_job_description` is used to toggle logging an entire jobs description.
log_job_description = True
# factor to increase connection_wait_time in case of continuous connection failures.
exponential_backoff_factor = 2.0
# Max wait time (in seconds) after which exponential_backoff_factor won't be applicable.
max_connection_wait_time = 60.0
@classmethod
def all(cls, connection=None, job_class=None, queue_class=None, queue=None, serializer=None):
"""Returns an iterable of all Workers.
"""
if queue:
connection = queue.connection
elif connection is None:
connection = get_current_connection()
worker_keys = get_keys(queue=queue, connection=connection)
workers = [cls.find_by_key(as_text(key),
connection=connection,
job_class=job_class,
queue_class=queue_class, serializer=serializer)
for key in worker_keys]
return compact(workers)
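# Usage sketch (illustrative): enumerating workers registered on a connection,
#   workers = Worker.all(connection=redis_conn)
#   names = [w.name for w in workers]
#   n = Worker.count(queue=some_queue)  # count() is defined just below
# redis_conn / some_queue are placeholders for an existing Redis connection and Queue.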
@classmethod
def all_keys(cls, connection=None, queue=None):
return [as_text(key)
for key in get_keys(queue=queue, connection=connection)]
@classmethod
def count(cls, connection=None, queue=None):
"""Returns the number of workers by queue or connection"""
return len(get_keys(queue=queue, connection=connection))
@classmethod
def find_by_key(cls, worker_key, connection=None, job_class=None,
queue_class=None, serializer=None):
"""Returns a Worker instance, based on the naming conventions for
naming the internal Redis keys. Can be used to reverse-lookup Workers
by their Redis keys.
"""
prefix = cls.redis_worker_namespace_prefix
if not worker_key.startswith(prefix):
raise ValueError('Not a valid RQ worker key: %s' % worker_key)
if connection is None:
connection = get_current_connection()
if not connection.exists(worker_key):
connection.srem(cls.redis_workers_keys, worker_key)
return None
name = worker_key[len(prefix):]
worker = cls([], name, connection=connection, job_class=job_class,
queue_class=queue_class, prepare_for_work=False, serializer=serializer)
worker.refresh()
return worker
def __init__(self, queues, name=None, default_result_ttl=DEFAULT_RESULT_TTL,
connection=None, exc_handler=None, exception_handlers=None,
default_worker_ttl=DEFAULT_WORKER_TTL, job_class=None,
queue_class=None, log_job_description=True,
job_monitoring_interval=DEFAULT_JOB_MONITORING_INTERVAL,
disable_default_exception_handler=False,
prepare_for_work=True, serializer=None): # noqa
if connection is None:
connection = get_current_connection()
self.connection = connection
self.redis_server_version = None
self.job_class = backend_class(self, 'job_class', override=job_class)
self.queue_class = backend_class(self, 'queue_class', override=queue_class)
self.version = VERSION
self.python_version = sys.version
self.serializer = resolve_serializer(serializer)
queues = [self.queue_class(name=q,
connection=connection,
job_class=self.job_class, serializer=self.serializer)
if isinstance(q, string_types) else q
for q in ensure_list(queues)]
self.name = name or uuid4().hex
self.queues = queues
self.validate_queues()
self._ordered_queues = self.queues[:]
self._exc_handlers = []
self.default_result_ttl = default_result_ttl
self.default_worker_ttl = default_worker_ttl
self.job_monitoring_interval = job_monitoring_interval
self._state = 'starting'
self._is_horse = False
self._horse_pid = 0
self._stop_requested = False
self._stopped_job_id = None
self.log = logger
self.log_job_description = log_job_description
self.last_cleaned_at = None
self.successful_job_count = 0
self.failed_job_count = 0
self.total_working_time = 0
self.current_job_working_time = 0
self.birth_date = None
self.scheduler = None
self.pubsub = None
self.pubsub_thread = None
self.disable_default_exception_handler = disable_default_exception_handler
if prepare_for_work:
self.hostname = socket.gethostname()
self.pid = os.getpid()
connection.client_setname(self.name)
self.ip_address = [client['addr'] for client in connection.client_list() if client['name'] == self.name][0]
else:
self.hostname = None
self.pid = None
self.ip_address = None
if isinstance(exception_handlers, (list, tuple)):
for handler in exception_handlers:
self.push_exc_handler(handler)
elif exception_handlers is not None:
self.push_exc_handler(exception_handlers)
def get_redis_server_version(self):
"""Return Redis server version of connection"""
if not self.redis_server_version:
self.redis_server_version = get_version(self.connection)
return self.redis_server_version
def validate_queues(self):
"""Sanity check for the given queues."""
for queue in self.queues:
if not isinstance(queue, self.queue_class):
raise TypeError('{0} is not of type {1} or string types'.format(queue, self.queue_class))
def queue_names(self):
"""Returns the queue names of this worker's queues."""
return [queue.name for queue in self.queues]
def queue_keys(self):
"""Returns the Redis keys representing this worker's queues."""
return [queue.key for queue in self.queues]
@property
def key(self):
"""Returns the worker's Redis hash key."""
return self.redis_worker_namespace_prefix + self.name
@property
def pubsub_channel_name(self):
"""Returns the worker's Redis hash key."""
return PUBSUB_CHANNEL_TEMPLATE % self.name
@property
def horse_pid(self):
"""The horse's process ID. Only available in the worker. Will return
0 in the horse part of the fork.
"""
return self._horse_pid
@property
def is_horse(self):
"""Returns whether or not this is the worker or the work horse."""
return self._is_horse
def procline(self, message):
"""Changes the current procname for the process.
This can be used to make `ps -ef` output more readable.
"""
setprocname('rq: {0}'.format(message))
def register_birth(self):
"""Registers its own birth."""
self.log.debug('Registering birth of worker %s', self.name)
if self.connection.exists(self.key) and \
not self.connection.hexists(self.key, 'death'):
msg = 'There exists an active worker named {0!r} already'
raise ValueError(msg.format(self.name))
key = self.key
queues = ','.join(self.queue_names())
with self.connection.pipeline() as p:
p.delete(key)
now = utcnow()
now_in_string = utcformat(now)
self.birth_date = now
mapping = {
'birth': now_in_string,
'last_heartbeat': now_in_string,
'queues': queues,
'pid': self.pid,
'hostname': self.hostname,
'ip_address': self.ip_address,
'version': self.version,
'python_version': self.python_version,
}
if self.get_redis_server_version() >= StrictVersion("4.0.0"):
p.hset(key, mapping=mapping)
else:
p.hmset(key, mapping)
worker_registration.register(self, p)
p.expire(key, self.default_worker_ttl + 60)
p.execute()
def register_death(self):
"""Registers its own death."""
self.log.debug('Registering death')
with self.connection.pipeline() as p:
# We cannot use self.state = 'dead' here, because that would
# rollback the pipeline
worker_registration.unregister(self, p)
p.hset(self.key, 'death', utcformat(utcnow()))
p.expire(self.key, 60)
p.execute()
def set_shutdown_requested_date(self):
"""Sets the date on which the worker received a (warm) shutdown request"""
self.connection.hset(self.key, 'shutdown_requested_date', utcformat(utcnow()))
# @property
# def birth_date(self):
# """Fetches birth date from Redis."""
# birth_timestamp = self.connection.hget(self.key, 'birth')
# if birth_timestamp is not None:
# return utcparse(as_text(birth_timestamp))
@property
def shutdown_requested_date(self):
"""Fetches shutdown_requested_date from Redis."""
shutdown_requested_timestamp = self.connection.hget(self.key, 'shutdown_requested_date')
if shutdown_requested_timestamp is not None:
return utcparse(as_text(shutdown_requested_timestamp))
@property
def death_date(self):
"""Fetches death date from Redis."""
death_timestamp = self.connection.hget(self.key, 'death')
if death_timestamp is not None:
return utcparse(as_text(death_timestamp))
def set_state(self, state, pipeline=None):
self._state = state
connection = pipeline if pipeline is not None else self.connection
connection.hset(self.key, 'state', state)
def _set_state(self, state):
"""Raise a DeprecationWarning if ``worker.state = X`` is used"""
warnings.warn(
"worker.state is deprecated, use worker.set_state() instead.",
DeprecationWarning
)
self.set_state(state)
def get_state(self):
return self._state
def _get_state(self):
"""Raise a DeprecationWarning if ``worker.state == X`` is used"""
warnings.warn(
"worker.state is deprecated, use worker.get_state() instead.",
DeprecationWarning
)
return self.get_state()
state = property(_get_state, _set_state)
def set_current_job_working_time(self, current_job_working_time, pipeline=None):
self.current_job_working_time = current_job_working_time
connection = pipeline if pipeline is not None else self.connection
connection.hset(self.key, 'current_job_working_time', current_job_working_time)
def set_current_job_id(self, job_id, pipeline=None):
connection = pipeline if pipeline is not None else self.connection
if job_id is None:
connection.hdel(self.key, 'current_job')
else:
connection.hset(self.key, 'current_job', job_id)
def get_current_job_id(self, pipeline=None):
connection = pipeline if pipeline is not None else self.connection
return as_text(connection.hget(self.key, 'current_job'))
def get_current_job(self):
"""Returns the job id of the currently executing job."""
job_id = self.get_current_job_id()
if job_id is None:
return None
return self.job_class.fetch(job_id, self.connection, self.serializer)
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def kill_horse(self, sig=SIGKILL):
"""
        Kill the horse, but catch the "No such process" error, as the horse could already be dead.
"""
try:
os.killpg(os.getpgid(self.horse_pid), sig)
self.log.info('Killed horse pid %s', self.horse_pid)
except OSError as e:
if e.errno == errno.ESRCH:
# "No such process" is fine with us
self.log.debug('Horse already dead')
else:
raise
def wait_for_horse(self):
"""
        Waits for the end of the horse process and recycles its resources.
"""
pid = None
stat = None
try:
pid, stat = os.waitpid(self.horse_pid, 0)
except ChildProcessError:
# ChildProcessError: [Errno 10] No child processes
pass
return pid, stat
def request_force_stop(self, signum, frame):
"""Terminates the application (cold shutdown).
"""
self.log.warning('Cold shut down')
# Take down the horse with the worker
if self.horse_pid:
self.log.debug('Taking down horse %s with me', self.horse_pid)
self.kill_horse()
self.wait_for_horse()
raise SystemExit()
def request_stop(self, signum, frame):
"""Stops the current worker loop but waits for child processes to
end gracefully (warm shutdown).
"""
self.log.debug('Got signal %s', signal_name(signum))
signal.signal(signal.SIGINT, self.request_force_stop)
signal.signal(signal.SIGTERM, self.request_force_stop)
self.handle_warm_shutdown_request()
self._shutdown()
def _shutdown(self):
"""
        If shutdown is requested in the middle of a job, wait until the job
        finishes before shutting down and save the request in Redis.
"""
if self.get_state() == WorkerStatus.BUSY:
self._stop_requested = True
self.set_shutdown_requested_date()
self.log.debug('Stopping after current horse is finished. '
'Press Ctrl+C again for a cold shutdown.')
if self.scheduler:
self.stop_scheduler()
else:
if self.scheduler:
self.stop_scheduler()
raise StopRequested()
def handle_warm_shutdown_request(self):
self.log.info('Warm shut down requested')
def check_for_suspension(self, burst):
"""Check to see if workers have been suspended by `rq suspend`"""
before_state = None
notified = False
while not self._stop_requested and is_suspended(self.connection, self):
if burst:
self.log.info('Suspended in burst mode, exiting')
self.log.info('Note: There could still be unfinished jobs on the queue')
raise StopRequested
if not notified:
self.log.info('Worker suspended, run `rq resume` to resume')
before_state = self.get_state()
self.set_state(WorkerStatus.SUSPENDED)
notified = True
time.sleep(1)
if before_state:
self.set_state(before_state)
def run_maintenance_tasks(self):
"""
        Runs periodic maintenance tasks. These include:
1. Check if scheduler should be started. This check should not be run
on first run since worker.work() already calls
`scheduler.enqueue_scheduled_jobs()` on startup.
2. Cleaning registries
"""
# No need to try to start scheduler on first run
if self.last_cleaned_at:
if self.scheduler and not self.scheduler._process:
self.scheduler.acquire_locks(auto_start=True)
self.clean_registries()
def subscribe(self):
"""Subscribe to this worker's channel"""
self.log.info('Subscribing to channel %s', self.pubsub_channel_name)
self.pubsub = self.connection.pubsub()
self.pubsub.subscribe(**{self.pubsub_channel_name: self.handle_payload})
self.pubsub_thread = self.pubsub.run_in_thread(sleep_time=0.2)
def unsubscribe(self):
"""Unsubscribe from pubsub channel"""
if self.pubsub_thread:
self.log.info('Unsubscribing from channel %s', self.pubsub_channel_name)
self.pubsub_thread.stop()
self.pubsub_thread.join()
self.pubsub.unsubscribe()
self.pubsub.close()
def reorder_queues(self, reference_queue):
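        # No-op hook for subclasses: RoundRobinWorker and RandomWorker (defined below)
        # override this to change the order of self._ordered_queues between jobs.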
pass
def work(self, burst=False, logging_level="INFO", date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None, with_scheduler=False):
"""Starts the work loop.
Pops and performs all jobs on the current list of queues. When all
queues are empty, block and wait for new jobs to arrive on any of the
queues, unless `burst` mode is enabled.
The return value indicates whether any jobs were processed.
"""
setup_loghandlers(logging_level, date_format, log_format)
completed_jobs = 0
self.register_birth()
self.log.info("Worker %s: started, version %s", self.key, VERSION)
self.subscribe()
self.set_state(WorkerStatus.STARTED)
qnames = self.queue_names()
self.log.info('*** Listening on %s...', green(', '.join(qnames)))
if with_scheduler:
self.scheduler = RQScheduler(
self.queues, connection=self.connection, logging_level=logging_level,
date_format=date_format, log_format=log_format, serializer=self.serializer)
self.scheduler.acquire_locks()
# If lock is acquired, start scheduler
if self.scheduler.acquired_locks:
# If worker is run on burst mode, enqueue_scheduled_jobs()
# before working. Otherwise, start scheduler in a separate process
if burst:
self.scheduler.enqueue_scheduled_jobs()
self.scheduler.release_locks()
else:
self.scheduler.start()
self._install_signal_handlers()
try:
while True:
try:
self.check_for_suspension(burst)
if self.should_run_maintenance_tasks:
self.run_maintenance_tasks()
if self._stop_requested:
self.log.info('Worker %s: stopping on request', self.key)
break
timeout = None if burst else max(1, self.default_worker_ttl - 15)
result = self.dequeue_job_and_maintain_ttl(timeout)
if result is None:
if burst:
self.log.info("Worker %s: done, quitting", self.key)
break
job, queue = result
self.reorder_queues(reference_queue=queue)
self.execute_job(job, queue)
self.heartbeat()
completed_jobs += 1
if max_jobs is not None:
if completed_jobs >= max_jobs:
self.log.info(
"Worker %s: finished executing %d jobs, quitting",
self.key, completed_jobs
)
break
except StopRequested:
break
except SystemExit:
# Cold shutdown detected
raise
except: # noqa
self.log.error(
'Worker %s: found an unhandled exception, quitting...',
self.key, exc_info=True
)
break
finally:
if not self.is_horse:
if self.scheduler:
self.stop_scheduler()
self.register_death()
self.unsubscribe()
return bool(completed_jobs)
def stop_scheduler(self):
"""Ensure scheduler process is stopped"""
if self.scheduler._process and self.scheduler._process.pid:
# Send the kill signal to scheduler process
try:
os.kill(self.scheduler._process.pid, signal.SIGTERM)
except OSError:
pass
self.scheduler._process.join()
def dequeue_job_and_maintain_ttl(self, timeout):
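        # Dequeues a job from self._ordered_queues, blocking up to `timeout` seconds per
        # attempt, sending heartbeats while waiting and retrying with exponential backoff
        # when the Redis connection drops.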
result = None
qnames = ','.join(self.queue_names())
self.set_state(WorkerStatus.IDLE)
self.procline('Listening on ' + qnames)
self.log.debug('*** Listening on %s...', green(qnames))
connection_wait_time = 1.0
while True:
try:
self.heartbeat()
if self.should_run_maintenance_tasks:
self.run_maintenance_tasks()
result = self.queue_class.dequeue_any(self._ordered_queues, timeout,
connection=self.connection,
job_class=self.job_class,
serializer=self.serializer)
if result is not None:
job, queue = result
job.redis_server_version = self.get_redis_server_version()
if self.log_job_description:
self.log.info(
'%s: %s (%s)', green(queue.name),
blue(job.description), job.id)
else:
self.log.info('%s: %s', green(queue.name), job.id)
break
except DequeueTimeout:
pass
except redis.exceptions.ConnectionError as conn_err:
self.log.error('Could not connect to Redis instance: %s Retrying in %d seconds...',
conn_err, connection_wait_time)
time.sleep(connection_wait_time)
connection_wait_time *= self.exponential_backoff_factor
connection_wait_time = min(connection_wait_time, self.max_connection_wait_time)
else:
connection_wait_time = 1.0
self.heartbeat()
return result
def heartbeat(self, timeout=None, pipeline=None):
"""Specifies a new worker timeout, typically by extending the
expiration time of the worker, effectively making this a "heartbeat"
to not expire the worker until the timeout passes.
The next heartbeat should come before this time, or the worker will
die (at least from the monitoring dashboards).
If no timeout is given, the default_worker_ttl will be used to update
the expiration time of the worker.
"""
timeout = timeout or self.default_worker_ttl + 60
connection = pipeline if pipeline is not None else self.connection
connection.expire(self.key, timeout)
connection.hset(self.key, 'last_heartbeat', utcformat(utcnow()))
self.log.debug('Sent heartbeat to prevent worker timeout. '
'Next one should arrive within %s seconds.', timeout)
def refresh(self):
data = self.connection.hmget(
self.key, 'queues', 'state', 'current_job', 'last_heartbeat',
'birth', 'failed_job_count', 'successful_job_count', 'total_working_time',
'current_job_working_time', 'hostname', 'ip_address', 'pid', 'version', 'python_version',
)
(queues, state, job_id, last_heartbeat, birth, failed_job_count,
successful_job_count, total_working_time, current_job_working_time,
hostname, ip_address, pid, version, python_version) = data
queues = as_text(queues)
self.hostname = as_text(hostname)
self.ip_address = as_text(ip_address)
self.pid = int(pid) if pid else None
self.version = as_text(version)
self.python_version = as_text(python_version)
self._state = as_text(state or '?')
self._job_id = job_id or None
if last_heartbeat:
self.last_heartbeat = utcparse(as_text(last_heartbeat))
else:
self.last_heartbeat = None
if birth:
self.birth_date = utcparse(as_text(birth))
else:
self.birth_date = None
if failed_job_count:
self.failed_job_count = int(as_text(failed_job_count))
if successful_job_count:
self.successful_job_count = int(as_text(successful_job_count))
if total_working_time:
self.total_working_time = float(as_text(total_working_time))
if current_job_working_time:
self.current_job_working_time = float(as_text(current_job_working_time))
if queues:
self.queues = [self.queue_class(queue,
connection=self.connection,
job_class=self.job_class, serializer=self.serializer)
for queue in queues.split(',')]
def increment_failed_job_count(self, pipeline=None):
connection = pipeline if pipeline is not None else self.connection
connection.hincrby(self.key, 'failed_job_count', 1)
def increment_successful_job_count(self, pipeline=None):
connection = pipeline if pipeline is not None else self.connection
connection.hincrby(self.key, 'successful_job_count', 1)
def increment_total_working_time(self, job_execution_time, pipeline):
pipeline.hincrbyfloat(self.key, 'total_working_time',
job_execution_time.total_seconds())
def fork_work_horse(self, job, queue):
"""Spawns a work horse to perform the actual work and passes it a job.
"""
child_pid = os.fork()
os.environ['RQ_WORKER_ID'] = self.name
os.environ['RQ_JOB_ID'] = job.id
if child_pid == 0:
os.setsid()
self.main_work_horse(job, queue)
os._exit(0) # just in case
else:
self._horse_pid = child_pid
self.procline('Forked {0} at {1}'.format(child_pid, time.time()))
def get_heartbeat_ttl(self, job):
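        # TTL is the job's remaining execution time (capped at job_monitoring_interval)
        # plus a 60 second grace period; jobs without a positive timeout just get the
        # monitoring interval plus the grace period.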
if job.timeout and job.timeout > 0:
remaining_execution_time = job.timeout - self.current_job_working_time
return min(remaining_execution_time, self.job_monitoring_interval) + 60
else:
return self.job_monitoring_interval + 60
def monitor_work_horse(self, job, queue):
"""The worker will monitor the work horse and make sure that it
either executes successfully or the status of the job is set to
failed
"""
ret_val = None
job.started_at = utcnow()
while True:
try:
with UnixSignalDeathPenalty(self.job_monitoring_interval, HorseMonitorTimeoutException):
retpid, ret_val = self.wait_for_horse()
break
except HorseMonitorTimeoutException:
# Horse has not exited yet and is still running.
# Send a heartbeat to keep the worker alive.
self.set_current_job_working_time((utcnow() - job.started_at).total_seconds())
# Kill the job from this side if something is really wrong (interpreter lock/etc).
if job.timeout != -1 and self.current_job_working_time > (job.timeout + 60):
self.heartbeat(self.job_monitoring_interval + 60)
self.kill_horse()
self.wait_for_horse()
break
with self.connection.pipeline() as pipeline:
self.heartbeat(self.job_monitoring_interval + 60, pipeline=pipeline)
ttl = self.get_heartbeat_ttl(job)
job.heartbeat(utcnow(), ttl, pipeline=pipeline)
pipeline.execute()
except OSError as e:
# In case we encountered an OSError due to EINTR (which is
# caused by a SIGINT or SIGTERM signal during
# os.waitpid()), we simply ignore it and enter the next
# iteration of the loop, waiting for the child to end. In
# any other case, this is some other unexpected OS error,
# which we don't want to catch, so we re-raise those ones.
if e.errno != errno.EINTR:
raise
# Send a heartbeat to keep the worker alive.
self.heartbeat()
self.set_current_job_working_time(0)
self._horse_pid = 0 # Set horse PID to 0, horse has finished working
if ret_val == os.EX_OK: # The process exited normally.
return
job_status = job.get_status()
if job_status is None: # Job completed and its ttl has expired
return
elif job_status == JobStatus.STOPPED:
# Work-horse killed deliberately
self.log.warning('Job stopped by user, moving job to FailedJobRegistry')
self.handle_job_failure(
job, queue=queue,
exc_string="Job stopped by user, work-horse terminated."
)
elif job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:
if not job.ended_at:
job.ended_at = utcnow()
# Unhandled failure: move the job to the failed queue
self.log.warning((
'Moving job to FailedJobRegistry '
'(work-horse terminated unexpectedly; waitpid returned {})'
).format(ret_val))
self.handle_job_failure(
job, queue=queue,
exc_string="Work-horse was terminated unexpectedly "
"(waitpid returned %s)" % ret_val
)
def execute_job(self, job, queue):
"""Spawns a work horse to perform the actual work and passes it a job.
The worker will wait for the work horse and make sure it executes
within the given timeout bounds, or will end the work horse with
SIGALRM.
"""
self.set_state(WorkerStatus.BUSY)
self.fork_work_horse(job, queue)
self.monitor_work_horse(job, queue)
self.set_state(WorkerStatus.IDLE)
def main_work_horse(self, job, queue):
"""This is the entry point of the newly spawned work horse."""
# After fork()'ing, always assure we are generating random sequences
# that are different from the worker.
random.seed()
self.setup_work_horse_signals()
self._is_horse = True
self.log = logger
try:
self.perform_job(job, queue)
except: # noqa
os._exit(1)
        # os._exit() is the way to exit from child processes after a fork(), in
        # contrast to the regular sys.exit().
os._exit(0)
def setup_work_horse_signals(self):
"""Setup signal handing for the newly spawned work horse."""
# Always ignore Ctrl+C in the work horse, as it might abort the
# currently running job.
# The main worker catches the Ctrl+C and requests graceful shutdown
# after the current work is done. When cold shutdown is requested, it
# kills the current job anyway.
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
def prepare_job_execution(self, job):
"""Performs misc bookkeeping like updating states prior to
job execution.
"""
with self.connection.pipeline() as pipeline:
self.set_state(WorkerStatus.BUSY, pipeline=pipeline)
self.set_current_job_id(job.id, pipeline=pipeline)
self.set_current_job_working_time(0, pipeline=pipeline)
heartbeat_ttl = self.get_heartbeat_ttl(job)
self.heartbeat(heartbeat_ttl, pipeline=pipeline)
job.heartbeat(utcnow(), heartbeat_ttl, pipeline=pipeline)
job.prepare_for_execution(self.name, pipeline=pipeline)
pipeline.execute()
msg = 'Processing {0} from {1} since {2}'
self.procline(msg.format(job.func_name, job.origin, time.time()))
def handle_job_failure(self, job, queue, started_job_registry=None,
exc_string=''):
"""Handles the failure or an executing job by:
1. Setting the job status to failed
2. Removing the job from StartedJobRegistry
        3. Setting the worker's current job to None
        4. Adding the job to FailedJobRegistry
"""
self.log.debug('Handling failed execution of job %s', job.id)
with self.connection.pipeline() as pipeline:
if started_job_registry is None:
started_job_registry = StartedJobRegistry(
job.origin,
self.connection,
job_class=self.job_class
)
job.worker_name = None
# check whether a job was stopped intentionally and set the job
# status appropriately if it was this job.
job_is_stopped = self._stopped_job_id == job.id
retry = job.retries_left and job.retries_left > 0 and not job_is_stopped
if job_is_stopped:
job.set_status(JobStatus.STOPPED, pipeline=pipeline)
self._stopped_job_id = None
else:
                # Requeue/reschedule if retry is configured, otherwise mark the job as failed
if not retry:
job.set_status(JobStatus.FAILED, pipeline=pipeline)
started_job_registry.remove(job, pipeline=pipeline)
if not self.disable_default_exception_handler and not retry:
failed_job_registry = FailedJobRegistry(job.origin, job.connection,
job_class=self.job_class)
failed_job_registry.add(job, ttl=job.failure_ttl,
exc_string=exc_string, pipeline=pipeline)
self.set_current_job_id(None, pipeline=pipeline)
self.increment_failed_job_count(pipeline)
if job.started_at and job.ended_at:
self.increment_total_working_time(
job.ended_at - job.started_at, pipeline
)
if retry:
job.retry(queue, pipeline)
try:
pipeline.execute()
except Exception:
# Ensure that custom exception handlers are called
# even if Redis is down
pass
def handle_job_success(self, job, queue, started_job_registry):
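        # Enqueues the job's dependents, clears the current job id, bumps the success and
        # working-time counters and, if the result TTL is non-zero, persists the job as
        # FINISHED in the finished job registry; the transaction is retried on WatchError.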
self.log.debug('Handling successful execution of job %s', job.id)
with self.connection.pipeline() as pipeline:
while True:
try:
# if dependencies are inserted after enqueue_dependents
# a WatchError is thrown by execute()
pipeline.watch(job.dependents_key)
# enqueue_dependents calls multi() on the pipeline!
queue.enqueue_dependents(job, pipeline=pipeline)
self.set_current_job_id(None, pipeline=pipeline)
self.increment_successful_job_count(pipeline=pipeline)
self.increment_total_working_time(
job.ended_at - job.started_at, pipeline
)
result_ttl = job.get_result_ttl(self.default_result_ttl)
if result_ttl != 0:
job.set_status(JobStatus.FINISHED, pipeline=pipeline)
job.worker_name = None
# Don't clobber the user's meta dictionary!
job.save(pipeline=pipeline, include_meta=False)
finished_job_registry = queue.finished_job_registry
finished_job_registry.add(job, result_ttl, pipeline)
job.cleanup(result_ttl, pipeline=pipeline,
remove_from_queue=False)
started_job_registry.remove(job, pipeline=pipeline)
pipeline.execute()
break
except redis.exceptions.WatchError:
continue
def execute_success_callback(self, job, result):
"""Executes success_callback with timeout"""
job.heartbeat(utcnow(), CALLBACK_TIMEOUT)
with self.death_penalty_class(CALLBACK_TIMEOUT, JobTimeoutException, job_id=job.id):
job.success_callback(job, self.connection, result)
def execute_failure_callback(self, job):
"""Executes failure_callback with timeout"""
job.heartbeat(utcnow(), CALLBACK_TIMEOUT)
with self.death_penalty_class(CALLBACK_TIMEOUT, JobTimeoutException, job_id=job.id):
job.failure_callback(job, self.connection, *sys.exc_info())
def perform_job(self, job, queue):
"""Performs the actual work of a job. Will/should only be called
inside the work horse's process.
"""
push_connection(self.connection)
started_job_registry = queue.started_job_registry
try:
self.prepare_job_execution(job)
job.started_at = utcnow()
timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id):
rv = job.perform()
job.ended_at = utcnow()
# Pickle the result in the same try-except block since we need
# to use the same exc handling when pickling fails
job._result = rv
if job.success_callback:
self.execute_success_callback(job, rv)
self.handle_job_success(job=job,
queue=queue,
started_job_registry=started_job_registry)
except: # NOQA
job.ended_at = utcnow()
exc_info = sys.exc_info()
exc_string = ''.join(traceback.format_exception(*exc_info))
if job.failure_callback:
try:
self.execute_failure_callback(job)
except: # noqa
self.log.error(
'Worker %s: error while executing failure callback',
self.key, exc_info=True
)
exc_info = sys.exc_info()
exc_string = ''.join(traceback.format_exception(*exc_info))
self.handle_job_failure(job=job, exc_string=exc_string, queue=queue,
started_job_registry=started_job_registry)
self.handle_exception(job, *exc_info)
return False
finally:
pop_connection()
self.log.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id)
if rv is not None:
log_result = "{0!r}".format(as_text(text_type(rv)))
self.log.debug('Result: %s', yellow(log_result))
if self.log_result_lifespan:
result_ttl = job.get_result_ttl(self.default_result_ttl)
if result_ttl == 0:
self.log.info('Result discarded immediately')
elif result_ttl > 0:
self.log.info('Result is kept for %s seconds', result_ttl)
else:
self.log.info('Result will never expire, clean up result key manually')
return True
def handle_exception(self, job, *exc_info):
"""Walks the exception handler stack to delegate exception handling."""
exc_string = ''.join(traceback.format_exception(*exc_info))
# If the job cannot be deserialized, it will raise when func_name or
# the other properties are accessed, which will stop exceptions from
# being properly logged, so we guard against it here.
try:
extra = {
'func': job.func_name,
'arguments': job.args,
'kwargs': job.kwargs,
}
except DeserializationError:
extra = {}
# the properties below should be safe however
extra.update({'queue': job.origin, 'job_id': job.id})
# func_name
self.log.error(exc_string, exc_info=True, extra=extra)
for handler in self._exc_handlers:
self.log.debug('Invoking exception handler %s', handler)
fallthrough = handler(job, *exc_info)
# Only handlers with explicit return values should disable further
# exc handling, so interpret a None return value as True.
if fallthrough is None:
fallthrough = True
if not fallthrough:
break
def push_exc_handler(self, handler_func):
"""Pushes an exception handler onto the exc handler stack."""
self._exc_handlers.append(handler_func)
def pop_exc_handler(self):
"""Pops the latest exception handler off of the exc handler stack."""
return self._exc_handlers.pop()
def __eq__(self, other):
"""Equality does not take the database/connection into account"""
if not isinstance(other, self.__class__):
raise TypeError('Cannot compare workers to other types (of workers)')
return self.name == other.name
def __hash__(self):
"""The hash does not take the database/connection into account"""
return hash(self.name)
def clean_registries(self):
"""Runs maintenance jobs on each Queue's registries."""
for queue in self.queues:
# If there are multiple workers running, we only want 1 worker
# to run clean_registries().
if queue.acquire_cleaning_lock():
self.log.info('Cleaning registries for queue: %s', queue.name)
clean_registries(queue)
clean_worker_registry(queue)
self.last_cleaned_at = utcnow()
@property
def should_run_maintenance_tasks(self):
"""Maintenance tasks should run on first startup or every 10 minutes."""
if self.last_cleaned_at is None:
return True
if (utcnow() - self.last_cleaned_at) > timedelta(minutes=10):
return True
return False
def handle_payload(self, message):
"""Handle external commands"""
self.log.debug('Received message: %s', message)
payload = parse_payload(message)
handle_command(self, payload)
class SimpleWorker(Worker):
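    # Worker variant that runs jobs in the worker's own process instead of forking a
    # work horse (see execute_job below).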
def execute_job(self, job, queue):
"""Execute job in same thread/process, do not fork()"""
return self.perform_job(job, queue)
def get_heartbeat_ttl(self, job):
# "-1" means that jobs never timeout. In this case, we should _not_ do -1 + 60 = 59.
        # We should just stick to DEFAULT_WORKER_TTL.
if job.timeout == -1:
return DEFAULT_WORKER_TTL
else:
return (job.timeout or DEFAULT_WORKER_TTL) + 60
class HerokuWorker(Worker):
"""
Modified version of rq worker which:
* stops work horses getting killed with SIGTERM
* sends SIGRTMIN to work horses on SIGTERM to the main process which in turn
causes the horse to crash `imminent_shutdown_delay` seconds later
"""
imminent_shutdown_delay = 6
frame_properties = ['f_code', 'f_lasti', 'f_lineno', 'f_locals', 'f_trace']
def setup_work_horse_signals(self):
"""Modified to ignore SIGINT and SIGTERM and only handle SIGRTMIN"""
signal.signal(signal.SIGRTMIN, self.request_stop_sigrtmin)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
def handle_warm_shutdown_request(self):
"""If horse is alive send it SIGRTMIN"""
if self.horse_pid != 0:
self.log.info(
'Worker %s: warm shut down requested, sending horse SIGRTMIN signal',
self.key
)
self.kill_horse(sig=signal.SIGRTMIN)
else:
self.log.warning('Warm shut down requested, no horse found')
def request_stop_sigrtmin(self, signum, frame):
if self.imminent_shutdown_delay == 0:
self.log.warning('Imminent shutdown, raising ShutDownImminentException immediately')
self.request_force_stop_sigrtmin(signum, frame)
else:
self.log.warning('Imminent shutdown, raising ShutDownImminentException in %d seconds',
self.imminent_shutdown_delay)
signal.signal(signal.SIGRTMIN, self.request_force_stop_sigrtmin)
signal.signal(signal.SIGALRM, self.request_force_stop_sigrtmin)
signal.alarm(self.imminent_shutdown_delay)
def request_force_stop_sigrtmin(self, signum, frame):
info = dict((attr, getattr(frame, attr)) for attr in self.frame_properties)
self.log.warning('raising ShutDownImminentException to cancel job...')
raise ShutDownImminentException('shut down imminent (signal: %s)' % signal_name(signum), info)
class RoundRobinWorker(Worker):
"""
Modified version of Worker that dequeues jobs from the queues using a round-robin strategy.
"""
def reorder_queues(self, reference_queue):
pos = self._ordered_queues.index(reference_queue)
self._ordered_queues = self._ordered_queues[pos + 1:] + self._ordered_queues[:pos + 1]
class RandomWorker(Worker):
"""
Modified version of Worker that dequeues jobs from the queues using a random strategy.
"""
def reorder_queues(self, reference_queue):
shuffle(self._ordered_queues)
| []
| []
| [
"RQ_WORKER_ID",
"RQ_JOB_ID"
]
| [] | ["RQ_WORKER_ID", "RQ_JOB_ID"] | python | 2 | 0 | |
cmd/dtb/api.go | package main
import (
"github.com/gin-gonic/gin"
"go/build"
"gopkg.in/urfave/cli.v1"
"os"
)
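// gopath returns the GOPATH environment variable, falling back to the Go build
// default when it is unset.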
func gopath() string {
path := os.Getenv("GOPATH")
if path == "" {
path = build.Default.GOPATH
}
return path
}
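// ApiServe wires up the gin router (static assets, main page and the /v1 API group
// with block and blueprint endpoints) and starts the HTTP server.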
func ApiServe(c *cli.Context) error {
if c.Bool("debug") {
gin.SetMode(gin.DebugMode)
	} else {
gin.SetMode(gin.ReleaseMode)
}
r := gin.New()
r.Static("/assets", gopath()+"/src/github.com/dropthebit/assets")
//MainPage Serve
r.GET("/", func(context *gin.Context) {
})
//api
v1 := r.Group("/v1")
{
// get information
v1.GET("info", func(context *gin.Context) {
})
// get crawling state for dashboard
v1.GET("state", func(context *gin.Context) {
})
// block
block := v1.Group("block")
{
block.GET("/types", func(context *gin.Context) {
})
}
// block
bp := v1.Group("blueprint")
{
bp.GET("/:id", func(context *gin.Context) {
})
bp.POST("/:id", func(context *gin.Context) {
})
bp.PUT("/:id", func(context *gin.Context) {
})
bp.DELETE("/:id", func(context *gin.Context) {
})
}
}
err := r.Run()
return err
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go | /*
Copyright 2015 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud.
package cbtconfig
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
"golang.org/x/oauth2"
"google.golang.org/grpc/credentials"
)
// Config represents a configuration.
type Config struct {
Project, Instance string // required
Creds string // optional
AdminEndpoint string // optional
DataEndpoint string // optional
CertFile string // optional
UserAgent string // optional
TokenSource oauth2.TokenSource // derived
TLSCreds credentials.TransportCredentials // derived
}
type RequiredFlags uint
const NoneRequired RequiredFlags = 0
const (
ProjectRequired RequiredFlags = 1 << iota
InstanceRequired
)
const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired
// RegisterFlags registers a set of standard flags for this config.
// It should be called before flag.Parse.
func (c *Config) RegisterFlags() {
flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project")
flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance")
flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint")
flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint")
flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file")
flag.StringVar(&c.UserAgent, "user-agent", c.UserAgent, "Override the user agent string")
}
// CheckFlags checks that the required config values are set.
func (c *Config) CheckFlags(required RequiredFlags) error {
var missing []string
if c.CertFile != "" {
b, err := ioutil.ReadFile(c.CertFile)
if err != nil {
return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err)
}
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(b) {
return fmt.Errorf("Failed to append certificates from %s", c.CertFile)
}
c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp})
}
if required != NoneRequired {
c.SetFromGcloud()
}
if required&ProjectRequired != 0 && c.Project == "" {
missing = append(missing, "-project")
}
if required&InstanceRequired != 0 && c.Instance == "" {
missing = append(missing, "-instance")
}
if len(missing) > 0 {
return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
}
return nil
}
// Filename returns the filename consulted for standard configuration.
func Filename() string {
// TODO(dsymonds): Might need tweaking for Windows.
return filepath.Join(os.Getenv("HOME"), ".cbtrc")
}
// Load loads a .cbtrc file.
// If the file is not present, an empty config is returned.
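// A .cbtrc file contains simple key=value lines, e.g. (illustrative values):
//
//	project = my-project
//	instance = my-instance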
func Load() (*Config, error) {
filename := Filename()
data, err := ioutil.ReadFile(filename)
if err != nil {
// silent fail if the file isn't there
if os.IsNotExist(err) {
return &Config{}, nil
}
return nil, fmt.Errorf("Reading %s: %v", filename, err)
}
c := new(Config)
s := bufio.NewScanner(bytes.NewReader(data))
for s.Scan() {
line := s.Text()
i := strings.Index(line, "=")
if i < 0 {
return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
}
key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
switch key {
default:
return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
case "project":
c.Project = val
case "instance":
c.Instance = val
case "creds":
c.Creds = val
case "admin-endpoint":
c.AdminEndpoint = val
case "data-endpoint":
c.DataEndpoint = val
case "cert-file":
c.CertFile = val
case "user-agent":
c.UserAgent = val
}
}
return c, s.Err()
}
type GcloudCredential struct {
AccessToken string `json:"access_token"`
Expiry time.Time `json:"token_expiry"`
}
func (cred *GcloudCredential) Token() *oauth2.Token {
return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
}
type GcloudConfig struct {
Configuration struct {
Properties struct {
Core struct {
Project string `json:"project"`
} `json:"core"`
} `json:"properties"`
} `json:"configuration"`
Credential GcloudCredential `json:"credential"`
}
type GcloudCmdTokenSource struct {
Command string
Args []string
}
// Token implements the oauth2.TokenSource interface
func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) {
gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args)
if err != nil {
return nil, err
}
return gcloudConfig.Credential.Token(), nil
}
// LoadGcloudConfig retrieves the gcloud configuration values we need to use via the
// 'config-helper' command
func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) {
out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output()
if err != nil {
return nil, fmt.Errorf("Could not retrieve gcloud configuration")
}
var gcloudConfig GcloudConfig
if err := json.Unmarshal(out, &gcloudConfig); err != nil {
return nil, fmt.Errorf("Could not parse gcloud configuration")
}
return &gcloudConfig, nil
}
// SetFromGcloud retrieves and sets any missing config values from the gcloud
// configuration if possible.
func (c *Config) SetFromGcloud() error {
if c.Creds == "" {
c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
if c.Creds == "" {
log.Printf("-creds flag unset, will use gcloud credential")
}
} else {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
}
if c.Project == "" {
log.Printf("-project flag unset, will use gcloud active project")
}
if c.Creds != "" && c.Project != "" {
return nil
}
gcloudCmd := "gcloud"
if runtime.GOOS == "windows" {
gcloudCmd = gcloudCmd + ".cmd"
}
gcloudCmdArgs := []string{"config", "config-helper",
"--format=json(configuration.properties.core.project,credential)"}
gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
if err != nil {
return err
}
if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
log.Printf("gcloud active project is \"%s\"",
gcloudConfig.Configuration.Properties.Core.Project)
c.Project = gcloudConfig.Configuration.Properties.Core.Project
}
if c.Creds == "" {
c.TokenSource = oauth2.ReuseTokenSource(
gcloudConfig.Credential.Token(),
&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
}
return nil
}
| [
"\"HOME\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\""
]
| []
| [
"HOME",
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["HOME", "GOOGLE_APPLICATION_CREDENTIALS"] | go | 2 | 0 | |
gateway/logger/log_option.go | package logger
import (
"net/http"
"os"
"strings"
"time"
)
type LogOption func(record *[]string, key, path string, r *http.Request)
func WithTime() LogOption {
return func(record *[]string, key, path string, r *http.Request) {
*record = append(*record, time.Now().Format(time.RFC3339))
}
}
func WithKey() LogOption {
return func(record *[]string, key, path string, r *http.Request) {
*record = append(*record, key)
}
}
func WithPath() LogOption {
return func(record *[]string, key, path string, r *http.Request) {
*record = append(*record, path)
}
}
func HeaderElement(name string) LogOption {
return func(record *[]string, key, path string, r *http.Request) {
*record = append(*record, r.Header.Get(name))
}
}
var logOptionPattern = DefaultLogPattern()
func DefaultLogPattern() []LogOption {
// check log pattern
ptn := os.Getenv("LOG_PATTERN")
if ptn == "" {
ptn = "time,key,path" // default
}
// get column name and set function to write log
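	// For example (illustrative), LOG_PATTERN="time,key,path,User-Agent" produces those
	// four columns; any name other than time/key/path is treated as a request header key.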
var pattern []LogOption
for _, value := range strings.Split(ptn, ",") {
switch value {
case "time":
pattern = append(pattern, WithTime())
case "key":
pattern = append(pattern, WithKey())
case "path":
pattern = append(pattern, WithPath())
default:
pattern = append(pattern, HeaderElement(value))
}
}
return pattern
}
| [
"\"LOG_PATTERN\""
]
| []
| [
"LOG_PATTERN"
]
| [] | ["LOG_PATTERN"] | go | 1 | 0 | |
recognition/label_image.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 1 22:06:48 2018
@author: kanchana
"""
import os, sys
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# change this as you see fit
image_path = sys.argv[1]
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score))
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
tests/unit/gapic/compute_v1/test_instance_templates.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.instance_templates import InstanceTemplatesClient
from google.cloud.compute_v1.services.instance_templates import pagers
from google.cloud.compute_v1.services.instance_templates import transports
from google.cloud.compute_v1.services.instance_templates.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
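    # Dummy client certificate source used by the mTLS tests below; returns fake
    # certificate and key bytes.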
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert InstanceTemplatesClient._get_default_mtls_endpoint(None) is None
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize("client_class", [InstanceTemplatesClient,])
def test_instance_templates_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize("client_class", [InstanceTemplatesClient,])
def test_instance_templates_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_instance_templates_client_get_transport_class():
transport = InstanceTemplatesClient.get_transport_class()
available_transports = [
transports.InstanceTemplatesRestTransport,
]
assert transport in available_transports
transport = InstanceTemplatesClient.get_transport_class("rest")
assert transport == transports.InstanceTemplatesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),],
)
@mock.patch.object(
InstanceTemplatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstanceTemplatesClient),
)
def test_instance_templates_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(InstanceTemplatesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(InstanceTemplatesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
InstanceTemplatesClient,
transports.InstanceTemplatesRestTransport,
"rest",
"true",
),
(
InstanceTemplatesClient,
transports.InstanceTemplatesRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
InstanceTemplatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstanceTemplatesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_instance_templates_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),],
)
def test_instance_templates_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),],
)
def test_instance_templates_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_delete_rest(
transport: str = "rest", request_type=compute.DeleteInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_delete_rest_from_dict():
test_delete_rest(request_type=dict)
def test_delete_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete(
project="project_value", instance_template="instance_template_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "instance_template_value" in http_call[1] + str(body)
def test_delete_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete(
compute.DeleteInstanceTemplateRequest(),
project="project_value",
instance_template="instance_template_value",
)
def test_get_rest(
transport: str = "rest", request_type=compute.GetInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplate(
creation_timestamp="creation_timestamp_value",
description="description_value",
id="id_value",
kind="kind_value",
name="name_value",
properties=compute.InstanceProperties(
advanced_machine_features=compute.AdvancedMachineFeatures(
enable_nested_virtualization=True
)
),
self_link="self_link_value",
source_instance="source_instance_value",
source_instance_params=compute.SourceInstanceParams(
disk_configs=[compute.DiskInstantiationConfig(auto_delete=True)]
),
)
# Wrap the value into a proper Response obj
json_return_value = compute.InstanceTemplate.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.InstanceTemplate)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.properties == compute.InstanceProperties(
advanced_machine_features=compute.AdvancedMachineFeatures(
enable_nested_virtualization=True
)
)
assert response.self_link == "self_link_value"
assert response.source_instance == "source_instance_value"
assert response.source_instance_params == compute.SourceInstanceParams(
disk_configs=[compute.DiskInstantiationConfig(auto_delete=True)]
)
def test_get_rest_from_dict():
test_get_rest(request_type=dict)
def test_get_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplate()
# Wrap the value into a proper Response obj
json_return_value = compute.InstanceTemplate.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get(
project="project_value", instance_template="instance_template_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "instance_template_value" in http_call[1] + str(body)
def test_get_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetInstanceTemplateRequest(),
project="project_value",
instance_template="instance_template_value",
)
def test_get_iam_policy_rest(
transport: str = "rest", request_type=compute.GetIamPolicyInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(
audit_configs=[
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(
exempted_members=["exempted_members_value"]
)
]
)
],
bindings=[compute.Binding(binding_id="binding_id_value")],
etag="etag_value",
iam_owned=True,
rules=[compute.Rule(action=compute.Rule.Action.ALLOW)],
version=774,
)
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.audit_configs == [
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(exempted_members=["exempted_members_value"])
]
)
]
assert response.bindings == [compute.Binding(binding_id="binding_id_value")]
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.rules == [compute.Rule(action=compute.Rule.Action.ALLOW)]
assert response.version == 774
def test_get_iam_policy_rest_from_dict():
test_get_iam_policy_rest(request_type=dict)
def test_get_iam_policy_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_iam_policy(
project="project_value", resource="resource_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "resource_value" in http_call[1] + str(body)
def test_get_iam_policy_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_iam_policy(
compute.GetIamPolicyInstanceTemplateRequest(),
project="project_value",
resource="resource_value",
)
def test_insert_rest(
transport: str = "rest", request_type=compute.InsertInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
error=compute.Error(errors=[compute.Errors(code="code_value")]),
http_error_message="http_error_message_value",
http_error_status_code=2374,
id="id_value",
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id="target_id_value",
target_link="target_link_value",
user="user_value",
warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
zone="zone_value",
)
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == "id_value"
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == "target_id_value"
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.warnings == [
compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
]
assert response.zone == "zone_value"
def test_insert_rest_from_dict():
test_insert_rest(request_type=dict)
def test_insert_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
json_return_value = compute.Operation.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
instance_template_resource = compute.InstanceTemplate(
creation_timestamp="creation_timestamp_value"
)
client.insert(
project="project_value",
instance_template_resource=instance_template_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert compute.InstanceTemplate.to_json(
instance_template_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body)
def test_insert_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertInstanceTemplateRequest(),
project="project_value",
instance_template_resource=compute.InstanceTemplate(
creation_timestamp="creation_timestamp_value"
),
)
def test_list_rest(
transport: str = "rest", request_type=compute.ListInstanceTemplatesRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplateList(
id="id_value",
items=[
compute.InstanceTemplate(creation_timestamp="creation_timestamp_value")
],
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
)
# Wrap the value into a proper Response obj
json_return_value = compute.InstanceTemplateList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.items == [
compute.InstanceTemplate(creation_timestamp="creation_timestamp_value")
]
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_list_rest_from_dict():
test_list_rest(request_type=dict)
def test_list_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplateList()
# Wrap the value into a proper Response obj
json_return_value = compute.InstanceTemplateList.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list(project="project_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
def test_list_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListInstanceTemplatesRequest(), project="project_value",
)
def test_list_pager():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Set the response as a series of pages
response = (
compute.InstanceTemplateList(
items=[
compute.InstanceTemplate(),
compute.InstanceTemplate(),
compute.InstanceTemplate(),
],
next_page_token="abc",
),
compute.InstanceTemplateList(items=[], next_page_token="def",),
compute.InstanceTemplateList(
items=[compute.InstanceTemplate(),], next_page_token="ghi",
),
compute.InstanceTemplateList(
items=[compute.InstanceTemplate(), compute.InstanceTemplate(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.InstanceTemplateList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
metadata = ()
pager = client.list(request={})
assert pager._metadata == metadata
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.InstanceTemplate) for i in results)
pages = list(client.list(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
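# Illustrative sketch (not part of the generated test suite): the pager
# returned by client.list() can be consumed item-by-item or page-by-page,
# which is the behaviour the test above verifies. "project_value" is a
# placeholder, as elsewhere in this file.
def _example_consume_list_pager(client):
    # Iterate items across pages transparently.
    for template in client.list(project="project_value"):
        print(template.name)
    # Or walk the raw pages to inspect pagination tokens.
    for page in client.list(project="project_value").pages:
        print(page.raw_page.next_page_token)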
def test_set_iam_policy_rest(
transport: str = "rest", request_type=compute.SetIamPolicyInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(
audit_configs=[
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(
exempted_members=["exempted_members_value"]
)
]
)
],
bindings=[compute.Binding(binding_id="binding_id_value")],
etag="etag_value",
iam_owned=True,
rules=[compute.Rule(action=compute.Rule.Action.ALLOW)],
version=774,
)
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.audit_configs == [
compute.AuditConfig(
audit_log_configs=[
compute.AuditLogConfig(exempted_members=["exempted_members_value"])
]
)
]
assert response.bindings == [compute.Binding(binding_id="binding_id_value")]
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.rules == [compute.Rule(action=compute.Rule.Action.ALLOW)]
assert response.version == 774
def test_set_iam_policy_rest_from_dict():
test_set_iam_policy_rest(request_type=dict)
def test_set_iam_policy_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Wrap the value into a proper Response obj
json_return_value = compute.Policy.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
global_set_policy_request_resource = compute.GlobalSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
)
client.set_iam_policy(
project="project_value",
resource="resource_value",
global_set_policy_request_resource=global_set_policy_request_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "resource_value" in http_call[1] + str(body)
assert compute.GlobalSetPolicyRequest.to_json(
global_set_policy_request_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body)
def test_set_iam_policy_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_iam_policy(
compute.SetIamPolicyInstanceTemplateRequest(),
project="project_value",
resource="resource_value",
global_set_policy_request_resource=compute.GlobalSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
),
)
def test_test_iam_permissions_rest(
transport: str = "rest",
request_type=compute.TestIamPermissionsInstanceTemplateRequest,
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse(
permissions=["permissions_value"],
)
# Wrap the value into a proper Response obj
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.TestPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_rest_from_dict():
test_test_iam_permissions_rest(request_type=dict)
def test_test_iam_permissions_rest_flattened():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# Wrap the value into a proper Response obj
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value = Response()
response_value.status_code = 200
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
test_permissions_request_resource = compute.TestPermissionsRequest(
permissions=["permissions_value"]
)
client.test_iam_permissions(
project="project_value",
resource="resource_value",
test_permissions_request_resource=test_permissions_request_resource,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, http_call, http_params = req.mock_calls[0]
body = http_params.get("data")
assert "project_value" in http_call[1] + str(body)
assert "resource_value" in http_call[1] + str(body)
assert compute.TestPermissionsRequest.to_json(
test_permissions_request_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
) in http_call[1] + str(body)
def test_test_iam_permissions_rest_flattened_error():
client = InstanceTemplatesClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
compute.TestIamPermissionsInstanceTemplateRequest(),
project="project_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = InstanceTemplatesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize(
"transport_class", [transports.InstanceTemplatesRestTransport,]
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_instance_templates_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.InstanceTemplatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_instance_templates_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.InstanceTemplatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"get_iam_policy",
"insert",
"list",
"set_iam_policy",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_instance_templates_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstanceTemplatesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_instance_templates_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstanceTemplatesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_instance_templates_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstanceTemplatesTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_instance_templates_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
InstanceTemplatesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_instance_templates_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
InstanceTemplatesClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_instance_templates_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.InstanceTemplatesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_instance_templates_host_no_port():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_instance_templates_host_with_port():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = InstanceTemplatesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = InstanceTemplatesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = InstanceTemplatesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = InstanceTemplatesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = InstanceTemplatesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = InstanceTemplatesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = InstanceTemplatesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = InstanceTemplatesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = InstanceTemplatesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = InstanceTemplatesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.InstanceTemplatesTransport, "_prep_wrapped_messages"
) as prep:
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.InstanceTemplatesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = InstanceTemplatesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [] | [] | [] | [] | [] | python | 0 | 0 | 
controller/controller.go | package controller
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/TwinProduction/gatus/config"
"github.com/TwinProduction/gatus/security"
"github.com/TwinProduction/gatus/storage"
"github.com/TwinProduction/gocache"
"github.com/TwinProduction/health"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
cacheTTL = 10 * time.Second
)
var (
cache = gocache.NewCache().WithMaxSize(100).WithEvictionPolicy(gocache.FirstInFirstOut)
// staticFolder is the path to the location of the static folder from the root path of the project
// The only reason this is exposed is to allow running tests from a different path than the root path of the project
staticFolder = "./web/static"
// server is the http.Server created by Handle.
// The only reason it exists is for testing purposes.
server *http.Server
)
func init() {
if err := cache.StartJanitor(); err != nil {
log.Fatal("[controller][init] Failed to start cache janitor:", err.Error())
}
}
// Handle creates the router and starts the server
func Handle(securityConfig *security.Config, webConfig *config.WebConfig, enableMetrics bool) {
var router http.Handler = CreateRouter(securityConfig, enableMetrics)
if os.Getenv("ENVIRONMENT") == "dev" {
router = developmentCorsHandler(router)
}
server = &http.Server{
Addr: fmt.Sprintf("%s:%d", webConfig.Address, webConfig.Port),
Handler: router,
ReadTimeout: 15 * time.Second,
WriteTimeout: 15 * time.Second,
IdleTimeout: 15 * time.Second,
}
log.Println("[controller][Handle] Listening on " + webConfig.SocketAddress())
if os.Getenv("ROUTER_TEST") == "true" {
return
}
log.Println("[controller][Handle]", server.ListenAndServe())
}
// Shutdown stops the server
func Shutdown() {
if server != nil {
_ = server.Shutdown(context.TODO())
server = nil
}
}
// CreateRouter creates the router for the http server
func CreateRouter(securityConfig *security.Config, enabledMetrics bool) *mux.Router {
router := mux.NewRouter()
if enabledMetrics {
router.Handle("/metrics", promhttp.Handler()).Methods("GET")
}
router.Handle("/health", health.Handler().WithJSON(true)).Methods("GET")
router.HandleFunc("/favicon.ico", favIconHandler).Methods("GET")
router.HandleFunc("/api/v1/statuses", secureIfNecessary(securityConfig, serviceStatusesHandler)).Methods("GET") // No GzipHandler for this one, because we cache the content
router.HandleFunc("/api/v1/statuses/{key}", secureIfNecessary(securityConfig, GzipHandlerFunc(serviceStatusHandler))).Methods("GET")
router.HandleFunc("/api/v1/badges/uptime/{duration}/{identifier}", badgeHandler).Methods("GET")
// SPA
router.HandleFunc("/services/{service}", spaHandler).Methods("GET")
// Everything else falls back on static content
router.PathPrefix("/monitor").Handler(GzipHandler(http.FileServer(http.Dir(staticFolder))))
return router
}
func secureIfNecessary(securityConfig *security.Config, handler http.HandlerFunc) http.HandlerFunc {
if securityConfig != nil && securityConfig.IsValid() {
return security.Handler(handler, securityConfig)
}
return handler
}
// serviceStatusesHandler handles requests to retrieve all service statuses
// Due to the size of the response, this function leverages a cache.
// Must not be wrapped by GzipHandler
func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
page, pageSize := extractPageAndPageSizeFromRequest(r)
gzipped := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip")
var exists bool
var value interface{}
if gzipped {
writer.Header().Set("Content-Encoding", "gzip")
value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize))
} else {
value, exists = cache.Get(fmt.Sprintf("service-status-%d-%d", page, pageSize))
}
var data []byte
if !exists {
var err error
buffer := &bytes.Buffer{}
gzipWriter := gzip.NewWriter(buffer)
data, err = json.Marshal(storage.Get().GetAllServiceStatusesWithResultPagination(page, pageSize))
if err != nil {
log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
writer.WriteHeader(http.StatusInternalServerError)
_, _ = writer.Write([]byte("Unable to marshal object to JSON"))
return
}
_, _ = gzipWriter.Write(data)
_ = gzipWriter.Close()
gzippedData := buffer.Bytes()
cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d", page, pageSize), data, cacheTTL)
cache.SetWithTTL(fmt.Sprintf("service-status-%d-%d-gzipped", page, pageSize), gzippedData, cacheTTL)
if gzipped {
data = gzippedData
}
} else {
data = value.([]byte)
}
writer.Header().Add("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
_, _ = writer.Write(data)
}
// serviceStatusHandler retrieves a single ServiceStatus by group name and service name
func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
page, pageSize := extractPageAndPageSizeFromRequest(r)
vars := mux.Vars(r)
serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"])
if serviceStatus == nil {
log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"])
writer.WriteHeader(http.StatusNotFound)
_, _ = writer.Write([]byte("not found"))
return
}
data := map[string]interface{}{
"serviceStatus": serviceStatus.WithResultPagination(page, pageSize),
// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
// expose only the necessary data on /api/v1/statuses.
// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
"events": serviceStatus.Events,
"uptime": serviceStatus.Uptime,
}
output, err := json.Marshal(data)
if err != nil {
log.Printf("[controller][serviceStatusHandler] Unable to marshal object to JSON: %s", err.Error())
writer.WriteHeader(http.StatusInternalServerError)
_, _ = writer.Write([]byte("unable to marshal object to JSON"))
return
}
writer.Header().Add("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
_, _ = writer.Write(output)
}
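// Illustrative sketch (not part of the original file): the router returned by
// CreateRouter can be exercised directly in a test with net/http/httptest;
// passing a nil *security.Config disables authentication here.
//
//	router := CreateRouter(nil, false)
//	recorder := httptest.NewRecorder()
//	router.ServeHTTP(recorder, httptest.NewRequest("GET", "/health", nil))
//	// recorder.Code and recorder.Body now hold the health handler's response.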
| ["\"ENVIRONMENT\"", "\"ROUTER_TEST\""] | [] | ["ENVIRONMENT", "ROUTER_TEST"] | [] | ["ENVIRONMENT", "ROUTER_TEST"] | go | 2 | 0 | 
internal/utils/k8s.go | package utils
import (
"context"
"fmt"
"io/ioutil"
"os"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
authorization "k8s.io/client-go/kubernetes/typed/authorization/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// Namespace can retrieve current namespace
func Namespace() (string, error) {
nsPath := "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
if FileExists(nsPath) {
// Running in k8s cluster
nsBytes, err := ioutil.ReadFile(nsPath)
if err != nil {
return "", fmt.Errorf("could not read file %s", nsPath)
}
return string(nsBytes), nil
}
// Not running in k8s cluster (may be running locally)
ns := os.Getenv("NAMESPACE")
if ns == "" {
ns = "cd-system"
}
return ns, nil
}
// AuthClient is a K8s client for Authorization
func AuthClient() (*authorization.AuthorizationV1Client, error) {
cfg, err := config.GetConfig()
if err != nil {
return nil, err
}
c, err := authorization.NewForConfig(cfg)
if err != nil {
return nil, err
}
return c, nil
}
// Client is a k8s client
func Client(scheme *runtime.Scheme) (client.Client, error) {
cfg, err := config.GetConfig()
if err != nil {
return nil, err
}
c, err := client.New(cfg, client.Options{Scheme: scheme})
if err != nil {
return nil, err
}
return c, nil
}
// CreateOrPatchObject patches the object if it exists, and creates one if not
func CreateOrPatchObject(obj, original, parent client.Object, cli client.Client, scheme *runtime.Scheme) error {
if obj == nil {
return fmt.Errorf("obj cannot be nil")
}
objMeta, objOk := obj.(metav1.Object)
if !objOk {
return fmt.Errorf("obj cannot be casted to metav1.Object")
}
if parent != nil {
parentMeta, parentOk := parent.(metav1.Object)
if !parentOk {
return fmt.Errorf("parent cannot be casted to metav1.Object")
}
if err := controllerutil.SetControllerReference(parentMeta, objMeta, scheme); err != nil {
return err
}
}
// Update if resourceVersion exists, but create if not
if objMeta.GetResourceVersion() != "" {
if original == nil {
return fmt.Errorf("original object exists but not passed")
}
p := client.MergeFrom(original)
if err := cli.Patch(context.Background(), obj, p); err != nil {
return err
}
} else {
if err := cli.Create(context.Background(), obj); err != nil {
return err
}
}
return nil
}
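// Illustrative sketch (not part of the original file): outside a cluster,
// Namespace falls back to the NAMESPACE environment variable (or "cd-system"
// when unset), and CreateOrPatchObject expects a deep copy taken before the
// object was mutated so the merge patch can be computed. For example, with a
// hypothetical *corev1.ConfigMap cm owned by parent:
//
//	original := cm.DeepCopy()
//	cm.Data["key"] = "value"
//	err := CreateOrPatchObject(cm, original, parent, cli, scheme)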
| ["\"NAMESPACE\""] | [] | ["NAMESPACE"] | [] | ["NAMESPACE"] | go | 1 | 0 | 
src/bot/main.go | package main
import (
"fmt"
"math/rand"
"os"
"time"
"strings"
slackbot "github.com/BeepBoopHQ/go-slackbot"
"github.com/nlopes/slack"
"golang.org/x/net/context"
)
const (
WithTyping = slackbot.WithTyping
WithoutTyping = slackbot.WithoutTyping
HelpText = "I will respond to the following messages: \n" +
"`bot hi` for a simple message.\n" +
"`bot attachment` to see a Slack attachment message.\n" +
"`hey @<your bot's name>` to demonstrate detecting a mention.\n" +
"`bot help` to see this again."
)
var greetingPrefixes = []string{"Hi", "Hello", "Howdy", "Wazzzup", "Hey"}
func main() {
bot := slackbot.New(os.Getenv("SLACK_TOKEN"))
toMe := bot.Messages(slackbot.DirectMessage, slackbot.DirectMention).Subrouter()
hi := "hi|hello|bot hi|bot hello"
xkcd := "xkcd"
bot.Hear(xkcd).MessageHandler(XkcdHandler)
toMe.Hear(hi).MessageHandler(HelloHandler)
bot.Hear(hi).MessageHandler(HelloHandler)
bot.Hear("help|bot help").MessageHandler(HelpHandler)
bot.Hear("attachment|bot attachment").MessageHandler(AttachmentsHandler)
    bot.Hear(`<@([a-zA-Z0-9]+)?>`).MessageHandler(MentionHandler)
bot.Hear("(bot ).*").MessageHandler(CatchAllHandler)
bot.Run()
}
func XkcdHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
msg := strings.Replace(Xkcd(), "\\", "", -1)
bot.Reply(evt, msg, WithTyping)
}
func HelloHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
rand.Seed(time.Now().UnixNano())
msg := greetingPrefixes[rand.Intn(len(greetingPrefixes))] + " <@" + evt.User + ">!"
bot.Reply(evt, msg, WithTyping)
if slackbot.IsDirectMessage(evt) {
dmMsg := "It's nice to talk to you directly."
bot.Reply(evt, dmMsg, WithoutTyping)
}
bot.Reply(evt, "If you'd like to talk some more, "+HelpText, WithTyping)
}
func CatchAllHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
msg := fmt.Sprintf("I'm sorry, I don't know how to: `%s`.\n%s", evt.Text, HelpText)
bot.Reply(evt, msg, WithTyping)
}
func MentionHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
if slackbot.IsMentioned(evt, bot.BotUserID()) {
bot.Reply(evt, "You really do care about me. :heart:", WithTyping)
}
}
func HelpHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
bot.Reply(evt, HelpText, WithTyping)
}
func AttachmentsHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
txt := "Beep Beep Boop is a ridiculously simple hosting platform for your Slackbots."
attachment := slack.Attachment{
Pretext: "We bring bots to life. :sunglasses: :thumbsup:",
Title: "Host, deploy and share your bot in seconds.",
TitleLink: "https://beepboophq.com/",
Text: txt,
Fallback: txt,
ImageURL: "https://storage.googleapis.com/beepboophq/_assets/bot-1.22f6fb.png",
Color: "#7CD197",
}
// supports multiple attachments
attachments := []slack.Attachment{attachment}
bot.ReplyWithAttachments(evt, attachments, WithTyping)
}
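// Illustrative note (not part of the original file): the bot reads its token
// from the SLACK_TOKEN environment variable, so a typical local run looks
// like the following, where the token value is a placeholder:
//
//	SLACK_TOKEN=xoxb-placeholder go run ./src/bot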
| ["\"SLACK_TOKEN\""] | [] | ["SLACK_TOKEN"] | [] | ["SLACK_TOKEN"] | go | 1 | 0 | 
gluon/tools.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Auth, Mail, PluginManager and various utilities
------------------------------------------------
"""
import base64
from functools import reduce
from gluon._compat import pickle, thread, urllib2, Cookie, StringIO, urlencode
from gluon._compat import configparser, MIMEBase, MIMEMultipart, MIMEText, Header
from gluon._compat import Encoders, Charset, long, urllib_quote, iteritems
from gluon._compat import to_bytes, to_native, add_charset
from gluon._compat import charset_QP, basestring, unicodeT, to_unicode
import datetime
import logging
import sys
import glob
import os
import re
import time
import fnmatch
import traceback
import smtplib
import email.utils
import random
import hmac
import hashlib
import json
from email import message_from_string
from gluon.authapi import AuthAPI
from gluon.contenttype import contenttype
from gluon.storage import Storage, StorageList, Settings, Messages
from gluon.utils import web2py_uuid, compare
from gluon.fileutils import read_file, check_credentials
from gluon import *
from gluon.contrib.autolinks import expand_one
from gluon.contrib.markmin.markmin2html import replace_at_urls
from gluon.contrib.markmin.markmin2html import replace_autolinks
from gluon.contrib.markmin.markmin2html import replace_components
from pydal.objects import Row, Set, Query
import gluon.serializers as serializers
Table = DAL.Table
Field = DAL.Field
__all__ = ['Mail', 'Auth', 'Recaptcha2', 'Crud', 'Service', 'Wiki',
'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate']
# mind there are two loggers here (logger and crud.settings.logger)!
logger = logging.getLogger("web2py")
DEFAULT = lambda: None
def getarg(position, default=None):
args = current.request.args
if position < 0 and len(args) >= -position:
return args[position]
elif position >= 0 and len(args) > position:
return args[position]
else:
return default
def callback(actions, form, tablename=None):
if actions:
if tablename and isinstance(actions, dict):
actions = actions.get(tablename, [])
if not isinstance(actions, (list, tuple)):
actions = [actions]
[action(form) for action in actions]
def validators(*a):
b = []
for item in a:
if isinstance(item, (list, tuple)):
b = b + list(item)
else:
b.append(item)
return b
def call_or_redirect(f, *args):
if callable(f):
redirect(f(*args))
else:
redirect(f)
def replace_id(url, form):
if url:
url = url.replace('[id]', str(form.vars.id))
if url[0] == '/' or url[:4] == 'http':
return url
return URL(url)
class Mail(object):
"""
Class for configuring and sending emails with alternative text / html
body, multiple attachments and encryption support
Works with SMTP and Google App Engine.
Args:
server: SMTP server address in address:port notation
sender: sender email address
login: sender login name and password in login:password notation
or None if no authentication is required
tls: enables/disables encryption (True by default)
In Google App Engine use ::
server='gae'
For sake of backward compatibility all fields are optional and default
to None, however, to be able to send emails at least server and sender
must be specified. They are available under following fields::
mail.settings.server
mail.settings.sender
mail.settings.login
        mail.settings.timeout = 5 # seconds (default)
When server is 'logging', email is logged but not sent (debug mode)
Optionally you can use PGP encryption or X509::
mail.settings.cipher_type = None
mail.settings.gpg_home = None
mail.settings.sign = True
mail.settings.sign_passphrase = None
mail.settings.encrypt = True
mail.settings.x509_sign_keyfile = None
mail.settings.x509_sign_certfile = None
mail.settings.x509_sign_chainfile = None
mail.settings.x509_nocerts = False
mail.settings.x509_crypt_certfiles = None
cipher_type : None
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults
to True
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name /
string or a list of file names /
strings (PEM format)
Examples:
Create Mail object with authentication data for remote server::
mail = Mail('example.com:25', '[email protected]', 'me:password')
Notice for GAE users:
        attachments have an automatic content_id='attachment-i' where i is a progressive number;
        in this way they can be referenced from the HTML as <img src="cid:attachment-0" /> etc.
"""
class Attachment(MIMEBase):
"""
Email attachment
Args:
payload: path to file or file-like object with read() method
filename: name of the attachment stored in message; if set to
None, it will be fetched from payload path; file-like
object payload must have explicit filename specified
content_id: id of the attachment; automatically contained within
`<` and `>`
content_type: content type of the attachment; if set to None,
it will be fetched from filename using gluon.contenttype
module
encoding: encoding of all strings passed to this function (except
attachment body)
Content ID is used to identify attachments within the html body;
            for example, an attached image with content ID 'photo' may be used in
            the html message as the source of an img tag `<img src="cid:photo" />`.
Example::
Create attachment from text file::
attachment = Mail.Attachment('/path/to/file.txt')
Content-Type: text/plain
MIME-Version: 1.0
Content-Disposition: attachment; filename="file.txt"
Content-Transfer-Encoding: base64
SOMEBASE64CONTENT=
Create attachment from image file with custom filename and cid::
attachment = Mail.Attachment('/path/to/file.png',
filename='photo.png',
content_id='photo')
Content-Type: image/png
MIME-Version: 1.0
Content-Disposition: attachment; filename="photo.png"
Content-Id: <photo>
Content-Transfer-Encoding: base64
SOMEOTHERBASE64CONTENT=
"""
def __init__(
self,
payload,
filename=None,
content_id=None,
content_type=None,
encoding='utf-8'):
if isinstance(payload, str):
if filename is None:
filename = os.path.basename(payload)
payload = read_file(payload, 'rb')
else:
if filename is None:
raise Exception('Missing attachment name')
payload = payload.read()
# FIXME PY3 can be used to_native?
filename = filename.encode(encoding)
if content_type is None:
content_type = contenttype(filename)
self.my_filename = filename
self.my_payload = payload
MIMEBase.__init__(self, *content_type.split('/', 1))
self.set_payload(payload)
self['Content-Disposition'] = 'attachment; filename="%s"' % to_native(filename, encoding)
if content_id is not None:
self['Content-Id'] = '<%s>' % to_native(content_id, encoding)
Encoders.encode_base64(self)
def __init__(self, server=None, sender=None, login=None, tls=True):
settings = self.settings = Settings()
settings.server = server
settings.sender = sender
settings.login = login
settings.tls = tls
settings.timeout = 5 # seconds
settings.hostname = None
settings.ssl = False
settings.cipher_type = None
settings.gpg_home = None
settings.sign = True
settings.sign_passphrase = None
settings.encrypt = True
settings.x509_sign_keyfile = None
settings.x509_sign_certfile = None
settings.x509_sign_chainfile = None
settings.x509_nocerts = False
settings.x509_crypt_certfiles = None
settings.debug = False
settings.lock_keys = True
self.result = {}
self.error = None
def send(self,
to,
subject='[no subject]',
message='[no message]',
attachments=None,
cc=None,
bcc=None,
reply_to=None,
sender=None,
encoding='utf-8',
raw=False,
headers={},
from_address=None,
cipher_type=None,
sign=None,
sign_passphrase=None,
encrypt=None,
x509_sign_keyfile=None,
x509_sign_chainfile=None,
x509_sign_certfile=None,
x509_crypt_certfiles=None,
x509_nocerts=None
):
"""
Sends an email using data specified in constructor
Args:
to: list or tuple of receiver addresses; will also accept single
object
subject: subject of the email
message: email body text; depends on type of passed object:
- if 2-list or 2-tuple is passed: first element will be
source of plain text while second of html text;
- otherwise: object will be the only source of plain text
and html source will be set to None
If text or html source is:
- None: content part will be ignored,
- string: content part will be set to it,
- file-like object: content part will be fetched from it using
it's read() method
attachments: list or tuple of Mail.Attachment objects; will also
accept single object
cc: list or tuple of carbon copy receiver addresses; will also
accept single object
bcc: list or tuple of blind carbon copy receiver addresses; will
also accept single object
reply_to: address to which reply should be composed
encoding: encoding of all strings passed to this method (including
message bodies)
headers: dictionary of headers to refine the headers just before
sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}`
from_address: address to appear in the 'From:' header, this is not
the envelope sender. If not specified the sender will be used
cipher_type :
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults to True.
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name / string or
a list of file names / strings (PEM format)
Examples:
Send plain text message to single address::
mail.send('[email protected]',
'Message subject',
'Plain text body of the message')
Send html message to single address::
mail.send('[email protected]',
'Message subject',
'<html>Plain text body of the message</html>')
Send text and html message to three addresses (two in cc)::
mail.send('[email protected]',
'Message subject',
('Plain text body', '<html>html body</html>'),
cc=['[email protected]', '[email protected]'])
Send html only message with image attachment available from the
message by 'photo' content id::
mail.send('[email protected]',
'Message subject',
(None, '<html><img src="cid:photo" /></html>'),
Mail.Attachment('/path/to/photo.jpg'
content_id='photo'))
Send email with two attachments and no body text::
                mail.send('[email protected]',
'Message subject',
None,
[Mail.Attachment('/path/to/fist.file'),
Mail.Attachment('/path/to/second.file')])
Returns:
True on success, False on failure.
Before return, method updates two object's fields:
- self.result: return value of smtplib.SMTP.sendmail() or GAE's
mail.send_mail() method
- self.error: Exception message or None if above was successful
"""
# We don't want to use base64 encoding for unicode mail
add_charset('utf-8', charset_QP, charset_QP, 'utf-8')
def encode_header(key):
if [c for c in key if 32 > ord(c) or ord(c) > 127]:
return Header(key.encode('utf-8'), 'utf-8')
else:
return key
# encoded or raw text
def encoded_or_raw(text):
if raw:
text = encode_header(text)
return text
sender = sender or self.settings.sender
if not isinstance(self.settings.server, str):
raise Exception('Server address not specified')
if not isinstance(sender, str):
raise Exception('Sender address not specified')
if not raw and attachments:
# Use multipart/mixed if there is attachments
payload_in = MIMEMultipart('mixed')
elif raw:
# no encoding configuration for raw messages
if not isinstance(message, basestring):
message = message.read()
if isinstance(message, unicodeT):
text = message.encode('utf-8')
elif not encoding == 'utf-8':
text = message.decode(encoding).encode('utf-8')
else:
text = message
# No charset passed to avoid transport encoding
# NOTE: some unicode encoded strings will produce
# unreadable mail contents.
payload_in = MIMEText(text)
if to:
if not isinstance(to, (list, tuple)):
to = [to]
else:
raise Exception('Target receiver address not specified')
if cc:
if not isinstance(cc, (list, tuple)):
cc = [cc]
if bcc:
if not isinstance(bcc, (list, tuple)):
bcc = [bcc]
if message is None:
text = html = None
elif isinstance(message, (list, tuple)):
text, html = message
elif message.strip().startswith('<html') and \
message.strip().endswith('</html>'):
text = self.settings.server == 'gae' and message or None
html = message
else:
text = message
html = None
if (text is not None or html is not None) and (not raw):
if text is not None:
if not isinstance(text, basestring):
text = text.read()
if isinstance(text, unicodeT):
text = text.encode('utf-8')
elif not encoding == 'utf-8':
text = text.decode(encoding).encode('utf-8')
if html is not None:
if not isinstance(html, basestring):
html = html.read()
if isinstance(html, unicodeT):
html = html.encode('utf-8')
elif not encoding == 'utf-8':
html = html.decode(encoding).encode('utf-8')
# Construct mime part only if needed
if text is not None and html:
# We have text and html we need multipart/alternative
attachment = MIMEMultipart('alternative')
attachment.attach(MIMEText(text, _charset='utf-8'))
attachment.attach(MIMEText(html, 'html', _charset='utf-8'))
elif text is not None:
attachment = MIMEText(text, _charset='utf-8')
elif html:
attachment = MIMEText(html, 'html', _charset='utf-8')
if attachments:
# If there are attachments, put text and html into
# multipart/mixed
payload_in.attach(attachment)
else:
# No attachments, no multipart/mixed needed
payload_in = attachment
if (attachments is None) or raw:
pass
elif isinstance(attachments, (list, tuple)):
for attachment in attachments:
payload_in.attach(attachment)
else:
payload_in.attach(attachments)
attachments = [attachments]
#######################################################
# CIPHER #
#######################################################
cipher_type = cipher_type or self.settings.cipher_type
sign = sign if sign is not None else self.settings.sign
sign_passphrase = sign_passphrase or self.settings.sign_passphrase
encrypt = encrypt if encrypt is not None else self.settings.encrypt
#######################################################
# GPGME #
#######################################################
if cipher_type == 'gpg':
if self.settings.gpg_home:
# Set GNUPGHOME environment variable to set home of gnupg
import os
os.environ['GNUPGHOME'] = self.settings.gpg_home
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to gpg"
return False
# need a python-pyme package and gpgme lib
from pyme import core, errors
from pyme.constants.sig import mode
############################################
# sign #
############################################
if sign:
import string
core.check_version(None)
pin = string.replace(payload_in.as_string(), '\n', '\r\n')
plain = core.Data(pin)
sig = core.Data()
c = core.Context()
c.set_armor(1)
c.signers_clear()
# search for signing key for From:
for sigkey in c.op_keylist_all(sender, 1):
if sigkey.can_sign:
c.signers_add(sigkey)
if not c.signers_enum(0):
self.error = 'No key for signing [%s]' % sender
return False
c.set_passphrase_cb(lambda x, y, z: sign_passphrase)
try:
# make a signature
c.op_sign(plain, sig, mode.DETACH)
sig.seek(0, 0)
# make it part of the email
payload = MIMEMultipart('signed',
boundary=None,
_subparts=None,
**dict(micalg="pgp-sha1",
protocol="application/pgp-signature"))
# insert the origin payload
payload.attach(payload_in)
# insert the detached signature
p = MIMEBase("application", 'pgp-signature')
p.set_payload(sig.read())
payload.attach(p)
# it's just a trick to handle the no encryption case
payload_in = payload
except errors.GPGMEError as ex:
self.error = "GPG error: %s" % ex.getstring()
return False
############################################
# encrypt #
############################################
if encrypt:
core.check_version(None)
plain = core.Data(payload_in.as_string())
cipher = core.Data()
c = core.Context()
c.set_armor(1)
# collect the public keys for encryption
recipients = []
rec = to[:]
if cc:
rec.extend(cc)
if bcc:
rec.extend(bcc)
for addr in rec:
c.op_keylist_start(addr, 0)
r = c.op_keylist_next()
if r is None:
self.error = 'No key for [%s]' % addr
return False
recipients.append(r)
try:
# make the encryption
c.op_encrypt(recipients, 1, plain, cipher)
cipher.seek(0, 0)
# make it a part of the email
payload = MIMEMultipart('encrypted',
boundary=None,
_subparts=None,
**dict(protocol="application/pgp-encrypted"))
p = MIMEBase("application", 'pgp-encrypted')
p.set_payload("Version: 1\r\n")
payload.attach(p)
p = MIMEBase("application", 'octet-stream')
p.set_payload(cipher.read())
payload.attach(p)
except errors.GPGMEError as ex:
self.error = "GPG error: %s" % ex.getstring()
return False
#######################################################
# X.509 #
#######################################################
elif cipher_type == 'x509':
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to x509"
return False
import os
x509_sign_keyfile = x509_sign_keyfile or self.settings.x509_sign_keyfile
x509_sign_chainfile = x509_sign_chainfile or self.settings.x509_sign_chainfile
x509_sign_certfile = x509_sign_certfile or self.settings.x509_sign_certfile or \
x509_sign_keyfile or self.settings.x509_sign_keyfile
# crypt certfiles could be a string or a list
x509_crypt_certfiles = x509_crypt_certfiles or self.settings.x509_crypt_certfiles
x509_nocerts = x509_nocerts or self.settings.x509_nocerts
# need m2crypto
try:
from M2Crypto import BIO, SMIME, X509
except Exception as e:
self.error = "Can't load M2Crypto module"
return False
msg_bio = BIO.MemoryBuffer(payload_in.as_string())
s = SMIME.SMIME()
# SIGN
if sign:
# key for signing
try:
keyfile_bio = BIO.openfile(x509_sign_keyfile)\
if os.path.isfile(x509_sign_keyfile)\
else BIO.MemoryBuffer(x509_sign_keyfile)
sign_certfile_bio = BIO.openfile(x509_sign_certfile)\
if os.path.isfile(x509_sign_certfile)\
else BIO.MemoryBuffer(x509_sign_certfile)
s.load_key_bio(keyfile_bio, sign_certfile_bio,
callback=lambda x: sign_passphrase)
if x509_sign_chainfile:
sk = X509.X509_Stack()
chain = X509.load_cert(x509_sign_chainfile)\
if os.path.isfile(x509_sign_chainfile)\
else X509.load_cert_string(x509_sign_chainfile)
sk.push(chain)
s.set_x509_stack(sk)
except Exception as e:
self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e)
return False
try:
if x509_nocerts:
flags = SMIME.PKCS7_NOCERTS
else:
flags = 0
if not encrypt:
flags += SMIME.PKCS7_DETACHED
p7 = s.sign(msg_bio, flags=flags)
msg_bio = BIO.MemoryBuffer(payload_in.as_string(
)) # Recreate coz sign() has consumed it.
except Exception as e:
self.error = "Something went wrong on signing: <%s> %s" % (
str(e), str(flags))
return False
# ENCRYPT
if encrypt:
try:
sk = X509.X509_Stack()
if not isinstance(x509_crypt_certfiles, (list, tuple)):
x509_crypt_certfiles = [x509_crypt_certfiles]
# make an encryption cert's stack
for crypt_certfile in x509_crypt_certfiles:
certfile = X509.load_cert(crypt_certfile)\
if os.path.isfile(crypt_certfile)\
else X509.load_cert_string(crypt_certfile)
sk.push(certfile)
s.set_x509_stack(sk)
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
tmp_bio = BIO.MemoryBuffer()
if sign:
s.write(tmp_bio, p7)
else:
tmp_bio.write(payload_in.as_string())
p7 = s.encrypt(tmp_bio)
except Exception as e:
self.error = "Something went wrong on encrypting: <%s>" % str(e)
return False
# Final stage in sign and encryption
out = BIO.MemoryBuffer()
if encrypt:
s.write(out, p7)
else:
if sign:
s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
else:
out.write('\r\n')
out.write(payload_in.as_string())
out.close()
st = str(out.read())
payload = message_from_string(st)
else:
# no cryptography; process the payload as usual
payload = payload_in
if from_address:
payload['From'] = encoded_or_raw(to_unicode(from_address, encoding))
else:
payload['From'] = encoded_or_raw(to_unicode(sender, encoding))
origTo = to[:]
if to:
payload['To'] = encoded_or_raw(to_unicode(', '.join(to), encoding))
if reply_to:
payload['Reply-To'] = encoded_or_raw(to_unicode(reply_to, encoding))
if cc:
payload['Cc'] = encoded_or_raw(to_unicode(', '.join(cc), encoding))
to.extend(cc)
if bcc:
to.extend(bcc)
payload['Subject'] = encoded_or_raw(to_unicode(subject, encoding))
payload['Date'] = email.utils.formatdate()
for k, v in iteritems(headers):
payload[k] = encoded_or_raw(to_unicode(v, encoding))
result = {}
try:
if self.settings.server == 'logging':
entry = 'email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % \
('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)
logger.warning(entry)
elif self.settings.server.startswith('logging:'):
entry = 'email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % \
('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)
open(self.settings.server[8:], 'a').write(entry)
elif self.settings.server == 'gae':
xcc = dict()
if cc:
xcc['cc'] = cc
if bcc:
xcc['bcc'] = bcc
if reply_to:
xcc['reply_to'] = reply_to
from google.appengine.api import mail
attachments = attachments and [mail.Attachment(
a.my_filename,
a.my_payload,
content_id='<attachment-%s>' % k
) for k, a in enumerate(attachments) if not raw]
if attachments:
result = mail.send_mail(
sender=sender, to=origTo,
subject=to_unicode(subject, encoding),
body=to_unicode(text or '', encoding),
html=html,
attachments=attachments, **xcc)
elif html and (not raw):
result = mail.send_mail(
sender=sender, to=origTo,
subject=to_unicode(subject, encoding), body=to_unicode(text or '', encoding), html=html, **xcc)
else:
result = mail.send_mail(
sender=sender, to=origTo,
subject=to_unicode(subject, encoding), body=to_unicode(text or '', encoding), **xcc)
elif self.settings.server == 'aws':
import boto3
from botocore.exceptions import ClientError
client = boto3.client('ses')
try:
raw = {'Data': payload.as_string()}
response = client.send_raw_email(RawMessage=raw,
Source=sender,
Destinations=to)
return True
except ClientError as e:
# we should log this error:
# print e.response['Error']['Message']
return False
else:
smtp_args = self.settings.server.split(':')
kwargs = dict(timeout=self.settings.timeout)
func = smtplib.SMTP_SSL if self.settings.ssl else smtplib.SMTP
server = func(*smtp_args, **kwargs)
try:
if self.settings.tls and not self.settings.ssl:
server.ehlo(self.settings.hostname)
server.starttls()
server.ehlo(self.settings.hostname)
if self.settings.login:
server.login(*self.settings.login.split(':', 1))
result = server.sendmail(sender, to, payload.as_string())
finally:
server.quit()
except Exception as e:
logger.warning('Mail.send failure:%s' % e)
self.result = result
self.error = e
return False
self.result = result
self.error = None
return True
class Recaptcha2(DIV):
"""
Experimental:
Creates a DIV holding the newer Recaptcha from Google (v2)
Args:
request : the request. If not passed, uses current request
public_key : the public key Google gave you
private_key : the private key Google gave you
error_message : the error message to show if verification fails
label : the label to use
options (dict) : takes these parameters
- hl
- theme
- type
- tabindex
- callback
- expired-callback
see https://developers.google.com/recaptcha/docs/display for docs about those
comment : the comment
Examples:
Use as::
form = FORM(Recaptcha2(public_key='...', private_key='...'))
or::
form = SQLFORM(...)
form.append(Recaptcha2(public_key='...', private_key='...'))
to protect the login page instead, use::
from gluon.tools import Recaptcha2
auth.settings.captcha = Recaptcha2(request, public_key='...', private_key='...')
"""
API_URI = 'https://www.google.com/recaptcha/api.js'
VERIFY_SERVER = 'https://www.google.com/recaptcha/api/siteverify'
def __init__(self,
request=None,
public_key='',
private_key='',
error_message='invalid',
label='Verify:',
options=None,
comment='',
):
request = request or current.request
self.request_vars = request and request.vars or current.request.vars
self.remote_addr = request.env.remote_addr
self.public_key = public_key
self.private_key = private_key
self.errors = Storage()
self.error_message = error_message
self.components = []
self.attributes = {}
self.label = label
self.options = options or {}
self.comment = comment
def _validate(self):
recaptcha_response_field = self.request_vars.pop('g-recaptcha-response', None)
remoteip = self.remote_addr
if not recaptcha_response_field:
self.errors['captcha'] = self.error_message
return False
params = urlencode({
'secret': self.private_key,
'remoteip': remoteip,
'response': recaptcha_response_field,
})
request = urllib2.Request(
url=self.VERIFY_SERVER,
data=params,
headers={'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'reCAPTCHA Python'})
httpresp = urllib2.urlopen(request)
content = httpresp.read()
httpresp.close()
try:
response_dict = json.loads(content)
except:
self.errors['captcha'] = self.error_message
return False
if response_dict.get('success', False):
self.request_vars.captcha = ''
return True
else:
self.errors['captcha'] = self.error_message
return False
def xml(self):
api_uri = self.API_URI
hl = self.options.pop('hl', None)
if hl:
api_uri = self.API_URI + '?hl=%s' % hl
public_key = self.public_key
self.options['sitekey'] = public_key
captcha = DIV(
SCRIPT(_src=api_uri, _async='', _defer=''),
DIV(_class="g-recaptcha", data=self.options),
TAG.noscript(XML("""
<div style="width: 302px; height: 352px;">
<div style="width: 302px; height: 352px; position: relative;">
<div style="width: 302px; height: 352px; position: absolute;">
<iframe src="https://www.google.com/recaptcha/api/fallback?k=%(public_key)s"
frameborder="0" scrolling="no"
style="width: 302px; height:352px; border-style: none;">
</iframe>
</div>
<div style="width: 250px; height: 80px; position: absolute; border-style: none;
bottom: 21px; left: 25px; margin: 0px; padding: 0px; right: 25px;">
<textarea id="g-recaptcha-response" name="g-recaptcha-response"
class="g-recaptcha-response"
style="width: 250px; height: 80px; border: 1px solid #c1c1c1;
margin: 0px; padding: 0px; resize: none;" value="">
</textarea>
</div>
</div>
</div>""" % dict(public_key=public_key))
)
)
if not self.errors.captcha:
return XML(captcha).xml()
else:
captcha.append(DIV(self.errors['captcha'], _class='error'))
return XML(captcha).xml()
# this should only be used for captcha and perhaps not even for that
def addrow(form, a, b, c, style, _id, position=-1):
if style == "divs":
form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'),
DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'),
_id=_id))
elif style == "table2cols":
form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
TD(c, _class='w2p_fc')))
form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'),
_colspan=2, _id=_id))
elif style == "ul":
form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'),
DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'),
_id=_id))
elif style == "bootstrap":
form[0].insert(position, DIV(LABEL(a, _class='control-label'),
DIV(b, SPAN(c, _class='inline-help'),
_class='controls'),
_class='control-group', _id=_id))
elif style == "bootstrap3_inline":
form[0].insert(position, DIV(LABEL(a, _class='control-label col-sm-3'),
DIV(b, SPAN(c, _class='help-block'),
_class='col-sm-9'),
_class='form-group', _id=_id))
elif style == "bootstrap3_stacked":
form[0].insert(position, DIV(LABEL(a, _class='control-label'),
b, SPAN(c, _class='help-block'),
_class='form-group', _id=_id))
else:
form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
TD(b, _class='w2p_fw'),
TD(c, _class='w2p_fc'), _id=_id))
class AuthJWT(object):
"""
Experimental!
Args:
- secret_key: the secret. Without salting, an attacker knowing this can impersonate
any user
- algorithm : as named in the JWT specs, HS256, HS384 or HS512, which basically means
signing with HMAC using a 256-, 384- or 512-bit hash
- verify_expiration : verifies the expiration checking the exp claim
- leeway: allow n seconds of skew when checking for token expiration
- expiration : how many seconds a token may be valid
- allow_refresh: enable the machinery to get a refreshed token passing a not-already-expired
token
- refresh_expiration_delta: to avoid continuous refresh of the token
- header_prefix : self-explanatory. "JWT" and "Bearer" seem to be the emerging standards
- jwt_add_header: a dict holding additional mappings to the header. by default only alg and typ are filled
- user_param: the name of the parameter holding the username when requesting a token. Can be useful, e.g., for
email-based authentication, with "email" as a parameter
- pass_param: same as above, but for the password
- realm: self-explanatory
- salt: can be static or a function that takes the payload as an argument.
Example:
def mysalt(payload):
return payload['hmac_key'].split('-')[0]
- additional_payload: can be a dict to merge with the payload or a function that takes
the payload as input and returns the modified payload
Example:
def myadditional_payload(payload):
payload['my_name_is'] = 'bond,james bond'
return payload
- before_authorization: can be a callable that takes the deserialized token (a dict) as input.
Gets called right after signature verification but before the actual
authorization takes place. It may be used to cast
the extra auth_user fields to their actual types.
You can raise HTTP with a proper error message
Example:
def mybefore_authorization(tokend):
if not tokend['my_name_is'] == 'bond,james bond':
raise HTTP(400, u'Invalid JWT my_name_is claim')
- max_header_length: check the max length to avoid load()ing unusually large tokens (which could indicate a crafted request, e.g. in a DDoS)
Basic Usage:
in models (or the controller needing it)
myjwt = AuthJWT(auth, secret_key='secret')
in the controller issuing tokens
def login_and_take_token():
return myjwt.jwt_token_manager()
A call then to /app/controller/login_and_take_token with username and password returns the token
A call to /app/controller/login_and_take_token with the original token returns the refreshed token
To protect a function with JWT
@myjwt.allows_jwt()
@auth.requires_login()
def protected():
return '%s$%s' % (request.now, auth.user_id)
To inject optional auth info into the action with JWT
@myjwt.allows_jwt()
def unprotected():
if auth.user:
return '%s$%s' % (request.now, auth.user_id)
return "No auth info!"
"""
def __init__(self,
auth,
secret_key,
algorithm='HS256',
verify_expiration=True,
leeway=30,
expiration=60 * 5,
allow_refresh=True,
refresh_expiration_delta=60 * 60,
header_prefix='Bearer',
jwt_add_header=None,
user_param='username',
pass_param='password',
realm='Login required',
salt=None,
additional_payload=None,
before_authorization=None,
max_header_length=4 * 1024,
):
self.secret_key = secret_key
self.auth = auth
self.algorithm = algorithm
if self.algorithm not in ('HS256', 'HS384', 'HS512'):
raise NotImplementedError('Algorithm %s not allowed' % algorithm)
self.verify_expiration = verify_expiration
self.leeway = leeway
self.expiration = expiration
self.allow_refresh = allow_refresh
self.refresh_expiration_delta = refresh_expiration_delta
self.header_prefix = header_prefix
self.jwt_add_header = jwt_add_header or {}
base_header = {'alg': self.algorithm, 'typ': 'JWT'}
for k, v in iteritems(self.jwt_add_header):
base_header[k] = v
self.cached_b64h = self.jwt_b64e(json.dumps(base_header))
digestmod_mapping = {
'HS256': hashlib.sha256,
'HS384': hashlib.sha384,
'HS512': hashlib.sha512
}
self.digestmod = digestmod_mapping[algorithm]
self.user_param = user_param
self.pass_param = pass_param
self.realm = realm
self.salt = salt
self.additional_payload = additional_payload
self.before_authorization = before_authorization
self.max_header_length = max_header_length
self.recvd_token = None
@staticmethod
def jwt_b64e(string):
string = to_bytes(string)
return base64.urlsafe_b64encode(string).strip(b'=')
@staticmethod
def jwt_b64d(string):
"""base64 decodes a single bytestring (and is tolerant to getting
called with a unicode string).
The result is also a bytestring.
"""
string = to_bytes(string, 'ascii', 'ignore')
return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
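# Padding note (illustrative values): urlsafe base64 needs a length that is a
# multiple of 4, so jwt_b64d re-adds the '=' padding that jwt_b64e stripped,
# e.g. an 11-character segment gets (-11 % 4) == 1 pad character back:
#
#     >>> s = b'aGVsbG8gbW8'                      # hypothetical stripped segment
#     >>> base64.urlsafe_b64decode(s + b'=' * (-len(s) % 4))
#     b'hello mo'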
def generate_token(self, payload):
secret = to_bytes(self.secret_key)
if self.salt:
if callable(self.salt):
secret = "%s$%s" % (secret, self.salt(payload))
else:
secret = "%s$%s" % (secret, self.salt)
if isinstance(secret, unicodeT):
secret = secret.encode('ascii', 'ignore')
b64h = self.cached_b64h
b64p = self.jwt_b64e(serializers.json(payload))
jbody = b64h + b'.' + b64p
mauth = hmac.new(key=secret, msg=jbody, digestmod=self.digestmod)
jsign = self.jwt_b64e(mauth.digest())
return to_native(jbody + b'.' + jsign)
def verify_signature(self, body, signature, secret):
mauth = hmac.new(key=secret, msg=body, digestmod=self.digestmod)
return compare(self.jwt_b64e(mauth.digest()), signature)
def load_token(self, token):
token = to_bytes(token, 'utf-8', 'strict')
body, sig = token.rsplit(b'.', 1)
b64h, b64b = body.split(b'.', 1)
if b64h != self.cached_b64h:
# header not the same
raise HTTP(400, u'Invalid JWT Header')
secret = self.secret_key
tokend = serializers.loads_json(to_native(self.jwt_b64d(b64b)))
if self.salt:
if callable(self.salt):
secret = "%s$%s" % (secret, self.salt(tokend))
else:
secret = "%s$%s" % (secret, self.salt)
secret = to_bytes(secret, 'ascii', 'ignore')
if not self.verify_signature(body, sig, secret):
# signature verification failed
raise HTTP(400, u'Token signature is invalid')
if self.verify_expiration:
now = time.mktime(datetime.datetime.utcnow().timetuple())
if tokend['exp'] + self.leeway < now:
raise HTTP(400, u'Token is expired')
if callable(self.before_authorization):
self.before_authorization(tokend)
return tokend
def serialize_auth_session(self, session_auth):
"""
As bad as it sounds, as long as this is rarely used (vs using the token)
this is the faster method, even if we ditch session in jwt_token_manager().
We (mis)use the heavy default auth mechanism to avoid any further computation,
while sticking to a somewhat-stable Auth API.
"""
# TODO: Check the following comment
# is the following safe or should we use
# calendar.timegm(datetime.datetime.utcnow().timetuple())
# result seem to be the same (seconds since epoch, in UTC)
now = time.mktime(datetime.datetime.utcnow().timetuple())
expires = now + self.expiration
payload = dict(
hmac_key=session_auth['hmac_key'],
user_groups=session_auth['user_groups'],
user=session_auth['user'].as_dict(),
iat=now,
exp=expires
)
return payload
def refresh_token(self, orig_payload):
now = time.mktime(datetime.datetime.utcnow().timetuple())
if self.verify_expiration:
orig_exp = orig_payload['exp']
if orig_exp + self.leeway < now:
# token already expired, can't be used for refresh
raise HTTP(400, u'Token already expired')
orig_iat = orig_payload.get('orig_iat') or orig_payload['iat']
if orig_iat + self.refresh_expiration_delta < now:
# refreshed too long ago
raise HTTP(400, u'Token issued too long ago')
expires = now + self.expiration
orig_payload.update(
orig_iat=orig_iat,
iat=now,
exp=expires,
hmac_key=web2py_uuid()
)
self.alter_payload(orig_payload)
return orig_payload
def alter_payload(self, payload):
if self.additional_payload:
if callable(self.additional_payload):
payload = self.additional_payload(payload)
elif isinstance(self.additional_payload, dict):
payload.update(self.additional_payload)
return payload
def jwt_token_manager(self, token_param='_token'):
"""
The part that issues (and refreshes) tokens.
Used in a controller, given myjwt is the instantiated class, as
@myjwt.allows_jwt(required=False, verify_expiration=False)
def api_auth():
return myjwt.jwt_token_manager()
Then, a call to /app/c/api_auth with username and password
returns a token, while /app/c/api_auth with the current token
issues another token (expired, but within grace time)
"""
request = current.request
response = current.response
session = current.session
# forget and unlock response
session.forget(response)
valid_user = None
ret = None
token = None
try:
token = self.recvd_token or self.get_jwt_token_from_request(token_param)
except HTTP:
pass
if token:
if not self.allow_refresh:
raise HTTP(403, u'Refreshing token is not allowed')
tokend = self.load_token(token)
# verification can fail here
refreshed = self.refresh_token(tokend)
ret = {'token': self.generate_token(refreshed)}
elif self.user_param in request.vars and self.pass_param in request.vars:
username = request.vars[self.user_param]
password = request.vars[self.pass_param]
valid_user = self.auth.login_bare(username, password)
else:
valid_user = self.auth.user
self.auth.login_user(valid_user)
if valid_user:
payload = self.serialize_auth_session(session.auth)
self.alter_payload(payload)
ret = {'token': self.generate_token(payload)}
elif ret is None:
raise HTTP(401,
u'Not Authorized - need to be logged in, to pass a token '
u'for refresh or username and password for login',
**{'WWW-Authenticate': u'JWT realm="%s"' % self.realm})
response.headers['Content-Type'] = 'application/json'
return serializers.json(ret)
def inject_token(self, tokend):
"""
The real deal, not touching the db but still logging-in the user
"""
self.auth.user = Storage(tokend['user'])
self.auth.user_groups = tokend['user_groups']
self.auth.hmac_key = tokend['hmac_key']
def get_jwt_token_from_request(self, token_param='_token'):
"""
The method that extracts and validates the token, either
from the header or the _token var
token_param: request.vars attribute with the token used only if the http authorization header is not present.
"""
token = None
token_in_header = current.request.env.http_authorization
if token_in_header:
parts = token_in_header.split()
if parts[0].lower() != self.header_prefix.lower():
raise HTTP(400, u'Invalid JWT header')
elif len(parts) == 1:
raise HTTP(400, u'Invalid JWT header, missing token')
elif len(parts) > 2:
raise HTTP(400, 'Invalid JWT header, token contains spaces')
token = parts[1]
else:
token = current.request.vars.get(token_param)
if token is None:
raise HTTP(400, 'JWT header not found and JWT parameter {} missing in request'.format(token_param))
self.recvd_token = token
return token
def allows_jwt(self, otherwise=None, required=True, verify_expiration=True, token_param='_token'):
"""
The decorator that takes care of injecting auth info in the decorated action.
Works w/o resorting to session.
Args:
required: the token is mandatory (either in request.vars._token or in the HTTP header Authorization Bearer)
verify_expiration: allows to bypass expiration check. Useful to manage token renewal.
token_param: request.vars attribute with the token used only if the http authorization header is not present (default: "_token").
"""
def decorator(action):
def f(*args, **kwargs):
try:
token = self.get_jwt_token_from_request(token_param=token_param)
except HTTP as e:
if required:
raise e
token = None
if token and len(token) < self.max_header_length:
old_verify_expiration = self.verify_expiration
try:
self.verify_expiration = verify_expiration
tokend = self.load_token(token)
except ValueError:
raise HTTP(400, 'Invalid JWT header, wrong token format')
finally:
self.verify_expiration = old_verify_expiration
self.inject_token(tokend)
return action(*args, **kwargs)
f.__doc__ = action.__doc__
f.__name__ = action.__name__
f.__dict__.update(action.__dict__)
return f
return decorator
class Auth(AuthAPI):
default_settings = dict(AuthAPI.default_settings,
allow_basic_login=False,
allow_basic_login_only=False,
allow_delete_accounts=False,
alternate_requires_registration=False,
auth_manager_role=None,
auth_two_factor_enabled=False,
auth_two_factor_tries_left=3,
bulk_register_enabled=False,
captcha=None,
cas_maps=None,
client_side=True,
formstyle=None,
hideerror=False,
label_separator=None,
login_after_password_change=True,
login_after_registration=False,
login_captcha=None,
login_specify_error=False,
long_expiration=3600 * 30 * 24, # one month
mailer=None,
manager_actions={},
multi_login=False,
on_failed_authentication=lambda x: redirect(x),
pre_registration_div=None,
prevent_open_redirect_attacks=True,
prevent_password_reset_attacks=True,
profile_fields=None,
register_captcha=None,
register_fields=None,
register_verify_password=True,
remember_me_form=True,
reset_password_requires_verification=False,
retrieve_password_captcha=None,
retrieve_username_captcha=None,
showid=False,
table_cas=None,
table_cas_name='auth_cas',
table_event=None,
table_group=None,
table_membership=None,
table_permission=None,
table_token_name='auth_token',
table_user=None,
two_factor_authentication_group=None,
update_fields=['email'],
wiki=Settings()
)
# ## these are messages that can be customized
default_messages = dict(AuthAPI.default_messages,
access_denied='Insufficient privileges',
bulk_invite_body='You have been invited to join %(site)s, click %(link)s to complete '
'the process',
bulk_invite_subject='Invitation to join %(site)s',
delete_label='Check to delete',
email_sent='Email sent',
email_verified='Email verified',
function_disabled='Function disabled',
impersonate_log='User %(id)s is impersonating %(other_id)s',
invalid_reset_password='Invalid reset password',
invalid_two_factor_code='Incorrect code. {0} more attempt(s) remaining.',
is_empty="Cannot be empty",
label_client_ip='Client IP',
label_description='Description',
label_email='E-mail',
label_first_name='First name',
label_group_id='Group ID',
label_last_name='Last name',
label_name='Name',
label_origin='Origin',
label_password='Password',
label_record_id='Record ID',
label_registration_id='Registration identifier',
label_registration_key='Registration key',
label_remember_me="Remember me (for 30 days)",
label_reset_password_key='Reset Password key',
label_role='Role',
label_table_name='Object or table name',
label_time_stamp='Timestamp',
label_two_factor='Authentication code',
label_user_id='User ID',
label_username='Username',
login_button='Log In',
login_disabled='Login disabled by administrator',
new_password='New password',
new_password_sent='A new password was emailed to you',
old_password='Old password',
password_change_button='Change password',
password_reset_button='Request reset password',
profile_save_button='Apply changes',
register_button='Sign Up',
reset_password='Click on the link %(link)s to reset your password',
reset_password_log='User %(id)s Password reset',
reset_password_subject='Password reset',
retrieve_password='Your password is: %(password)s',
retrieve_password_log='User %(id)s Password retrieved',
retrieve_password_subject='Password retrieve',
retrieve_two_factor_code='Your temporary login code is {0}',
retrieve_two_factor_code_subject='Two-step Login Authentication Code',
retrieve_username='Your username is: %(username)s',
retrieve_username_log='User %(id)s Username retrieved',
retrieve_username_subject='Username retrieve',
submit_button='Submit',
two_factor_comment='This code was emailed to you and is required for login.',
unable_send_email='Unable to send email',
username_sent='Your username was emailed to you',
verify_email='Welcome %(username)s! Click on the link %(link)s to verify your email',
verify_email_log='User %(id)s Verification email sent',
verify_email_subject='Email verification',
verify_password='Verify Password',
verify_password_comment='please input your password again'
)
"""
Class for authentication, authorization, role based access control.
Includes:
- registration and profile
- login and logout
- username and password retrieval
- event logging
- role creation and assignment
- user defined group/role based permission
Args:
environment: is there for legacy but unused (awful)
db: has to be the database where to create tables for authentication
mailer: `Mail(...)` or None (no mailer) or True (make a mailer)
hmac_key: can be a hmac_key or hmac_key=Auth.get_or_create_key()
controller: (where is the user action?)
cas_provider: (delegate authentication to the URL, CAS2)
Authentication Example::
from gluon.contrib.utils import *
mail=Mail()
mail.settings.server='smtp.gmail.com:587'
mail.settings.sender='[email protected]'
mail.settings.login='username:password'
auth=Auth(db)
auth.settings.mailer=mail
# auth.settings....=...
auth.define_tables()
def authentication():
return dict(form=auth())
Exposes:
- `http://.../{application}/{controller}/authentication/login`
- `http://.../{application}/{controller}/authentication/logout`
- `http://.../{application}/{controller}/authentication/register`
- `http://.../{application}/{controller}/authentication/verify_email`
- `http://.../{application}/{controller}/authentication/retrieve_username`
- `http://.../{application}/{controller}/authentication/retrieve_password`
- `http://.../{application}/{controller}/authentication/reset_password`
- `http://.../{application}/{controller}/authentication/profile`
- `http://.../{application}/{controller}/authentication/change_password`
On registration a group with role=new_user.id is created
and user is given membership of this group.
You can create a group with::
group_id=auth.add_group('Manager', 'can access the manage action')
auth.add_permission(group_id, 'access to manage')
Here "access to manage" is just a user defined string.
You can give access to a user::
auth.add_membership(group_id, user_id)
If user id is omitted, the logged in user is assumed
Then you can decorate any action::
@auth.requires_permission('access to manage')
def manage():
return dict()
You can restrict a permission to a specific table::
auth.add_permission(group_id, 'edit', db.sometable)
@auth.requires_permission('edit', db.sometable)
Or to a specific record::
auth.add_permission(group_id, 'edit', db.sometable, 45)
@auth.requires_permission('edit', db.sometable, 45)
If authorization is not granted calls::
auth.settings.on_failed_authorization
Other options::
auth.settings.mailer=None
auth.settings.expiration=3600 # seconds
...
### these are messages that can be customized
...
"""
@staticmethod
def get_or_create_key(filename=None, alg='sha512'):
request = current.request
if not filename:
filename = os.path.join(request.folder, 'private', 'auth.key')
if os.path.exists(filename):
key = open(filename, 'r').read().strip()
else:
key = alg + ':' + web2py_uuid()
open(filename, 'w').write(key)
return key
def url(self, f=None, args=None, vars=None, scheme=False):
if args is None:
args = []
if vars is None:
vars = {}
host = scheme and self.settings.host
return URL(c=self.settings.controller,
f=f, args=args, vars=vars, scheme=scheme, host=host)
def here(self):
return URL(args=current.request.args, vars=current.request.get_vars)
def select_host(self, host, host_names=None):
"""
checks that host is valid, i.e. in the list of glob host_names
if the host is missing, then it selects the first entry from host_names
read more here: https://github.com/web2py/web2py/issues/1196
"""
if host:
if host_names:
for item in host_names:
if fnmatch.fnmatch(host, item):
break
else:
raise HTTP(403, "Invalid Hostname")
elif host_names:
host = host_names[0]
else:
host = 'localhost'
return host
def __init__(self, environment=None, db=None, mailer=True,
hmac_key=None, controller='default', function='user',
cas_provider=None, signature=True, secure=False,
csrf_prevention=True, propagate_extension=None,
url_index=None, jwt=None, host_names=None):
# next two lines for backward compatibility
if not db and environment and isinstance(environment, DAL):
db = environment
self.db = db
self.environment = current
self.csrf_prevention = csrf_prevention
request = current.request
session = current.session
auth = session.auth
self.user_groups = auth and auth.user_groups or {}
if secure:
request.requires_https()
now = request.now
# if we have auth info
# if not expired it, used it
# if expired, clear the session
# else, only clear auth info in the session
if auth:
delta = datetime.timedelta(days=0, seconds=auth.expiration)
if auth.last_visit and auth.last_visit + delta > now:
self.user = auth.user
# this is a trick to speed up sessions to avoid many writes
if (now - auth.last_visit).seconds > (auth.expiration // 10):
auth.last_visit = now
else:
self.user = None
if session.auth:
del session.auth
session.renew(clear_session=True)
else:
self.user = None
if session.auth:
del session.auth
# ## what happens after login?
url_index = url_index or URL(controller, 'index')
url_login = URL(controller, function, args='login',
extension=propagate_extension)
# ## what happens after registration?
settings = self.settings = Settings()
settings.update(Auth.default_settings)
host = self.select_host(request.env.http_host, host_names)
settings.update(
cas_domains=[host],
enable_tokens=False,
cas_provider=cas_provider,
cas_actions=dict(login='login',
validate='validate',
servicevalidate='serviceValidate',
proxyvalidate='proxyValidate',
logout='logout'),
cas_create_user=True,
extra_fields={},
actions_disabled=[],
controller=controller,
function=function,
login_url=url_login,
logged_url=URL(controller, function, args='profile'),
download_url=URL(controller, 'download'),
mailer=(mailer is True) and Mail() or mailer,
on_failed_authorization=URL(controller, function, args='not_authorized'),
login_next=url_index,
login_onvalidation=[],
login_onaccept=[],
login_onfail=[],
login_methods=[self],
login_form=self,
logout_next=url_index,
logout_onlogout=None,
register_next=url_index,
register_onvalidation=[],
register_onaccept=[],
verify_email_next=url_login,
verify_email_onaccept=[],
profile_next=url_index,
profile_onvalidation=[],
profile_onaccept=[],
retrieve_username_next=url_index,
retrieve_password_next=url_index,
request_reset_password_next=url_login,
reset_password_next=url_index,
change_password_next=url_index,
change_password_onvalidation=[],
change_password_onaccept=[],
retrieve_password_onvalidation=[],
request_reset_password_onvalidation=[],
request_reset_password_onaccept=[],
reset_password_onvalidation=[],
reset_password_onaccept=[],
hmac_key=hmac_key,
formstyle=current.response.formstyle,
label_separator=current.response.form_label_separator,
two_factor_methods=[],
two_factor_onvalidation=[],
host=host,
)
settings.lock_keys = True
# ## these are messages that can be customized
messages = self.messages = Messages(current.T)
messages.update(Auth.default_messages)
messages.update(ajax_failed_authentication=
DIV(H4('NOT AUTHORIZED'),
'Please ',
A('login',
_href=self.settings.login_url +
('?_next=' + urllib_quote(current.request.env.http_web2py_component_location))
if current.request.env.http_web2py_component_location else ''),
' to view this content.',
_class='not-authorized alert alert-block'))
messages.lock_keys = True
# for "remember me" option
response = current.response
if auth and auth.remember_me:
# when user wants to be logged in for longer
response.session_cookie_expires = auth.expiration
if signature:
self.define_signature()
else:
self.signature = None
self.jwt_handler = jwt and AuthJWT(self, **jwt)
def get_vars_next(self):
next = current.request.vars._next
host = current.request.env.http_host
if isinstance(next, (list, tuple)):
next = next[0]
if next and self.settings.prevent_open_redirect_attacks:
return self.prevent_open_redirect(next, host)
return next or None
@staticmethod
def prevent_open_redirect(next, host):
# Prevent an attacker from adding an arbitrary url after the
# _next variable in the request.
if next:
parts = next.split('/')
if ':' not in parts[0]:
return next
elif len(parts) > 2 and parts[0].endswith(':') and parts[1:3] == ['', host]:
return next
return None
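# Illustrative outcomes (host value is hypothetical):
#
#     Auth.prevent_open_redirect('/app/default/index', 'www.example.com')           # kept: relative URL
#     Auth.prevent_open_redirect('https://www.example.com/app', 'www.example.com')  # kept: same host
#     Auth.prevent_open_redirect('https://evil.com/phish', 'www.example.com')       # -> None (dropped)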
def table_cas(self):
return self.db[self.settings.table_cas_name]
def table_token(self):
return self.db[self.settings.table_token_name]
def _HTTP(self, *a, **b):
"""
only used in lambda: self._HTTP(404)
"""
raise HTTP(*a, **b)
def __call__(self):
"""
Example:
Use as::
def authentication():
return dict(form=auth())
"""
request = current.request
args = request.args
if not args:
redirect(self.url(args='login', vars=request.vars))
elif args[0] in self.settings.actions_disabled:
raise HTTP(404)
if args[0] in ('login', 'logout', 'register', 'verify_email',
'retrieve_username', 'retrieve_password',
'reset_password', 'request_reset_password',
'change_password', 'profile', 'groups',
'impersonate', 'not_authorized', 'confirm_registration',
'bulk_register', 'manage_tokens', 'jwt'):
if len(request.args) >= 2 and args[0] == 'impersonate':
return getattr(self, args[0])(request.args[1])
else:
return getattr(self, args[0])()
elif args[0] == 'cas' and not self.settings.cas_provider:
if args(1) == self.settings.cas_actions['login']:
return self.cas_login(version=2)
elif args(1) == self.settings.cas_actions['validate']:
return self.cas_validate(version=1)
elif args(1) == self.settings.cas_actions['servicevalidate']:
return self.cas_validate(version=2, proxy=False)
elif args(1) == self.settings.cas_actions['proxyvalidate']:
return self.cas_validate(version=2, proxy=True)
elif (args(1) == 'p3'
and args(2) == self.settings.cas_actions['servicevalidate']):
return self.cas_validate(version=3, proxy=False)
elif (args(1) == 'p3'
and args(2) == self.settings.cas_actions['proxyvalidate']):
return self.cas_validate(version=3, proxy=True)
elif args(1) == self.settings.cas_actions['logout']:
return self.logout(next=request.vars.service or DEFAULT)
else:
raise HTTP(404)
def navbar(self, prefix='Welcome', action=None,
separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT,
referrer_actions=DEFAULT, mode='default'):
""" Navbar with support for more templates
This uses some code from the old navbar.
Args:
mode: see options for list of
"""
items = [] # Hold all menu items in a list
self.bar = '' # The final navbar markup is assembled here
T = current.T
referrer_actions = [] if not referrer_actions else referrer_actions
if not action:
action = self.url(self.settings.function)
request = current.request
if URL() == action:
next = ''
else:
next = '?_next=' + urllib_quote(URL(args=request.args,
vars=request.get_vars))
href = lambda function: \
'%s/%s%s' % (action, function, next if referrer_actions is DEFAULT or function in referrer_actions else '')
if isinstance(prefix, str):
prefix = T(prefix)
if prefix:
prefix = prefix.strip() + ' '
def Anr(*a, **b):
b['_rel'] = 'nofollow'
return A(*a, **b)
if self.user_id: # User is logged in
logout_next = self.settings.logout_next
items.append({'name': T('Log Out'),
'href': '%s/logout?_next=%s' % (action, urllib_quote(logout_next)),
'icon': 'icon-off'})
if 'profile' not in self.settings.actions_disabled:
items.append({'name': T('Profile'), 'href': href('profile'),
'icon': 'icon-user'})
if 'change_password' not in self.settings.actions_disabled:
items.append({'name': T('Password'),
'href': href('change_password'),
'icon': 'icon-lock'})
if user_identifier is DEFAULT:
user_identifier = '%(first_name)s'
if callable(user_identifier):
user_identifier = user_identifier(self.user)
elif ((isinstance(user_identifier, str) or
type(user_identifier).__name__ == 'lazyT') and
re.search(r'%\(.+\)s', user_identifier)):
user_identifier = user_identifier % self.user
if not user_identifier:
user_identifier = ''
else: # User is not logged in
items.append({'name': T('Log In'), 'href': href('login'),
'icon': 'icon-off'})
if 'register' not in self.settings.actions_disabled:
items.append({'name': T('Sign Up'), 'href': href('register'),
'icon': 'icon-user'})
if 'request_reset_password' not in self.settings.actions_disabled:
items.append({'name': T('Lost password?'),
'href': href('request_reset_password'),
'icon': 'icon-lock'})
if self.settings.use_username and 'retrieve_username' not in self.settings.actions_disabled:
items.append({'name': T('Forgot username?'),
'href': href('retrieve_username'),
'icon': 'icon-edit'})
def menu(): # For inclusion in MENU
self.bar = [(items[0]['name'], False, items[0]['href'], [])]
del items[0]
for item in items:
self.bar[0][3].append((item['name'], False, item['href']))
def bootstrap3(): # Default web2py scaffolding
def rename(icon): return icon + ' ' + icon.replace('icon', 'glyphicon')
self.bar = UL(LI(Anr(I(_class=rename('icon ' + items[0]['icon'])),
' ' + items[0]['name'],
_href=items[0]['href'])), _class='dropdown-menu')
del items[0]
for item in items:
self.bar.insert(-1, LI(Anr(I(_class=rename('icon ' + item['icon'])),
' ' + item['name'],
_href=item['href'])))
self.bar.insert(-1, LI('', _class='divider'))
if self.user_id:
self.bar = LI(Anr(prefix, user_identifier,
_href='#', _class="dropdown-toggle",
data={'toggle': 'dropdown'}),
self.bar, _class='dropdown')
else:
self.bar = LI(Anr(T('Log In'),
_href='#', _class="dropdown-toggle",
data={'toggle': 'dropdown'}), self.bar,
_class='dropdown')
def bare():
""" In order to do advanced customization we only need the
prefix, the user_identifier and the href attribute of items
Examples:
Use as::
# in module custom_layout.py
from gluon import *
def navbar(auth_navbar):
bar = auth_navbar
user = bar["user"]
if not user:
btn_login = A(current.T("Login"),
_href=bar["login"],
_class="btn btn-success",
_rel="nofollow")
btn_register = A(current.T("Sign up"),
_href=bar["register"],
_class="btn btn-primary",
_rel="nofollow")
return DIV(btn_register, btn_login, _class="btn-group")
else:
toggletext = "%s back %s" % (bar["prefix"], user)
toggle = A(toggletext,
_href="#",
_class="dropdown-toggle",
_rel="nofollow",
**{"_data-toggle": "dropdown"})
li_profile = LI(A(I(_class="icon-user"), ' ',
current.T("Account details"),
_href=bar["profile"], _rel="nofollow"))
li_custom = LI(A(I(_class="icon-book"), ' ',
current.T("My Agenda"),
_href="#", rel="nofollow"))
li_logout = LI(A(I(_class="icon-off"), ' ',
current.T("logout"),
_href=bar["logout"], _rel="nofollow"))
dropdown = UL(li_profile,
li_custom,
LI('', _class="divider"),
li_logout,
_class="dropdown-menu", _role="menu")
return LI(toggle, dropdown, _class="dropdown")
# in models db.py
import custom_layout as custom
# in layout.html
<ul id="navbar" class="nav pull-right">
{{='auth' in globals() and \
custom.navbar(auth.navbar(mode='bare')) or ''}}</ul>
"""
bare = {'prefix': prefix, 'user': user_identifier if self.user_id else None}
for i in items:
if i['name'] == T('Log In'):
k = 'login'
elif i['name'] == T('Sign Up'):
k = 'register'
elif i['name'] == T('Lost password?'):
k = 'request_reset_password'
elif i['name'] == T('Forgot username?'):
k = 'retrieve_username'
elif i['name'] == T('Log Out'):
k = 'logout'
elif i['name'] == T('Profile'):
k = 'profile'
elif i['name'] == T('Password'):
k = 'change_password'
bare[k] = i['href']
self.bar = bare
options = {'asmenu': menu,
'dropdown': bootstrap3,
'bare': bare
} # Define custom modes.
if mode in options and callable(options[mode]):
options[mode]()
else:
s1, s2, s3 = separators
if self.user_id:
self.bar = SPAN(prefix, user_identifier, s1,
Anr(items[0]['name'],
_href=items[0]['href']), s3,
_class='auth_navbar')
else:
self.bar = SPAN(s1, Anr(items[0]['name'],
_href=items[0]['href']), s3,
_class='auth_navbar')
for item in items[1:]:
self.bar.insert(-1, s2)
self.bar.insert(-1, Anr(item['name'], _href=item['href']))
return self.bar
def enable_record_versioning(self,
tables,
archive_db=None,
archive_names='%(tablename)s_archive',
current_record='current_record',
current_record_label=None):
"""
Used to enable full record versioning (including auth tables)::
auth = Auth(db)
auth.define_tables(signature=True)
# define our own tables
db.define_table('mything',Field('name'),auth.signature)
auth.enable_record_versioning(tables=db)
tables can be the db (all tables) or a list of tables.
only tables with modified_by and modified_on fields (as created
by auth.signature) will have versioning. Old record versions will be
stored in the automatically defined table 'mything_archive'.
When you enable record versioning, records are never
deleted but marked with is_active=False.
enable_record_versioning enables a common_filter for
every table that filters out records with is_active = False
Note:
If you use auth.enable_record_versioning,
do not use auth.archive or you will end up with duplicates.
auth.archive does explicitly what enable_record_versioning
does automatically.
"""
current_record_label = current_record_label or current.T(
current_record.replace('_', ' ').title())
for table in tables:
fieldnames = table.fields()
if 'id' in fieldnames and 'modified_on' in fieldnames and current_record not in fieldnames:
table._enable_record_versioning(archive_db=archive_db,
archive_name=archive_names,
current_record=current_record,
current_record_label=current_record_label)
def define_tables(self, username=None, signature=None, enable_tokens=False,
migrate=None, fake_migrate=None):
"""
To be called unless tables are defined manually
Examples:
Use as::
# defines all needed tables and table files
# 'myprefix_auth_user.table', ...
auth.define_tables(migrate='myprefix_')
# defines all needed tables without migration/table files
auth.define_tables(migrate=False)
"""
db = self.db
if migrate is None:
migrate = db._migrate
if fake_migrate is None:
fake_migrate = db._fake_migrate
settings = self.settings
settings.enable_tokens = enable_tokens
signature_list = \
super(Auth, self).define_tables(username, signature, migrate, fake_migrate)._table_signature_list
now = current.request.now
reference_table_user = 'reference %s' % settings.table_user_name
if settings.cas_domains:
if settings.table_cas_name not in db.tables:
db.define_table(
settings.table_cas_name,
Field('user_id', reference_table_user, default=None,
label=self.messages.label_user_id),
Field('created_on', 'datetime', default=now),
Field('service', requires=IS_URL()),
Field('ticket'),
Field('renew', 'boolean', default=False),
*settings.extra_fields.get(settings.table_cas_name, []),
**dict(
migrate=self._get_migrate(
settings.table_cas_name, migrate),
fake_migrate=fake_migrate))
if settings.enable_tokens:
extra_fields = settings.extra_fields.get(
settings.table_token_name, []) + signature_list
if settings.table_token_name not in db.tables:
db.define_table(
settings.table_token_name,
Field('user_id', reference_table_user, default=None,
label=self.messages.label_user_id),
Field('expires_on', 'datetime', default=datetime.datetime(2999, 12, 31)),
Field('token', writable=False, default=web2py_uuid, unique=True),
*extra_fields,
**dict(migrate=self._get_migrate(settings.table_token_name, migrate),
fake_migrate=fake_migrate))
if not db._lazy_tables:
settings.table_user = db[settings.table_user_name]
settings.table_group = db[settings.table_group_name]
settings.table_membership = db[settings.table_membership_name]
settings.table_permission = db[settings.table_permission_name]
settings.table_event = db[settings.table_event_name]
if settings.cas_domains:
settings.table_cas = db[settings.table_cas_name]
if settings.cas_provider: # THIS IS NOT LAZY
settings.actions_disabled = \
['profile', 'register', 'change_password',
'request_reset_password', 'retrieve_username']
from gluon.contrib.login_methods.cas_auth import CasAuth
maps = settings.cas_maps
if not maps:
table_user = self.table_user()
maps = dict((name, lambda v, n=name: v.get(n, None)) for name in
table_user.fields if name != 'id'
and table_user[name].readable)
maps['registration_id'] = \
lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user'])
actions = [settings.cas_actions['login'],
settings.cas_actions['servicevalidate'],
settings.cas_actions['logout']]
settings.login_form = CasAuth(
casversion=2,
urlbase=settings.cas_provider,
actions=actions,
maps=maps)
return self
def get_or_create_user(self, keys, update_fields=['email'],
login=True, get=True):
"""
Used for alternate login methods:
If the user exists already then password is updated.
If the user doesn't yet exist, then they are created.
"""
table_user = self.table_user()
create_user = self.settings.cas_create_user
user = None
checks = []
# make a guess about who this user is
guess_fields = ['registration_id', 'username', 'email']
if self.settings.login_userfield:
guess_fields.append(self.settings.login_userfield)
for fieldname in guess_fields:
if fieldname in table_user.fields() and \
keys.get(fieldname, None):
checks.append(fieldname)
value = keys[fieldname]
user = table_user(**{fieldname: value})
if user:
break
if not checks:
return None
if 'registration_id' not in keys:
keys['registration_id'] = keys[checks[0]]
# if we think we found the user but registration_id does not match,
# make new user
if 'registration_id' in checks \
and user \
and user.registration_id \
and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])):
user = None # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER?
if user:
if not get:
# added for register_bare to avoid overwriting users
return None
update_keys = dict(registration_id=keys['registration_id'])
for key in update_fields:
if key in keys:
update_keys[key] = keys[key]
user.update_record(**update_keys)
elif checks:
if create_user is False:
# Remove current open session and send message
self.logout(next=None, onlogout=None, log=None)
raise HTTP(403, "Forbidden. User needs to be created first.")
if 'first_name' not in keys and 'first_name' in table_user.fields:
guess = keys.get('email', 'anonymous').split('@')[0]
keys['first_name'] = keys.get('username', guess)
vars = table_user._filter_fields(keys)
user_id = table_user.insert(**vars)
user = table_user[user_id]
if self.settings.create_user_groups:
group_id = self.add_group(self.settings.create_user_groups % user)
self.add_membership(group_id, user_id)
if self.settings.everybody_group_id:
self.add_membership(self.settings.everybody_group_id, user_id)
if login:
self.user = user
if self.settings.register_onaccept:
callback(self.settings.register_onaccept, Storage(vars=user))
return user
def basic(self, basic_auth_realm=False):
"""
Performs basic login.
Args:
basic_auth_realm: optional basic http authentication realm. Can take
str or unicode or function or callable or boolean.
Reads current.request.env.http_authorization
and returns basic_allowed, basic_accepted, user.
If basic_auth_realm is a callable, its return value
is used to set the basic authentication realm; if it's a string,
its content is used instead. Otherwise the basic authentication realm
is set to the application name.
If basic_auth_realm is None or False (the default) the behavior
is to skip sending any challenge.
"""
if not self.settings.allow_basic_login:
return (False, False, False)
basic = current.request.env.http_authorization
if basic_auth_realm:
if callable(basic_auth_realm):
basic_auth_realm = basic_auth_realm()
if isinstance(basic_auth_realm, (unicode, str)):
basic_realm = unicode(basic_auth_realm) # Warning: python 3 does not have the unicode builtin
elif basic_auth_realm is True:
basic_realm = u'' + current.request.application
http_401 = HTTP(401, u'Not Authorized', **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'})
if not basic or not basic[:6].lower() == 'basic ':
if basic_auth_realm:
raise http_401
return (True, False, False)
(username, sep, password) = base64.b64decode(basic[6:]).partition(':')
is_valid_user = sep and self.login_bare(username, password)
if not is_valid_user and basic_auth_realm:
raise http_401
return (True, True, is_valid_user)
def _get_login_settings(self):
table_user = self.table_user()
userfield = self.settings.login_userfield or 'username' \
if self.settings.login_userfield or 'username' \
in table_user.fields else 'email'
passfield = self.settings.password_field
return Storage({'table_user': table_user,
'userfield': userfield,
'passfield': passfield})
def login_bare(self, username, password):
"""
Logins user as specified by username (or email) and password
"""
settings = self._get_login_settings()
user = settings.table_user(**{settings.userfield: username})
if user and user.get(settings.passfield, False):
password = settings.table_user[
settings.passfield].validate(password)[0]
if ((user.registration_key is None or
not user.registration_key.strip()) and
password == user[settings.passfield]):
self.login_user(user)
return user
else:
# user not in database try other login methods
for login_method in self.settings.login_methods:
if login_method != self and login_method(username, password):
self.user = user
return user
return False
def register_bare(self, **fields):
"""
Registers a user as specified by username (or email)
and a raw password.
"""
settings = self._get_login_settings()
# users can register_bare even if no password is provided,
# in this case they will have to reset their password to login
if fields.get(settings.passfield):
fields[settings.passfield] = \
settings.table_user[settings.passfield].validate(fields[settings.passfield])[0]
if not fields.get(settings.userfield):
raise ValueError('register_bare: userfield not provided or invalid')
user = self.get_or_create_user(fields, login=False, get=False,
update_fields=self.settings.update_fields)
if not user:
# get or create did not create a user (it ignores duplicate records)
return False
return user
def cas_login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
version=2,
):
request = current.request
response = current.response
session = current.session
db, table = self.db, self.table_cas()
session._cas_service = request.vars.service or session._cas_service
if request.env.http_host not in self.settings.cas_domains or \
not session._cas_service:
raise HTTP(403, 'not authorized')
def allow_access(interactivelogin=False):
row = table(service=session._cas_service, user_id=self.user.id)
if row:
ticket = row.ticket
else:
ticket = 'ST-' + web2py_uuid()
table.insert(service=session._cas_service,
user_id=self.user.id,
ticket=ticket,
created_on=request.now,
renew=interactivelogin)
service = session._cas_service
query_sep = '&' if '?' in service else '?'
del session._cas_service
if 'warn' in request.vars and not interactivelogin:
response.headers[
'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket
return A("Continue to %s" % service,
_href=service + query_sep + "ticket=" + ticket)
else:
redirect(service + query_sep + "ticket=" + ticket)
if self.is_logged_in() and 'renew' not in request.vars:
return allow_access()
elif not self.is_logged_in() and 'gateway' in request.vars:
redirect(session._cas_service)
def cas_onaccept(form, onaccept=onaccept):
if onaccept is not DEFAULT:
onaccept(form)
return allow_access(interactivelogin=True)
return self.login(next, onvalidation, cas_onaccept, log)
def cas_validate(self, version=2, proxy=False):
request = current.request
db, table = self.db, self.table_cas()
current.response.headers['Content-Type'] = 'text'
ticket = request.vars.ticket
renew = 'renew' in request.vars
row = table(ticket=ticket)
success = False
if row:
userfield = (self.settings.login_userfield or 'username') if 'username' in table.fields else 'email'
# If ticket is a service Ticket and RENEW flag respected
if ticket[0:3] == 'ST-' and \
not ((row.renew and renew) ^ renew):
user = self.table_user()(row.user_id)
row.delete_record()
success = True
def build_response(body):
xml_body = to_native(TAG['cas:serviceResponse'](
body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml())
return '<?xml version="1.0" encoding="UTF-8"?>\n' + xml_body
if success:
if version == 1:
message = 'yes\n%s' % user[userfield]
elif version == 3:
username = user.get('username', user[userfield])
message = build_response(
TAG['cas:authenticationSuccess'](
TAG['cas:user'](username),
TAG['cas:attributes'](
*[TAG['cas:' + field.name](user[field.name])
for field in self.table_user()
if field.readable])))
else: # assume version 2
username = user.get('username', user[userfield])
message = build_response(
TAG['cas:authenticationSuccess'](
TAG['cas:user'](username),
*[TAG['cas:' + field.name](user[field.name])
for field in self.table_user()
if field.readable]))
else:
if version == 1:
message = 'no\n'
elif row:
message = build_response(TAG['cas:authenticationFailure']())
else:
message = build_response(
TAG['cas:authenticationFailure'](
'Ticket %s not recognized' % ticket,
_code='INVALID TICKET'))
raise HTTP(200, message)
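# Illustrative configuration (hedged sketch): this app can act as a CAS
# provider for the domains listed in auth.settings.cas_domains, e.g.:
#
#     auth.settings.cas_domains.append('client.example.com')
#
# A consumer application would then point its login at the provider's
# /app/default/user/cas/login endpoint (for example via
# gluon.contrib.login_methods.cas_auth.CasAuth); check that module for the
# exact constructor arguments, which are not shown here.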
def _reset_two_factor_auth(self, session):
"""
When two-step authentication is enabled, this function is used to
clear the session after successfully completing the second challenge
or when the maximum number of tries allowed has expired.
"""
session.auth_two_factor_user = None
session.auth_two_factor = None
session.auth_two_factor_enabled = False
# Set the number of attempts. It should be more than 1.
session.auth_two_factor_tries_left = self.settings.auth_two_factor_tries_left
def when_is_logged_in_bypass_next_in_url(self, next, session):
"""
Use this to avoid prompting for user credentials when a page whose URL
contains "user/login?_next=NEXT_COMPONENT" is reloaded but the user
is already authenticated.
"""
if self.is_logged_in():
if next == session._auth_next:
del session._auth_next
redirect(next, client_side=self.settings.client_side)
def login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a login form
"""
settings = self.settings
request = current.request
response = current.response
session = current.session
# use session for federated login
snext = self.get_vars_next()
if snext:
session._auth_next = snext
elif session._auth_next:
snext = session._auth_next
# pass
if next is DEFAULT:
# important for security
next = settings.login_next
if callable(next):
next = next()
user_next = snext
if user_next:
external = user_next.split('://')
if external[0].lower() in ['http', 'https', 'ftp']:
host_next = user_next.split('//', 1)[-1].split('/')[0]
if host_next in settings.cas_domains:
next = user_next
else:
next = user_next
# Avoid asking unnecessary user credentials when user is logged in
self.when_is_logged_in_bypass_next_in_url(next=next, session=session)
# Moved here to avoid unnecessary execution in case of redirection to next in case of logged in user
table_user = self.table_user()
if 'username' in table_user.fields or \
not settings.login_email_validate:
tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
if not settings.username_case_sensitive:
tmpvalidator = [IS_LOWER(), tmpvalidator]
else:
tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
if not settings.email_case_sensitive:
tmpvalidator = [IS_LOWER(), tmpvalidator]
passfield = settings.password_field
try:
table_user[passfield].requires[-1].min_length = 0
except:
pass
if onvalidation is DEFAULT:
onvalidation = settings.login_onvalidation
if onaccept is DEFAULT:
onaccept = settings.login_onaccept
if log is DEFAULT:
log = self.messages['login_log']
onfail = settings.login_onfail
user = None # default
# Setup the default field used for the form
multi_login = False
if self.settings.login_userfield:
username = self.settings.login_userfield
else:
if 'username' in table_user.fields:
username = 'username'
else:
username = 'email'
if self.settings.multi_login:
multi_login = True
old_requires = table_user[username].requires
table_user[username].requires = tmpvalidator
# If two-factor authentication is enabled, and the maximum
# number of tries allowed is used up, reset the session to
# pre-login state with two-factor auth
if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
# Exceeded maximum allowed tries for this code. Require user to enter
# username and password again.
user = None
accepted_form = False
self._reset_two_factor_auth(session)
# Redirect to the default 'next' page without logging
# in. If that page requires login, user will be redirected
# back to the main login form
redirect(next, client_side=settings.client_side)
# Before showing the default login form, check whether
# we are already on the second step of two-step authentication.
# If we are, then skip this login form and use the form for the
# second challenge instead.
# Note to devs: the code inside the if-block is unchanged from the
# previous version of this file, apart from being re-indented to sit
# inside the if-block
if session.auth_two_factor_user is None:
if settings.remember_me_form:
extra_fields = [
Field('remember_me', 'boolean', default=False,
label=self.messages.label_remember_me)]
else:
extra_fields = []
# do we use our own login form, or from a central source?
if settings.login_form == self:
form = SQLFORM(table_user,
fields=[username, passfield],
hidden=dict(_next=next),
showid=settings.showid,
submit_button=self.messages.login_button,
delete_label=self.messages.delete_label,
formstyle=settings.formstyle,
separator=settings.label_separator,
extra_fields=extra_fields,
)
captcha = settings.login_captcha or \
(settings.login_captcha is not False and settings.captcha)
if captcha:
addrow(form, captcha.label, captcha, captcha.comment,
settings.formstyle, 'captcha__row')
accepted_form = False
specific_error = self.messages.invalid_user
if form.accepts(request, session if self.csrf_prevention else None,
formname='login', dbio=False,
onvalidation=onvalidation,
hideerror=settings.hideerror):
accepted_form = True
# check for username in db
entered_username = form.vars[username]
if multi_login and '@' in entered_username:
# if '@' in username check for email, not username
user = table_user(email=entered_username)
else:
user = table_user(**{username: entered_username})
if user:
# user in db, check if registration pending or disabled
specific_error = self.messages.invalid_password
temp_user = user
if (temp_user.registration_key or '').startswith('pending'):
response.flash = self.messages.registration_pending
return form
elif temp_user.registration_key in ('disabled', 'blocked'):
response.flash = self.messages.login_disabled
return form
elif (temp_user.registration_key is not None and temp_user.registration_key.strip()):
response.flash = \
self.messages.registration_verifying
return form
# try alternate logins 1st as these have the
# current version of the password
user = None
for login_method in settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if self not in settings.login_methods:
# do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(
form.vars, settings.update_fields)
break
if not user:
# alternates have failed, maybe because service inaccessible
if settings.login_methods[0] == self:
# try logging in locally using cached credentials
if form.vars.get(passfield, '') == temp_user[passfield]:
# success
user = temp_user
else:
# user not in db
if not settings.alternate_requires_registration:
# we're allowed to auto-register users from external systems
for login_method in settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if self not in settings.login_methods:
# do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(
form.vars, settings.update_fields)
break
if not user:
self.log_event(self.messages['login_failed_log'],
request.post_vars)
# invalid login
session.flash = specific_error if self.settings.login_specify_error else self.messages.invalid_login
callback(onfail, None)
redirect(
self.url(args=request.args, vars=request.get_vars),
client_side=settings.client_side)
else: # use a central authentication server
cas = settings.login_form
cas_user = cas.get_user()
if cas_user:
cas_user[passfield] = None
user = self.get_or_create_user(
table_user._filter_fields(cas_user),
settings.update_fields)
elif hasattr(cas, 'login_form'):
return cas.login_form()
else:
# we need to pass through login again before going on
next = self.url(settings.function, args='login')
redirect(cas.login_url(next),
client_side=settings.client_side)
# Extra login logic for two-factor authentication
#################################################
# If the 'user' variable has a value, this means that the first
# authentication step was successful (i.e. user provided correct
# username and password at the first challenge).
# Check if this user is signed up for two-factor authentication
# If auth.settings.auth_two_factor_enabled is True, two-factor auth is
# enabled for the whole app. Alternatively, it is enabled only for users
# who belong to the group named by auth.settings.two_factor_authentication_group
if user and self.settings.auth_two_factor_enabled is True:
session.auth_two_factor_enabled = True
elif user and self.settings.two_factor_authentication_group:
role = self.settings.two_factor_authentication_group
session.auth_two_factor_enabled = self.has_membership(user_id=user.id, role=role)
# challenge
if session.auth_two_factor_enabled:
form = SQLFORM.factory(
Field('authentication_code',
label=self.messages.label_two_factor,
required=True,
comment=self.messages.two_factor_comment),
hidden=dict(_next=next),
formstyle=settings.formstyle,
separator=settings.label_separator
)
# accepted_form is used by some default web2py code later in the
# function that handles running specified functions before redirect
# Set it to False until the challenge form is accepted.
accepted_form = False
# Handle the case when a user has submitted the login/password
# form successfully, and the password has been validated, but
# the two-factor form has not been displayed or validated yet.
if session.auth_two_factor_user is None and user is not None:
session.auth_two_factor_user = user # store the validated user and associate with this session
session.auth_two_factor = random.randint(100000, 999999)
session.auth_two_factor_tries_left = self.settings.auth_two_factor_tries_left
# Set the way we generate the code or we send the code. For example using SMS...
two_factor_methods = self.settings.two_factor_methods
if not two_factor_methods:
# TODO: Add some error checking to handle cases where email cannot be sent
self.settings.mailer.send(
to=user.email,
subject=self.messages.retrieve_two_factor_code_subject,
message=self.messages.retrieve_two_factor_code.format(session.auth_two_factor))
else:
# Check all methods. It is possible to have multiple.
for two_factor_method in two_factor_methods:
try:
# By default we use session.auth_two_factor generated before.
session.auth_two_factor = two_factor_method(user, session.auth_two_factor)
except:
pass
else:
break
if form.accepts(request, session if self.csrf_prevention else None,
formname='login', dbio=False,
onvalidation=onvalidation,
hideerror=settings.hideerror):
accepted_form = True
"""
The list of callbacks is executed after form validation for each corresponding action.
For example, copy and paste the following into your model:
# Before defining tables, we add some extra fields to auth_user
auth.settings.extra_fields['auth_user'] = [
Field('motp_secret', 'password', length=512, default='', label='MOTP Secret'),
Field('motp_pin', 'string', length=128, default='', label='MOTP PIN')]
OFFSET = 60 # Be sure it is the same in your OTP client
# Set session.auth_two_factor to None because the code is generated by an external app.
# This avoids using the default setting, which would send a code by email.
def _set_two_factor(user, auth_two_factor):
return None
def verify_otp(user, otp):
import time
from hashlib import md5
epoch_time = int(time.time())
time_start = int(str(epoch_time - OFFSET)[:-1])
time_end = int(str(epoch_time + OFFSET)[:-1])
for t in range(time_start - 1, time_end + 1):
to_hash = str(t) + user.motp_secret + user.motp_pin
hash = md5(to_hash.encode('utf8')).hexdigest()[:6]  # md5 requires bytes on Python 3
if otp == hash:
return hash
auth.settings.auth_two_factor_enabled = True
auth.messages.two_factor_comment = "Verify your OTP Client for the code."
auth.settings.two_factor_methods = [lambda user,
auth_two_factor: _set_two_factor(user, auth_two_factor)]
auth.settings.two_factor_onvalidation = [lambda user, otp: verify_otp(user, otp)]
"""
if self.settings.two_factor_onvalidation:
for two_factor_onvalidation in self.settings.two_factor_onvalidation:
try:
session.auth_two_factor = \
two_factor_onvalidation(session.auth_two_factor_user, form.vars['authentication_code'])
except:
pass
else:
break
if form.vars['authentication_code'] == str(session.auth_two_factor):
# Handle the case when the two-factor form has been successfully validated
# and the user was previously stored (the current user should be None because
# in this case, the previous username/password login form should not be displayed.
# This will allow the code after the 2-factor authentication block to proceed as
# normal.
if user is None or user == session.auth_two_factor_user:
user = session.auth_two_factor_user
# For security, because the username stored in the
# session somehow does not match the just validated
# user. Should not be possible without session stealing
# which is hard with SSL.
elif user != session.auth_two_factor_user:
user = None
# Either way, the user and code associated with this session should
# be removed. This handles cases where the session login may have
# expired but browser window is open, so the old session key and
# session username will still exist
self._reset_two_factor_auth(session)
else:
session.auth_two_factor_tries_left -= 1
# If the number of retries are higher than auth_two_factor_tries_left
# Require user to enter username and password again.
if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
# Exceeded maximum allowed tries for this code. Require user to enter
# username and password again.
user = None
accepted_form = False
self._reset_two_factor_auth(session)
# Redirect to the default 'next' page without logging
# in. If that page requires login, user will be redirected
# back to the main login form
redirect(next, client_side=settings.client_side)
response.flash = self.messages.invalid_two_factor_code.format(session.auth_two_factor_tries_left)
return form
else:
return form
# End login logic for two-factor authentication
# process authenticated users
if user:
user = Row(table_user._filter_fields(user, id=True))
# user wants to be logged in for longer
self.login_user(user)
session.auth.expiration = \
request.post_vars.remember_me and \
settings.long_expiration or \
settings.expiration
session.auth.remember_me = 'remember_me' in request.post_vars
#self.log_event(log, user)
session.flash = self.messages.logged_in
# how to continue
if settings.login_form == self:
if accepted_form:
callback(onaccept, form)
if next == session._auth_next:
session._auth_next = None
next = replace_id(next, form)
redirect(next, client_side=settings.client_side)
table_user[username].requires = old_requires
return form
elif user:
callback(onaccept, None)
if next == session._auth_next:
del session._auth_next
redirect(next, client_side=settings.client_side)
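# Illustrative configuration (sketch): the login flow above is customised
# through auth.settings / auth.messages in a model, for example:
#
#     auth.settings.login_next = URL('index')             # where to go after login
#     auth.settings.remember_me_form = True               # show the "remember me" checkbox
#     auth.settings.login_onaccept = [lambda form: None]  # post-login callbacks
#
# Two-factor authentication is switched on with
#     auth.settings.auth_two_factor_enabled = True
# or restricted to members of a group named by
#     auth.settings.two_factor_authentication_group = 'admins'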
def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
"""
Logs out and redirects to login
"""
# Clear out 2-step authentication information if user logs
# out. This information is also cleared on successful login.
self._reset_two_factor_auth(current.session)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.logout_next
if onlogout is DEFAULT:
onlogout = self.settings.logout_onlogout
if onlogout:
onlogout(self.user)
if log is DEFAULT:
log = self.messages['logout_log']
if self.user:
self.log_event(log, self.user)
if self.settings.login_form != self:
cas = self.settings.login_form
cas_user = cas.get_user()
if cas_user:
next = cas.logout_url(next)
current.session.auth = None
self.user = None
if self.settings.renew_session_onlogout:
current.session.renew(clear_session=not self.settings.keep_session_onlogout)
current.session.flash = self.messages.logged_out
if next is not None:
redirect(next)
def logout_bare(self):
self.logout(next=None, onlogout=None, log=None)
def register(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a registration form
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
if self.is_logged_in():
redirect(self.settings.logged_url,
client_side=self.settings.client_side)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.register_next
if onvalidation is DEFAULT:
onvalidation = self.settings.register_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.register_onaccept
if log is DEFAULT:
log = self.messages['register_log']
table_user = self.table_user()
if self.settings.login_userfield:
username = self.settings.login_userfield
elif 'username' in table_user.fields:
username = 'username'
else:
username = 'email'
# Ensure the username field is unique.
unique_validator = IS_NOT_IN_DB(self.db, table_user[username])
if not table_user[username].requires:
table_user[username].requires = unique_validator
elif isinstance(table_user[username].requires, (list, tuple)):
if not any([isinstance(validator, IS_NOT_IN_DB) for validator in
table_user[username].requires]):
if isinstance(table_user[username].requires, list):
table_user[username].requires.append(unique_validator)
else:
table_user[username].requires += (unique_validator, )
elif not isinstance(table_user[username].requires, IS_NOT_IN_DB):
table_user[username].requires = [table_user[username].requires,
unique_validator]
passfield = self.settings.password_field
formstyle = self.settings.formstyle
try: # Make sure we have our original minimum length as other auth forms change it
table_user[passfield].requires[-1].min_length = self.settings.password_min_length
except:
pass
if self.settings.register_verify_password:
if self.settings.register_fields is None:
self.settings.register_fields = [f.name for f in table_user if f.writable and not f.compute]
k = self.settings.register_fields.index(passfield)
self.settings.register_fields.insert(k + 1, "password_two")
extra_fields = [
Field("password_two", "password",
requires=IS_EQUAL_TO(request.post_vars.get(passfield, None),
error_message=self.messages.mismatched_password),
label=current.T("Confirm Password"))]
else:
extra_fields = []
form = SQLFORM(table_user,
fields=self.settings.register_fields,
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.register_button,
delete_label=self.messages.delete_label,
formstyle=formstyle,
separator=self.settings.label_separator,
extra_fields=extra_fields
)
captcha = self.settings.register_captcha or self.settings.captcha
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
# Add a message if specified
if self.settings.pre_registration_div:
addrow(form, '',
DIV(_id="pre-reg", *self.settings.pre_registration_div),
'', formstyle, '')
key = web2py_uuid()
if self.settings.registration_requires_approval:
key = 'pending-' + key
table_user.registration_key.default = key
if form.accepts(request, session if self.csrf_prevention else None,
formname='register',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
description = self.messages.group_description % form.vars
if self.settings.create_user_groups:
group_id = self.add_group(self.settings.create_user_groups % form.vars, description)
self.add_membership(group_id, form.vars.id)
if self.settings.everybody_group_id:
self.add_membership(self.settings.everybody_group_id, form.vars.id)
if self.settings.registration_requires_verification:
link = self.url(
self.settings.function, args=('verify_email', key), scheme=True)
d = dict(form.vars)
d.update(dict(key=key, link=link, username=form.vars[username],
firstname=form.vars['firstname'],
lastname=form.vars['lastname']))
if not (self.settings.mailer and self.settings.mailer.send(
to=form.vars.email,
subject=self.messages.verify_email_subject,
message=self.messages.verify_email % d)):
self.db.rollback()
response.flash = self.messages.unable_send_email
return form
session.flash = self.messages.email_sent
if self.settings.registration_requires_approval and \
not self.settings.registration_requires_verification:
table_user[form.vars.id] = dict(registration_key='pending')
session.flash = self.messages.registration_pending
elif (not self.settings.registration_requires_verification or self.settings.login_after_registration):
if not self.settings.registration_requires_verification:
table_user[form.vars.id] = dict(registration_key='')
session.flash = self.messages.registration_successful
user = table_user(**{username: form.vars[username]})
self.login_user(user)
session.flash = self.messages.logged_in
self.log_event(log, form.vars)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def verify_email(self,
next=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Action used to verify the registration email
"""
key = getarg(-1)
table_user = self.table_user()
user = table_user(registration_key=key)
if not user:
redirect(self.settings.login_url)
if self.settings.registration_requires_approval:
user.update_record(registration_key='pending')
current.session.flash = self.messages.registration_pending
else:
user.update_record(registration_key='')
current.session.flash = self.messages.email_verified
# make sure session has same user.registration_key as db record
if current.session.auth and current.session.auth.user:
current.session.auth.user.registration_key = user.registration_key
if log is DEFAULT:
log = self.messages['verify_email_log']
if next is DEFAULT:
next = self.settings.verify_email_next
if onaccept is DEFAULT:
onaccept = self.settings.verify_email_onaccept
self.log_event(log, user)
callback(onaccept, user)
redirect(next)
def retrieve_username(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to retrieve the user's username
(only if there is a username field)
"""
table_user = self.table_user()
if 'username' not in table_user.fields:
raise HTTP(404)
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_username_captcha or \
(self.settings.retrieve_username_captcha is not False and self.settings.captcha)
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if next is DEFAULT:
next = self.get_vars_next() or self.settings.retrieve_username_next
if onvalidation is DEFAULT:
onvalidation = self.settings.retrieve_username_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.retrieve_username_onaccept
if log is DEFAULT:
log = self.messages['retrieve_username_log']
old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
form = SQLFORM(table_user,
fields=['email'],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='retrieve_username', dbio=False,
onvalidation=onvalidation, hideerror=self.settings.hideerror):
users = table_user._db(table_user.email == form.vars.email).select()
if not users:
current.session.flash = \
self.messages.invalid_email
redirect(self.url(args=request.args))
username = ', '.join(u.username for u in users)
self.settings.mailer.send(to=form.vars.email,
subject=self.messages.retrieve_username_subject,
message=self.messages.retrieve_username % dict(username=username))
session.flash = self.messages.email_sent
for user in users:
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next)
table_user.email.requires = old_requires
return form
def random_password(self):
import string
import random
password = ''
specials = r'!#$*'
for i in range(0, 3):
password += random.choice(string.ascii_lowercase)
password += random.choice(string.ascii_uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password, len(password)))
def reset_password_deprecated(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password (deprecated)
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if next is DEFAULT:
next = self.get_vars_next() or self.settings.retrieve_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.retrieve_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.retrieve_password_onaccept
if log is DEFAULT:
log = self.messages['retrieve_password_log']
old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
form = SQLFORM(table_user,
fields=['email'],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session if self.csrf_prevention else None,
formname='retrieve_password', dbio=False,
onvalidation=onvalidation, hideerror=self.settings.hideerror):
user = table_user(email=form.vars.email)
key = user.registration_key
if not user:
current.session.flash = \
self.messages.invalid_email
redirect(self.url(args=request.args))
elif key in ('pending', 'disabled', 'blocked') or (key or '').startswith('pending'):
current.session.flash = \
self.messages.registration_pending
redirect(self.url(args=request.args))
password = self.random_password()
passfield = self.settings.password_field
d = {
passfield: str(table_user[passfield].validate(password)[0]),
'registration_key': ''
}
user.update_record(**d)
if self.settings.mailer and \
self.settings.mailer.send(to=form.vars.email,
subject=self.messages.retrieve_password_subject,
message=self.messages.retrieve_password % dict(password=password)):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next)
table_user.email.requires = old_requires
return form
def confirm_registration(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to confirm user registration
"""
table_user = self.table_user()
request = current.request
# response = current.response
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.reset_password_next
if self.settings.prevent_password_reset_attacks:
key = request.vars.key
if not key and len(request.args) > 1:
key = request.args[-1]
if key:
session._reset_password_key = key
if next:
redirect_vars = {'_next': next}
else:
redirect_vars = {}
redirect(self.url(args='confirm_registration',
vars=redirect_vars))
else:
key = session._reset_password_key
else:
key = request.vars.key or getarg(-1)
try:
t0 = int(key.split('-')[0])
if time.time() - t0 > 60 * 60 * 24:
raise Exception
user = table_user(reset_password_key=key)
if not user:
raise Exception
except Exception as e:
session.flash = self.messages.invalid_reset_password
redirect(next, client_side=self.settings.client_side)
passfield = self.settings.password_field
form = SQLFORM.factory(
Field('first_name',
label='First Name',
required=True),
Field('last_name',
label='Last Name',
required=True),
Field('new_password', 'password',
label=self.messages.new_password,
requires=self.table_user()[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button='Confirm Registration',
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.process().accepted:
user.update_record(
**{passfield: str(form.vars.new_password),
'first_name': str(form.vars.first_name),
'last_name': str(form.vars.last_name),
'registration_key': '',
'reset_password_key': ''})
session.flash = self.messages.password_changed
if self.settings.login_after_password_change:
self.login_user(user)
redirect(next, client_side=self.settings.client_side)
return form
def email_registration(self, subject, body, user):
"""
Sends an email invitation to a user informing them they have been registered with the application
"""
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('confirm_registration',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link, site=current.request.env.http_host))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=subject % d,
message=body % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def bulk_register(self, max_emails=100):
"""
Creates a form for the user to send invites to other users to join
"""
if not self.user:
redirect(self.settings.login_url)
if not self.settings.bulk_register_enabled:
return HTTP(404)
form = SQLFORM.factory(
Field('subject', 'string', default=self.messages.bulk_invite_subject, requires=IS_NOT_EMPTY()),
Field('emails', 'text', requires=IS_NOT_EMPTY()),
Field('message', 'text', default=self.messages.bulk_invite_body, requires=IS_NOT_EMPTY()),
formstyle=self.settings.formstyle)
if form.process().accepted:
emails = re.compile(r'[^\s\'"@<>,;:]+@[^\s\'"@<>,;:]+').findall(form.vars.emails)
# send the invitations
emails_sent = []
emails_fail = []
emails_exist = []
for email in emails[:max_emails]:
if self.table_user()(email=email):
emails_exist.append(email)
else:
user = self.register_bare(email=email)
if self.email_registration(form.vars.subject, form.vars.message, user):
emails_sent.append(email)
else:
emails_fail.append(email)
emails_fail += emails[max_emails:]
form = DIV(H4('Emails sent'), UL(*[A(x, _href='mailto:' + x) for x in emails_sent]),
H4('Emails failed'), UL(*[A(x, _href='mailto:' + x) for x in emails_fail]),
H4('Emails existing'), UL(*[A(x, _href='mailto:' + x) for x in emails_exist]))
return form
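# Illustrative usage (sketch): bulk_register is disabled by default; enable
# it in a model and expose it from a controller action, e.g.:
#
#     auth.settings.bulk_register_enabled = True
#
#     # controller
#     def bulk_invite():
#         return dict(form=auth.bulk_register(max_emails=50))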
def manage_tokens(self):
if not self.user:
redirect(self.settings.login_url)
table_token = self.table_token()
table_token.user_id.writable = False
table_token.user_id.default = self.user.id
table_token.token.writable = False
if current.request.args(1) == 'new':
table_token.token.readable = False
form = SQLFORM.grid(table_token, args=['manage_tokens'])
return form
def reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password
"""
table_user = self.table_user()
request = current.request
# response = current.response
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.reset_password_next
if self.settings.prevent_password_reset_attacks:
key = request.vars.key
if key:
session._reset_password_key = key
redirect(self.url(args='reset_password'))
else:
key = session._reset_password_key
else:
key = request.vars.key
try:
t0 = int(key.split('-')[0])
if time.time() - t0 > 60 * 60 * 24:
raise Exception
user = table_user(reset_password_key=key)
if not user:
raise Exception
except Exception:
session.flash = self.messages.invalid_reset_password
redirect(next, client_side=self.settings.client_side)
key = user.registration_key
if key in ('pending', 'disabled', 'blocked') or (key or '').startswith('pending'):
session.flash = self.messages.registration_pending
redirect(next, client_side=self.settings.client_side)
if onvalidation is DEFAULT:
onvalidation = self.settings.reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.reset_password_onaccept
passfield = self.settings.password_field
form = SQLFORM.factory(
Field('new_password', 'password',
label=self.messages.new_password,
requires=self.table_user()[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_reset_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session, onvalidation=onvalidation,
hideerror=self.settings.hideerror):
user.update_record(
**{passfield: str(form.vars.new_password),
'registration_key': '',
'reset_password_key': ''})
session.flash = self.messages.password_changed
if self.settings.login_after_password_change:
self.login_user(user)
callback(onaccept, form)
redirect(next, client_side=self.settings.client_side)
return form
def request_reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_password_captcha or \
(self.settings.retrieve_password_captcha is not False and self.settings.captcha)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.request_reset_password_next
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if onvalidation is DEFAULT:
onvalidation = self.settings.request_reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.request_reset_password_onaccept
if log is DEFAULT:
log = self.messages['reset_password_log']
userfield = self.settings.login_userfield or ('username' if 'username' in table_user.fields else 'email')
if userfield == 'email':
table_user.email.requires = [
IS_EMAIL(error_message=self.messages.invalid_email),
IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
if not self.settings.email_case_sensitive:
table_user.email.requires.insert(0, IS_LOWER())
elif userfield == 'username':
table_user.username.requires = [
IS_IN_DB(self.db, table_user.username,
error_message=self.messages.invalid_username)]
if not self.settings.username_case_sensitive:
table_user.username.requires.insert(0, IS_LOWER())
form = SQLFORM(table_user,
fields=[userfield],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.password_reset_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='reset_password', dbio=False,
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
user = table_user(**{userfield: form.vars.get(userfield)})
key = user.registration_key
if not user:
session.flash = self.messages['invalid_%s' % userfield]
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
elif key in ('pending', 'disabled', 'blocked') or (key or '').startswith('pending'):
session.flash = self.messages.registration_pending
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
if self.email_reset_password(user):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
# old_requires = table_user.email.requires
return form
def email_reset_password(self, user):
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('reset_password',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=self.messages.reset_password_subject,
message=self.messages.reset_password % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def retrieve_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
if self.settings.reset_password_requires_verification:
return self.request_reset_password(next, onvalidation, onaccept, log)
else:
return self.reset_password_deprecated(next, onvalidation, onaccept, log)
def change_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change password
"""
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
# Go to external link to change the password
if self.settings.login_form != self:
cas = self.settings.login_form
# To prevent error if change_password_url function is not defined in alternate login
if hasattr(cas, 'change_password_url'):
next = cas.change_password_url(next)
if next is not None:
redirect(next)
db = self.db
table_user = self.table_user()
s = db(table_user.id == self.user.id)
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.change_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.change_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.change_password_onaccept
if log is DEFAULT:
log = self.messages['change_password_log']
passfield = self.settings.password_field
requires = table_user[passfield].requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = list(filter(lambda t: isinstance(t, CRYPT), requires))
if requires:
requires[0] = CRYPT(**requires[0].__dict__) # Copy the existing CRYPT attributes
requires[0].min_length = 0 # But do not enforce minimum length for the old password
form = SQLFORM.factory(
Field('old_password', 'password', requires=requires,
label=self.messages.old_password),
Field('new_password', 'password',
label=self.messages.new_password,
requires=table_user[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_change_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session,
formname='change_password',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first()
if form.vars['old_password'] != current_user[passfield]:
form.errors['old_password'] = self.messages.invalid_password
else:
d = {passfield: str(form.vars.new_password)}
s.update(**d)
session.flash = self.messages.password_changed
self.log_event(log, self.user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def profile(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change his/her profile
"""
table_user = self.table_user()
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
passfield = self.settings.password_field
table_user[passfield].writable = False
table_user['email'].writable = False
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.profile_next
if onvalidation is DEFAULT:
onvalidation = self.settings.profile_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.profile_onaccept
if log is DEFAULT:
log = self.messages['profile_log']
form = SQLFORM(
table_user,
self.user.id,
fields=self.settings.profile_fields,
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.profile_save_button,
delete_label=self.messages.delete_label,
upload=self.settings.download_url,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator,
deletable=self.settings.allow_delete_accounts,
)
if form.accepts(request, session,
formname='profile',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
extra_fields = self.settings.extra_fields.get(self.settings.table_user_name, [])
if any(f.compute for f in extra_fields):
user = table_user[self.user.id]
self._update_session_user(user)
self.update_groups()
else:
self.user.update(table_user._filter_fields(form.vars))
session.flash = self.messages.profile_updated
self.log_event(log, self.user)
callback(onaccept, form)
if form.deleted:
return self.logout()
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def run_login_onaccept(self):
onaccept = self.settings.login_onaccept
if onaccept:
form = Storage(dict(vars=self.user))
if not isinstance(onaccept, (list, tuple)):
onaccept = [onaccept]
for callback in onaccept:
callback(form)
def jwt(self):
"""
To use JWT authentication:
1) instantiate auth with::
auth = Auth(db, jwt = {'secret_key':'secret'})
where 'secret' is your own secret string.
2) Decorate functions that require login but should accept the JWT token credentials::
@auth.allows_jwt()
@auth.requires_login()
def myapi(): return 'hello %s' % auth.user.email
Notice jwt is allowed but not required. If the user is logged in, myapi is accessible.
3) Use it!
Now API users can obtain a token with
http://.../app/default/user/jwt?username=...&password=....
(returns json object with a token attribute)
API users can refresh an existing token with
http://.../app/default/user/jwt?token=...
they can authenticate themselves when calling http://.../myapi by injecting a header
Authorization: Bearer <the jwt token>
Any additional attributes in the jwt argument of Auth() below::
auth = Auth(db, jwt = {...})
are passed to the constructor of class AuthJWT. Look there for documentation.
"""
if not self.jwt_handler:
raise HTTP(400, "Not authorized")
else:
rtn = self.jwt_handler.jwt_token_manager()
raise HTTP(200, rtn, cookies=None, **current.response.headers)
def is_impersonating(self):
return self.is_logged_in() and 'impersonator' in current.session.auth
def impersonate(self, user_id=DEFAULT):
"""
To use this, make a POST request to
`http://..../impersonate` with request.post_vars.user_id=<id>
Set request.post_vars.user_id to 0 to restore original user.
requires impersonator is logged in and::
has_permission('impersonate', 'auth_user', user_id)
"""
request = current.request
session = current.session
auth = session.auth
table_user = self.table_user()
if not self.is_logged_in():
raise HTTP(401, "Not Authorized")
current_id = auth.user.id
requested_id = user_id
user = None
if user_id is DEFAULT:
user_id = current.request.post_vars.user_id
if user_id and user_id != self.user.id and user_id != '0':
if not self.has_permission('impersonate',
self.table_user(),
user_id):
raise HTTP(403, "Forbidden")
user = table_user(user_id)
if not user:
raise HTTP(401, "Not Authorized")
auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
auth.user.update(
table_user._filter_fields(user, True))
self.user = auth.user
self.update_groups()
log = self.messages['impersonate_log']
self.log_event(log, dict(id=current_id, other_id=auth.user.id))
self.run_login_onaccept()
elif user_id in (0, '0'):
if self.is_impersonating():
session.clear()
session.update(pickle.loads(auth.impersonator))
self.user = session.auth.user
self.update_groups()
self.run_login_onaccept()
return None
if requested_id is DEFAULT and not request.post_vars:
return SQLFORM.factory(Field('user_id', 'integer'))
elif not user:
return None
else:
return SQLFORM(table_user, user.id, readonly=True)
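# Illustrative usage (hedged sketch): impersonation is typically reached via
# the user() action (request "user/impersonate") and requires an explicit
# permission, e.g. granted to an existing 'admins' group:
#
#     gid = auth.id_group('admins')
#     auth.add_permission(gid, 'impersonate', db.auth_user, 0)
#
# Note: whether record id 0 is treated as "any user" depends on
# has_permission() in your web2py version; verify before relying on it.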
def groups(self):
"""
Displays the groups and their roles for the logged in user
"""
if not self.is_logged_in():
redirect(self.settings.login_url)
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
table = TABLE()
for membership in memberships:
table_group = self.table_group()
groups = self.db(table_group.id == membership.group_id).select()
if groups:
group = groups[0]
table.append(TR(H3(group.role, '(%s)' % group.id)))
table.append(TR(P(group.description)))
if not memberships:
return None
return table
def not_authorized(self):
"""
You can change the view for this page to make it look as you like
"""
if current.request.ajax:
raise HTTP(403, 'ACCESS DENIED')
return self.messages.access_denied
def allows_jwt(self, otherwise=None):
if not self.jwt_handler:
raise HTTP(400, "Not authorized")
else:
return self.jwt_handler.allows_jwt(otherwise=otherwise)
def requires(self, condition, requires_login=True, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
def decorator(action):
def f(*a, **b):
basic_allowed, basic_accepted, user = self.basic()
user = user or self.user
login_required = requires_login
if callable(login_required):
login_required = login_required()
if login_required:
if not user:
if current.request.ajax:
raise HTTP(401, self.messages.ajax_failed_authentication)
elif otherwise is not None:
if callable(otherwise):
return otherwise()
redirect(otherwise)
elif self.settings.allow_basic_login_only or \
basic_accepted or current.request.is_restful:
raise HTTP(403, "Not authorized")
else:
next = self.here()
current.session.flash = current.response.flash
return call_or_redirect(self.settings.on_failed_authentication,
self.settings.login_url + '?_next=' + urllib_quote(next))
if callable(condition):
flag = condition()
else:
flag = condition
if not flag:
current.session.flash = self.messages.access_denied
return call_or_redirect(
self.settings.on_failed_authorization)
return action(*a, **b)
f.__doc__ = action.__doc__
f.__name__ = action.__name__
f.__dict__.update(action.__dict__)
return f
return decorator
def requires_login(self, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
return self.requires(True, otherwise=otherwise)
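# Illustrative usage (sketch) of the decorators defined here:
#
#     @auth.requires_login()
#     def profile_page():
#         return dict(user=auth.user)
#
#     @auth.requires(lambda: auth.user and auth.user.email.endswith('@example.com'))
#     def staff_only():
#         return dict()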
def requires_login_or_token(self, otherwise=None):
if self.settings.enable_tokens is True:
user = None
request = current.request
token = request.env.http_web2py_user_token or request.vars._token
table_token = self.table_token()
table_user = self.table_user()
from gluon.settings import global_settings
if global_settings.web2py_runtime_gae:
row = table_token(token=token)
if row:
user = table_user(row.user_id)
else:
row = self.db(table_token.token == token)(table_user.id == table_token.user_id).select().first()
if row:
user = row[table_user._tablename]
if user:
self.login_user(user)
return self.requires(True, otherwise=otherwise)
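# Illustrative usage (hedged sketch): token-based access requires
# auth.settings.enable_tokens = True and a row in the auth token table for
# the user. A client then passes the token either as the _token request
# variable or in a header that web2py exposes as
# request.env.http_web2py_user_token (typically sent as "Web2py-User-Token").
#
#     @auth.requires_login_or_token()
#     def api():
#         return dict(user=auth.user.email)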
def requires_membership(self, role=None, group_id=None, otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
def has_membership(self=self, group_id=group_id, role=role):
return self.has_membership(group_id=group_id, role=role)
return self.requires(has_membership, otherwise=otherwise)
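# Illustrative usage (sketch), assuming a 'managers' group exists:
#
#     @auth.requires_membership('managers')
#     def managers_area():
#         return dict()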
def requires_permission(self, name, table_name='', record_id=0,
otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of any group (role) that
has 'name' access to 'table_name', 'record_id'.
"""
def has_permission(self=self, name=name, table_name=table_name, record_id=record_id):
return self.has_permission(name, table_name, record_id)
return self.requires(has_permission, otherwise=otherwise)
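# Illustrative usage (sketch): 'document' below is a placeholder table name;
# the permission must have been granted beforehand, e.g. with
# auth.add_permission(group_id, 'read', 'document', 0).
#
#     @auth.requires_permission('read', 'document')
#     def list_documents():
#         return dict(rows=db(db.document).select())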
def requires_signature(self, otherwise=None, hash_vars=True):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
def verify():
return URL.verify(current.request, user_signature=True, hash_vars=hash_vars)
return self.requires(verify, otherwise)
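# Illustrative usage (sketch): pair requires_signature with digitally signed
# URLs so only links generated by the app are accepted (db.item is a
# placeholder table):
#
#     # in a view or controller
#     A('delete', _href=URL('delete_item', args=item.id, user_signature=True))
#
#     @auth.requires_signature()
#     def delete_item():
#         db(db.item.id == request.args(0)).delete()
#         return 'ok'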
def accessible_query(self, name, table, user_id=None):
"""
Returns a query with all accessible records for user_id or
the currently logged in user.
This method does not work on GAE because it uses JOIN and IN.
Example:
Use as::
db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)
"""
if not user_id:
user_id = self.user_id
db = self.db
if isinstance(table, str) and table in self.db.tables():
table = self.db[table]
elif isinstance(table, (Set, Query)):
# experimental: build a chained query for all tables
if isinstance(table, Set):
cquery = table.query
else:
cquery = table
tablenames = db._adapter.tables(cquery)
for tablename in tablenames:
cquery &= self.accessible_query(name, tablename, user_id=user_id)
return cquery
if not isinstance(table, str) and \
self.has_permission(name, table, 0, user_id):
return table.id > 0
membership = self.table_membership()
permission = self.table_permission()
query = table.id.belongs(
db(membership.user_id == user_id)
(membership.group_id == permission.group_id)
(permission.name == name)
(permission.table_name == table)
._select(permission.record_id))
if self.settings.everybody_group_id:
query |= table.id.belongs(
db(permission.group_id == self.settings.everybody_group_id)
(permission.name == name)
(permission.table_name == table)
._select(permission.record_id))
return query
@staticmethod
def archive(form,
archive_table=None,
current_record='current_record',
archive_current=False,
fields=None):
"""
If you have a table (db.mytable) that needs full revision history you
can just do::
form = crud.update(db.mytable, myrecord, onaccept=auth.archive)
or::
form = SQLFORM(db.mytable, myrecord).process(onaccept=auth.archive)
crud.archive will define a new table "mytable_archive" and store
a copy of the current record (if archive_current=True)
or a copy of the previous record (if archive_current=False)
in the newly created table including a reference
to the current record.
The fields argument allows specifying extra fields that need to be archived.
If you want to access such table you need to define it yourself
in a model::
db.define_table('mytable_archive',
Field('current_record', db.mytable),
db.mytable)
Notice such table includes all fields of db.mytable plus one: current_record.
crud.archive does not timestamp the stored record unless your original table
has fields like::
db.define_table(...,
Field('saved_on', 'datetime',
default=request.now, update=request.now, writable=False),
Field('saved_by', auth.user,
default=auth.user_id, update=auth.user_id, writable=False),
there is nothing special about these fields since they are filled before
the record is archived.
If you want to change the archive table name and the name of the reference field
you can do, for example::
db.define_table('myhistory',
Field('parent_record', db.mytable), db.mytable)
and use it as::
form = crud.update(db.mytable, myrecord,
onaccept=lambda form:crud.archive(form,
archive_table=db.myhistory,
current_record='parent_record'))
"""
if not archive_current and not form.record:
return None
table = form.table
if not archive_table:
archive_table_name = '%s_archive' % table
if archive_table_name not in table._db:
table._db.define_table(
archive_table_name,
Field(current_record, table),
*[field.clone(unique=False) for field in table])
archive_table = table._db[archive_table_name]
new_record = {current_record: form.vars.id}
for fieldname in archive_table.fields:
if fieldname not in ['id', current_record]:
if archive_current and fieldname in form.vars:
new_record[fieldname] = form.vars[fieldname]
elif form.record and fieldname in form.record:
new_record[fieldname] = form.record[fieldname]
if fields:
new_record.update(fields)
id = archive_table.insert(**new_record)
return id
def wiki(self,
slug=None,
env=None,
render='markmin',
manage_permissions=False,
force_prefix='',
restrict_search=False,
resolve=True,
extra=None,
menu_groups=None,
templates=None,
migrate=True,
controller=None,
function=None,
force_render=False,
groups=None):
if controller and function:
resolve = False
if not hasattr(self, '_wiki'):
self._wiki = Wiki(self, render=render,
manage_permissions=manage_permissions,
force_prefix=force_prefix,
restrict_search=restrict_search,
env=env, extra=extra or {},
menu_groups=menu_groups,
templates=templates,
migrate=migrate,
controller=controller,
function=function,
groups=groups)
else:
self._wiki.settings.extra = extra or {}
self._wiki.env.update(env or {})
# if resolve is set to True, process request as wiki call
# resolve=False allows initial setup without wiki redirection
wiki = None
if resolve:
if slug:
wiki = self._wiki.read(slug, force_render)
if isinstance(wiki, dict) and 'content' in wiki:
# We don't want to return a dict object, just the wiki
wiki = wiki['content']
else:
wiki = self._wiki()
if isinstance(wiki, basestring):
wiki = XML(wiki)
return wiki
def wikimenu(self):
"""To be used in menu.py for app wide wiki menus"""
if (hasattr(self, "_wiki") and
self._wiki.settings.controller and
self._wiki.settings.function):
self._wiki.automenu()
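# Illustrative usage (sketch): the built-in wiki is usually exposed from a
# controller and the menu hook from models/menu.py:
#
#     # controller (e.g. default.py)
#     def index():
#         return auth.wiki()
#
#     # models/menu.py
#     auth.wikimenu()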
class Crud(object): # pragma: no cover
default_messages = dict(
submit_button='Submit',
delete_label='Check to delete',
record_created='Record Created',
record_updated='Record Updated',
record_deleted='Record Deleted',
update_log='Record %(id)s updated',
create_log='Record %(id)s created',
read_log='Record %(id)s read',
delete_log='Record %(id)s deleted',
)
def url(self, f=None, args=None, vars=None):
"""
This should point to the controller that exposes
download and crud
"""
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller, f=f, args=args, vars=vars)
def __init__(self, environment, db=None, controller='default'):
self.db = db
if not db and environment and isinstance(environment, DAL):
self.db = environment
elif not db:
raise SyntaxError("must pass db as first or second argument")
self.environment = current
settings = self.settings = Settings()
settings.auth = None
settings.logger = None
settings.create_next = None
settings.update_next = None
settings.controller = controller
settings.delete_next = self.url()
settings.download_url = self.url('download')
settings.create_onvalidation = StorageList()
settings.update_onvalidation = StorageList()
settings.delete_onvalidation = StorageList()
settings.create_onaccept = StorageList()
settings.update_onaccept = StorageList()
settings.update_ondelete = StorageList()
settings.delete_onaccept = StorageList()
settings.update_deletable = True
settings.showid = False
settings.keepvalues = False
settings.create_captcha = None
settings.update_captcha = None
settings.captcha = None
settings.formstyle = 'table3cols'
settings.label_separator = ': '
settings.hideerror = False
settings.detect_record_change = True
settings.hmac_key = None
settings.lock_keys = True
messages = self.messages = Messages(current.T)
messages.update(Crud.default_messages)
messages.lock_keys = True
def __call__(self):
args = current.request.args
if len(args) < 1:
raise HTTP(404)
elif args[0] == 'tables':
return self.tables()
elif len(args) > 1 and not args(1) in self.db.tables:
raise HTTP(404)
table = self.db[args(1)]
if args[0] == 'create':
return self.create(table)
elif args[0] == 'select':
return self.select(table, linkto=self.url(args='read'))
elif args[0] == 'search':
form, rows = self.search(table, linkto=self.url(args='read'))
return DIV(form, SQLTABLE(rows))
elif args[0] == 'read':
return self.read(table, args(2))
elif args[0] == 'update':
return self.update(table, args(2))
elif args[0] == 'delete':
return self.delete(table, args(2))
else:
raise HTTP(404)
def log_event(self, message, vars):
if self.settings.logger:
self.settings.logger.log_event(message, vars, origin='crud')
def has_permission(self, name, table, record=0):
if not self.settings.auth:
return True
try:
record_id = record.id
except:
record_id = record
return self.settings.auth.has_permission(name, str(table), record_id)
def tables(self):
return TABLE(*[TR(A(name,
_href=self.url(args=('select', name))))
for name in self.db.tables])
@staticmethod
def archive(form, archive_table=None, current_record='current_record'):
return Auth.archive(form, archive_table=archive_table,
current_record=current_record)
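    # Illustrative usage (not part of the original module): since
    # update_onaccept is a StorageList keyed by table name, Crud.archive can
    # be registered per table; 'mytable' is a hypothetical table name and
    # crud is assumed to be a Crud(db) instance.
    #
    #     crud.settings.update_onaccept.mytable.append(
    #         lambda form: crud.archive(form))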
def update(self,
table,
record,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
ondelete=DEFAULT,
log=DEFAULT,
message=DEFAULT,
deletable=DEFAULT,
formname=DEFAULT,
**attributes
):
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
try:
record_id = record.id
except:
record_id = record or 0
if record_id and not self.has_permission('update', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
if not record_id and not self.has_permission('create', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
response = current.response
session = current.session
if request.extension == 'json' and request.vars.json:
request.vars.update(json.loads(request.vars.json))
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.update_next
if onvalidation is DEFAULT:
onvalidation = self.settings.update_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.update_onaccept
if ondelete is DEFAULT:
ondelete = self.settings.update_ondelete
if log is DEFAULT:
log = self.messages['update_log']
if deletable is DEFAULT:
deletable = self.settings.update_deletable
if message is DEFAULT:
message = self.messages.record_updated
if 'hidden' not in attributes:
attributes['hidden'] = {}
attributes['hidden']['_next'] = next
form = SQLFORM(
table,
record,
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
deletable=deletable,
upload=self.settings.download_url,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator,
**attributes # contains hidden
)
self.accepted = False
self.deleted = False
captcha = self.settings.update_captcha or self.settings.captcha
if record and captcha:
addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row')
captcha = self.settings.create_captcha or self.settings.captcha
if not record and captcha:
addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row')
if request.extension not in ('html', 'load'):
(_session, _formname) = (None, None)
else:
(_session, _formname) = (
session, '%s/%s' % (table._tablename, form.record_id))
if formname is not DEFAULT:
_formname = formname
keepvalues = self.settings.keepvalues
if request.vars.delete_this_record:
keepvalues = False
if isinstance(onvalidation, StorageList):
onvalidation = onvalidation.get(table._tablename, [])
if form.accepts(request, _session, formname=_formname,
onvalidation=onvalidation, keepvalues=keepvalues,
hideerror=self.settings.hideerror,
detect_record_change=self.settings.detect_record_change):
self.accepted = True
response.flash = message
if log:
self.log_event(log, form.vars)
if request.vars.delete_this_record:
self.deleted = True
message = self.messages.record_deleted
callback(ondelete, form, table._tablename)
response.flash = message
callback(onaccept, form, table._tablename)
if request.extension not in ('html', 'load'):
raise HTTP(200, 'RECORD CREATED/UPDATED')
if isinstance(next, (list, tuple)): # fix issue with 2.6
next = next[0]
if next: # Only redirect when explicit
next = replace_id(next, form)
session.flash = response.flash
redirect(next)
elif request.extension not in ('html', 'load'):
raise HTTP(401, serializers.json(dict(errors=form.errors)))
return form
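    # Illustrative usage (not part of the original module): a typical
    # controller action exposing an update form, assuming crud = Crud(db)
    # and a hypothetical db.mytable, with the record id as the first URL arg.
    #
    #     def edit_record():
    #         return dict(form=crud.update(db.mytable, request.args(0)))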
def create(self,
table,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
message=DEFAULT,
formname=DEFAULT,
**attributes
):
if next is DEFAULT:
next = self.settings.create_next
if onvalidation is DEFAULT:
onvalidation = self.settings.create_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.create_onaccept
if log is DEFAULT:
log = self.messages['create_log']
if message is DEFAULT:
message = self.messages.record_created
return self.update(table,
None,
next=next,
onvalidation=onvalidation,
onaccept=onaccept,
log=log,
message=message,
deletable=False,
formname=formname,
**attributes
)
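    # Illustrative usage (not part of the original module): create() is just
    # update() with record=None and deletable=False; again assuming
    # crud = Crud(db) and a hypothetical db.mytable.
    #
    #     def new_record():
    #         return dict(form=crud.create(db.mytable))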
def read(self, table, record):
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('read', table, record):
redirect(self.settings.auth.settings.on_failed_authorization)
form = SQLFORM(
table,
record,
readonly=True,
comments=False,
upload=self.settings.download_url,
showid=self.settings.showid,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if current.request.extension not in ('html', 'load'):
return table._filter_fields(form.record, id=True)
return form
def delete(self,
table,
record_id,
next=DEFAULT,
message=DEFAULT,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('delete', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
session = current.session
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.delete_next
if message is DEFAULT:
message = self.messages.record_deleted
record = table[record_id]
if record:
callback(self.settings.delete_onvalidation, record)
del table[record_id]
callback(self.settings.delete_onaccept, record, table._tablename)
session.flash = message
redirect(next)
def rows(self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not self.has_permission('select', table):
redirect(self.settings.auth.settings.on_failed_authorization)
# if record_id and not self.has_permission('select', table):
# redirect(self.settings.auth.settings.on_failed_authorization)
if not isinstance(table, Table):
table = self.db[table]
if not query:
query = table.id > 0
if not fields:
fields = [field for field in table if field.readable]
else:
fields = [table[f] if isinstance(f, str) else f for f in fields]
rows = self.db(query).select(*fields, **dict(orderby=orderby,
limitby=limitby))
return rows
def select(self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
headers=None,
**attr
):
headers = headers or {}
rows = self.rows(table, query, fields, orderby, limitby)
if not rows:
return None # Nicer than an empty table.
if 'upload' not in attr:
attr['upload'] = self.url('download')
if current.request.extension not in ('html', 'load'):
return rows.as_list()
if not headers:
if isinstance(table, str):
table = self.db[table]
headers = dict((str(k), k.label) for k in table)
return SQLTABLE(rows, headers=headers, **attr)
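    # Illustrative usage (not part of the original module): a filtered and
    # ordered listing, assuming crud = Crud(db) and a hypothetical db.mytable
    # with a 'name' field.
    #
    #     def listing():
    #         table = crud.select(db.mytable,
    #                             query=(db.mytable.id > 0),
    #                             fields=['id', 'name'],
    #                             orderby=~db.mytable.id,
    #                             limitby=(0, 10))
    #         return dict(table=table)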
def get_format(self, field):
rtable = field._db[field.type[10:]]
format = rtable.get('_format', None)
if format and isinstance(format, str):
return format[2:-2]
return field.name
def get_query(self, field, op, value, refsearch=False):
try:
if refsearch:
format = self.get_format(field)
if op == 'equals':
if not refsearch:
return field == value
else:
return lambda row: row[field.name][format] == value
elif op == 'not equal':
if not refsearch:
return field != value
else:
return lambda row: row[field.name][format] != value
elif op == 'greater than':
if not refsearch:
return field > value
else:
return lambda row: row[field.name][format] > value
elif op == 'less than':
if not refsearch:
return field < value
else:
return lambda row: row[field.name][format] < value
elif op == 'starts with':
if not refsearch:
return field.like(value + '%')
else:
return lambda row: str(row[field.name][format]).startswith(value)
elif op == 'ends with':
if not refsearch:
return field.like('%' + value)
else:
return lambda row: str(row[field.name][format]).endswith(value)
elif op == 'contains':
if not refsearch:
return field.like('%' + value + '%')
else:
return lambda row: value in row[field.name][format]
except:
return None
def search(self, *tables, **args):
"""
Creates a search form and its results for a table
Examples:
Use as::
form, results = crud.search(db.test,
queries = ['equals', 'not equal', 'contains'],
query_labels={'equals':'Equals',
'not equal':'Not equal'},
fields = ['id','children'],
field_labels = {
'id':'ID','children':'Children'},
zero='Please choose',
query = (db.test.id > 0)&(db.test.id != 3) )
"""
table = tables[0]
fields = args.get('fields', table.fields)
validate = args.get('validate', True)
request = current.request
db = self.db
if not (isinstance(table, Table) or table in db.tables):
raise HTTP(404)
attributes = {}
for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'):
if key in args:
attributes[key] = args[key]
tbl = TABLE()
selected = []
refsearch = []
results = []
showall = args.get('showall', False)
if showall:
selected = fields
chkall = args.get('chkall', False)
if chkall:
for f in fields:
request.vars['chk%s' % f] = 'on'
ops = args.get('queries', [])
zero = args.get('zero', '')
if not ops:
ops = ['equals', 'not equal', 'greater than',
'less than', 'starts with',
'ends with', 'contains']
ops.insert(0, zero)
query_labels = args.get('query_labels', {})
query = args.get('query', table.id > 0)
field_labels = args.get('field_labels', {})
for field in fields:
field = table[field]
if not field.readable:
continue
fieldname = field.name
chkval = request.vars.get('chk' + fieldname, None)
txtval = request.vars.get('txt' + fieldname, None)
opval = request.vars.get('op' + fieldname, None)
row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname,
_disabled=(field.type == 'id'),
value=(field.type == 'id' or chkval == 'on'))),
TD(field_labels.get(fieldname, field.label)),
TD(SELECT([OPTION(query_labels.get(op, op),
_value=op) for op in ops],
_name="op" + fieldname,
value=opval)),
TD(INPUT(_type="text", _name="txt" + fieldname,
_value=txtval, _id='txt' + fieldname,
_class=str(field.type))))
tbl.append(row)
if request.post_vars and (chkval or field.type == 'id'):
if txtval and opval != '':
if field.type[0:10] == 'reference ':
refsearch.append(self.get_query(field, opval, txtval, refsearch=True))
elif validate:
value, error = field.validate(txtval)
if not error:
# TODO deal with 'starts with', 'ends with', 'contains' on GAE
query &= self.get_query(field, opval, value)
else:
row[3].append(DIV(error, _class='error'))
else:
query &= self.get_query(field, opval, txtval)
selected.append(field)
form = FORM(tbl, INPUT(_type="submit"))
if selected:
try:
results = db(query).select(*selected, **attributes)
for r in refsearch:
results = results.find(r)
except: # TODO: hmmm, we should do better here
results = None
return form, results
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
def fetch(url, data=None, headers=None,
cookie=Cookie.SimpleCookie(),
user_agent='Mozilla/5.0'):
headers = headers or {}
if data is not None:
data = urlencode(data)
if user_agent:
headers['User-agent'] = user_agent
headers['Cookie'] = ' '.join(
['%s=%s;' % (c.key, c.value) for c in cookie.values()])
try:
from google.appengine.api import urlfetch
except ImportError:
req = urllib2.Request(url, data, headers)
html = urllib2.urlopen(req).read()
else:
method = ((data is None) and urlfetch.GET) or urlfetch.POST
while url is not None:
response = urlfetch.fetch(url=url, payload=data,
method=method, headers=headers,
allow_truncated=False, follow_redirects=False,
deadline=10)
# next request will be a get, so no need to send the data again
data = None
method = urlfetch.GET
# load cookies from the response
cookie.load(response.headers.get('set-cookie', ''))
url = response.headers.get('location')
html = response.content
return html
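# Illustrative usage (not part of the original module): fetch() works with the
# standard library opener and, when available, transparently with the GAE
# urlfetch API; the URLs and credentials below are placeholders.
#
#     from gluon.tools import fetch
#     html = fetch('http://www.web2py.com')                      # simple GET
#     page = fetch('http://example.com/login',                   # POST
#                  data={'email': 'me@example.com', 'password': 'secret'})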
regex_geocode = \
re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""")
def geocode(address):
try:
a = urllib_quote(address)
txt = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s' % a)
item = regex_geocode.search(txt)
(la, lo) = (float(item.group('la')), float(item.group('lo')))
return (la, lo)
except:
return (0.0, 0.0)
def reverse_geocode(lat, lng, lang=None):
""" Try to get an approximate address for a given latitude, longitude. """
if not lang:
lang = current.T.accepted_language
try:
return json.loads(fetch('http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s' % locals()))['results'][0]['formatted_address']
except:
return ''
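# Illustrative usage (not part of the original module): both helpers call the
# Google geocoding web service and fail softly, returning (0.0, 0.0) or ''
# on any error; the address below is only an example.
#
#     latitude, longitude = geocode('243 S Wabash Ave, Chicago, IL, USA')
#     address = reverse_geocode(41.878, -87.626, lang='en')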
def universal_caller(f, *a, **b):
c = f.__code__.co_argcount
n = f.__code__.co_varnames[:c]
defaults = f.__defaults__ or []
pos_args = n[0:-len(defaults)]
named_args = n[-len(defaults):]
arg_dict = {}
# Fill the arg_dict with name and value for the submitted, positional values
for pos_index, pos_val in enumerate(a[:c]):
arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument
    # There might be pos_args left that were sent as named values. Gather them as well.
    # If an argument is already populated with a value, we simply replace it.
for arg_name in pos_args[len(arg_dict):]:
if arg_name in b:
arg_dict[arg_name] = b[arg_name]
if len(arg_dict) >= len(pos_args):
        # All the positional arguments are found. The function may now be called.
# However, we need to update the arg_dict with the values from the named arguments as well.
for arg_name in named_args:
if arg_name in b:
arg_dict[arg_name] = b[arg_name]
return f(**arg_dict)
# Raise an error, the function cannot be called.
raise HTTP(404, "Object does not exist")
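# Illustrative behaviour (not part of the original module): universal_caller
# maps whatever mix of positional and named values it receives onto the
# target signature and raises HTTP(404) when a required argument is missing.
#
#     def f(a, b, c=3):
#         return (a, b, c)
#
#     universal_caller(f, 1, b=2)      # -> (1, 2, 3)
#     universal_caller(f, 1, 2, c=9)   # -> (1, 2, 9)
#     universal_caller(f, 1)           # raises HTTP(404), 'b' is missing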
class Service(object):
def __init__(self, environment=None, check_args=False):
self.check_args = check_args
self.run_procedures = {}
self.csv_procedures = {}
self.xml_procedures = {}
self.rss_procedures = {}
self.json_procedures = {}
self.jsonrpc_procedures = {}
self.jsonrpc2_procedures = {}
self.xmlrpc_procedures = {}
self.amfrpc_procedures = {}
self.amfrpc3_procedures = {}
self.soap_procedures = {}
def run(self, f):
"""
Example:
Use as::
service = Service()
@service.run
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/run/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def csv(self, f):
"""
Example:
Use as::
service = Service()
@service.csv
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/csv/myfunction?a=3&b=4
"""
self.csv_procedures[f.__name__] = f
return f
def xml(self, f):
"""
Example:
Use as::
service = Service()
@service.xml
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/xml/myfunction?a=3&b=4
"""
self.xml_procedures[f.__name__] = f
return f
def rss(self, f):
"""
Example:
Use as::
service = Service()
@service.rss
def myfunction():
return dict(title=..., link=..., description=...,
created_on=..., entries=[dict(title=..., link=...,
                description=..., created_on=...)])
def call():
return service()
            Then call it with::
wget http://..../app/default/call/rss/myfunction
"""
self.rss_procedures[f.__name__] = f
return f
def json(self, f):
"""
Example:
Use as::
service = Service()
@service.json
def myfunction(a, b):
return [{a: b}]
def call():
return service()
            Then call it with::
wget http://..../app/default/call/json/myfunction?a=hello&b=world
"""
self.json_procedures[f.__name__] = f
return f
def jsonrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc
def myfunction(a, b):
return a + b
def call():
return service()
            Then call it with::
wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world
"""
self.jsonrpc_procedures[f.__name__] = f
return f
def jsonrpc2(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc2
def myfunction(a, b):
return a + b
def call():
return service()
            Then call it with::
wget --post-data '{"jsonrpc": "2.0",
"id": 1,
"method": "myfunction",
"params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2
"""
self.jsonrpc2_procedures[f.__name__] = f
return f
def xmlrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.xmlrpc
def myfunction(a, b):
return a + b
def call():
return service()
            Then call it with::
wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world
"""
self.xmlrpc_procedures[f.__name__] = f
return f
def amfrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.amfrpc
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world
"""
self.amfrpc_procedures[f.__name__] = f
return f
def amfrpc3(self, domain='default'):
"""
Example:
Use as::
service = Service()
@service.amfrpc3('domain')
def myfunction(a, b):
return a + b
def call():
return service()
            Then call it with::
wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world
"""
if not isinstance(domain, str):
raise SyntaxError("AMF3 requires a domain for function")
def _amfrpc3(f):
if domain:
self.amfrpc3_procedures[domain + '.' + f.__name__] = f
else:
self.amfrpc3_procedures[f.__name__] = f
return f
return _amfrpc3
def soap(self, name=None, returns=None, args=None, doc=None, response_element_name=None):
"""
Example:
Use as::
service = Service()
@service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,})
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
from gluon.contrib.pysimplesoap.client import SoapClient
client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL")
response = client.MyFunction(a=1,b=2)
return response['result']
It also exposes online generated documentation and xml example messages
at `http://..../app/default/call/soap`
"""
def _soap(f):
self.soap_procedures[name or f.__name__] = f, returns, args, doc, response_element_name
return f
return _soap
def serve_run(self, args=None):
request = current.request
if not args:
args = request.args
if args and args[0] in self.run_procedures:
return str(self.call_service_function(self.run_procedures[args[0]],
*args[1:], **dict(request.vars)))
self.error()
def serve_csv(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/x-csv'
if not args:
args = request.args
def none_exception(value):
if isinstance(value, unicodeT):
return value.encode('utf8')
if hasattr(value, 'isoformat'):
return value.isoformat()[:19].replace('T', ' ')
if value is None:
return '<NULL>'
return value
if args and args[0] in self.csv_procedures:
import types
r = self.call_service_function(self.csv_procedures[args[0]],
*args[1:], **dict(request.vars))
s = StringIO()
if hasattr(r, 'export_to_csv_file'):
r.export_to_csv_file(s)
elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)):
import csv
writer = csv.writer(s)
writer.writerow(r[0].keys())
for line in r:
writer.writerow([none_exception(v)
for v in line.values()])
else:
import csv
writer = csv.writer(s)
for line in r:
writer.writerow(line)
return s.getvalue()
self.error()
def serve_xml(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/xml'
if not args:
args = request.args
if args and args[0] in self.xml_procedures:
s = self.call_service_function(self.xml_procedures[args[0]],
*args[1:], **dict(request.vars))
if hasattr(s, 'as_list'):
s = s.as_list()
return serializers.xml(s, quote=False)
self.error()
def serve_rss(self, args=None):
request = current.request
response = current.response
if not args:
args = request.args
if args and args[0] in self.rss_procedures:
feed = self.call_service_function(self.rss_procedures[args[0]],
*args[1:], **dict(request.vars))
else:
self.error()
response.headers['Content-Type'] = 'application/rss+xml'
return serializers.rss(feed)
def serve_json(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
if not args:
args = request.args
d = dict(request.vars)
if args and args[0] in self.json_procedures:
s = self.call_service_function(self.json_procedures[args[0]], *args[1:], **d)
if hasattr(s, 'as_list'):
s = s.as_list()
return response.json(s)
self.error()
class JsonRpcException(Exception):
def __init__(self, code, info):
jrpc_error = Service.jsonrpc_errors.get(code)
if jrpc_error:
self.message, self.description = jrpc_error
self.code, self.info = code, info
# jsonrpc 2.0 error types. records the following structure {code: (message,meaning)}
jsonrpc_errors = {
-32700: ("Parse error. Invalid JSON was received by the server.",
"An error occurred on the server while parsing the JSON text."),
-32600: ("Invalid Request", "The JSON sent is not a valid Request object."),
-32601: ("Method not found", "The method does not exist / is not available."),
-32602: ("Invalid params", "Invalid method parameter(s)."),
-32603: ("Internal error", "Internal JSON-RPC error."),
-32099: ("Server error", "Reserved for implementation-defined server-errors.")}
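    # Illustrative usage (not part of the original module): a procedure
    # exposed with @service.jsonrpc may raise Service.JsonRpcException to
    # report one of the codes above back to the caller.
    #
    #     @service.jsonrpc
    #     def divide(a, b):
    #         if b == 0:
    #             raise Service.JsonRpcException(-32602, 'b must not be zero')
    #         return a / b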
def serve_jsonrpc(self):
def return_response(id, result):
return serializers.json({'version': '1.1', 'id': id, 'result': result, 'error': None})
def return_error(id, code, message, data=None):
error = {'name': 'JSONRPCError',
'code': code, 'message': message}
if data is not None:
error['data'] = data
return serializers.json({'id': id,
'version': '1.1',
'error': error,
})
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
methods = self.jsonrpc_procedures
data = json.loads(request.body.read())
jsonrpc_2 = data.get('jsonrpc')
if jsonrpc_2: # hand over to version 2 of the protocol
return self.serve_jsonrpc2(data)
id, method, params = data.get('id'), data.get('method'), data.get('params', [])
if id is None:
return return_error(0, 100, 'missing id')
if method not in methods:
return return_error(id, 100, 'method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
return return_response(id, s)
except Service.JsonRpcException as e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
message = '%s: %s' % (etype.__name__, eval)
data = request.is_local and traceback.format_tb(etb)
logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb)))
return return_error(id, 100, message, data)
def serve_jsonrpc2(self, data=None, batch_element=False):
def return_response(id, result):
if not must_respond:
return None
return serializers.json({'jsonrpc': '2.0', 'id': id, 'result': result})
def return_error(id, code, message=None, data=None):
error = {'code': code}
if code in Service.jsonrpc_errors:
error['message'] = Service.jsonrpc_errors[code][0]
error['data'] = Service.jsonrpc_errors[code][1]
if message is not None:
error['message'] = message
if data is not None:
error['data'] = data
return serializers.json({'jsonrpc': '2.0', 'id': id, 'error': error})
def validate(data):
"""
Validate request as defined in: http://www.jsonrpc.org/specification#request_object.
Args:
data(str): The json object.
Returns:
- True -- if successful
- False -- if no error should be reported (i.e. data is missing 'id' member)
Raises:
JsonRPCException
"""
iparms = set(data.keys())
mandatory_args = set(['jsonrpc', 'method'])
missing_args = mandatory_args - iparms
if missing_args:
raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args))
if data['jsonrpc'] != '2.0':
raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc'])
if 'id' not in iparms:
return False
return True
request = current.request
response = current.response
if not data:
response.headers['Content-Type'] = 'application/json; charset=utf-8'
try:
data = json.loads(request.body.read())
except ValueError: # decoding error in json lib
return return_error(None, -32700)
# Batch handling
if isinstance(data, list) and not batch_element:
retlist = []
for c in data:
retstr = self.serve_jsonrpc2(c, batch_element=True)
if retstr: # do not add empty responses
retlist.append(retstr)
if len(retlist) == 0: # return nothing
return ''
else:
return "[" + ','.join(retlist) + "]"
methods = self.jsonrpc2_procedures
methods.update(self.jsonrpc_procedures)
try:
must_respond = validate(data)
except Service.JsonRpcException as e:
return return_error(None, e.code, e.info)
id, method, params = data.get('id'), data['method'], data.get('params', '')
if method not in methods:
return return_error(id, -32601, data='Method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
if must_respond:
return return_response(id, s)
else:
return ''
except HTTP as e:
raise e
except Service.JsonRpcException as e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb))
logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb)))
return return_error(id, -32099, data=data)
def serve_xmlrpc(self):
request = current.request
response = current.response
services = self.xmlrpc_procedures.values()
return response.xmlrpc(request, services)
def serve_amfrpc(self, version=0):
try:
import pyamf
import pyamf.remoting.gateway
except:
return "pyamf not installed or not in Python sys.path"
request = current.request
response = current.response
if version == 3:
services = self.amfrpc3_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
pyamf_request = pyamf.remoting.decode(request.body)
else:
services = self.amfrpc_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
context = pyamf.get_context(pyamf.AMF0)
pyamf_request = pyamf.remoting.decode(request.body, context)
pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)
for name, message in pyamf_request:
pyamf_response[name] = base_gateway.getProcessor(message)(message)
response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
if version == 3:
return pyamf.remoting.encode(pyamf_response).getvalue()
else:
return pyamf.remoting.encode(pyamf_response, context).getvalue()
def serve_soap(self, version="1.1"):
try:
from gluon.contrib.pysimplesoap.server import SoapDispatcher
except:
return "pysimplesoap not installed in contrib"
request = current.request
response = current.response
procedures = self.soap_procedures
location = "%s://%s%s" % (request.env.wsgi_url_scheme,
request.env.http_host,
URL(r=request, f="call/soap", vars={}))
namespace = 'namespace' in response and response.namespace or location
documentation = response.description or ''
dispatcher = SoapDispatcher(
name=response.title,
location=location,
action=location, # SOAPAction
namespace=namespace,
prefix='pys',
documentation=documentation,
ns=True)
for method, (function, returns, args, doc, resp_elem_name) in iteritems(procedures):
dispatcher.register_function(method, function, returns, args, doc, resp_elem_name)
if request.env.request_method == 'POST':
fault = {}
# Process normal Soap Operation
response.headers['Content-Type'] = 'text/xml'
xml = dispatcher.dispatch(request.body.read(), fault=fault)
if fault:
# May want to consider populating a ticket here...
response.status = 500
# return the soap response
return xml
elif 'WSDL' in request.vars:
# Return Web Service Description
response.headers['Content-Type'] = 'text/xml'
return dispatcher.wsdl()
elif 'op' in request.vars:
# Return method help webpage
response.headers['Content-Type'] = 'text/html'
method = request.vars['op']
sample_req_xml, sample_res_xml, doc = dispatcher.help(method)
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
A("See all webservice operations",
_href=URL(r=request, f="call/soap", vars={})),
H2(method),
P(doc),
UL(LI("Location: %s" % dispatcher.location),
LI("Namespace: %s" % dispatcher.namespace),
LI("SoapAction: %s" % dispatcher.action),
),
H3("Sample SOAP XML Request Message:"),
CODE(sample_req_xml, language="xml"),
H3("Sample SOAP XML Response Message:"),
CODE(sample_res_xml, language="xml"),
]
return {'body': body}
else:
# Return general help and method list webpage
response.headers['Content-Type'] = 'text/html'
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
P(response.description),
P("The following operations are available"),
A("See WSDL for webservice description",
_href=URL(r=request, f="call/soap", vars={"WSDL": None})),
UL([LI(A("%s: %s" % (method, doc or ''),
_href=URL(r=request, f="call/soap", vars={'op': method})))
for method, doc in dispatcher.list_methods()]),
]
return {'body': body}
def __call__(self):
"""
Registers services with::
service = Service()
@service.run
@service.rss
@service.json
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
@service.amfrpc3('domain')
@service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,})
Exposes services with::
def call():
return service()
You can call services with::
http://..../app/default/call/run?[parameters]
http://..../app/default/call/rss?[parameters]
http://..../app/default/call/json?[parameters]
http://..../app/default/call/jsonrpc
http://..../app/default/call/xmlrpc
http://..../app/default/call/amfrpc
http://..../app/default/call/amfrpc3
http://..../app/default/call/soap
"""
request = current.request
if len(request.args) < 1:
raise HTTP(404, "Not Found")
arg0 = request.args(0)
if arg0 == 'run':
return self.serve_run(request.args[1:])
elif arg0 == 'rss':
return self.serve_rss(request.args[1:])
elif arg0 == 'csv':
return self.serve_csv(request.args[1:])
elif arg0 == 'xml':
return self.serve_xml(request.args[1:])
elif arg0 == 'json':
return self.serve_json(request.args[1:])
elif arg0 == 'jsonrpc':
return self.serve_jsonrpc()
elif arg0 == 'jsonrpc2':
return self.serve_jsonrpc2()
elif arg0 == 'xmlrpc':
return self.serve_xmlrpc()
elif arg0 == 'amfrpc':
return self.serve_amfrpc()
elif arg0 == 'amfrpc3':
return self.serve_amfrpc(3)
elif arg0 == 'soap':
return self.serve_soap()
else:
self.error()
def error(self):
raise HTTP(404, "Object does not exist")
# we make this a method so that subclasses can override it if they want to do more specific argument-checking
    # but the default implementation is the simplest: just pass the arguments we got, with no checking
def call_service_function(self, f, *a, **b):
if self.check_args:
return universal_caller(f, *a, **b)
else:
return f(*a, **b)
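# Illustrative usage (not part of the original module): with check_args=True
# every exposed procedure is invoked through universal_caller, so a missing
# required argument results in an HTTP 404 instead of a TypeError.
#
#     service = Service(check_args=True)
#
#     @service.run
#     def concat(a, b):
#         return a + b
#
#     def call():
#         return service()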
def completion(callback):
"""
Executes a task on completion of the called action.
Example:
Use as::
from gluon.tools import completion
@completion(lambda d: logging.info(repr(d)))
def index():
return dict(message='hello')
    It logs the output of the function every time index is called.
The argument of completion is executed in a new thread.
"""
def _completion(f):
def __completion(*a, **b):
d = None
try:
d = f(*a, **b)
return d
finally:
thread.start_new_thread(callback, (d,))
return __completion
return _completion
def prettydate(d, T=lambda x: x, utc=False):
now = datetime.datetime.utcnow() if utc else datetime.datetime.now()
if isinstance(d, datetime.datetime):
dt = now - d
elif isinstance(d, datetime.date):
dt = now.date() - d
elif not d:
return ''
else:
return '[invalid date]'
if dt.days < 0:
suffix = ' from now'
dt = -dt
else:
suffix = ' ago'
if dt.days >= 2 * 365:
return T('%d years' + suffix) % int(dt.days // 365)
elif dt.days >= 365:
return T('1 year' + suffix)
elif dt.days >= 60:
return T('%d months' + suffix) % int(dt.days // 30)
elif dt.days >= 27: # 4 weeks ugly
return T('1 month' + suffix)
elif dt.days >= 14:
return T('%d weeks' + suffix) % int(dt.days // 7)
elif dt.days >= 7:
return T('1 week' + suffix)
elif dt.days > 1:
return T('%d days' + suffix) % dt.days
elif dt.days == 1:
return T('1 day' + suffix)
elif dt.seconds >= 2 * 60 * 60:
return T('%d hours' + suffix) % int(dt.seconds // 3600)
elif dt.seconds >= 60 * 60:
return T('1 hour' + suffix)
elif dt.seconds >= 2 * 60:
return T('%d minutes' + suffix) % int(dt.seconds // 60)
elif dt.seconds >= 60:
return T('1 minute' + suffix)
elif dt.seconds > 1:
return T('%d seconds' + suffix) % dt.seconds
elif dt.seconds == 1:
return T('1 second' + suffix)
else:
return T('now')
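# Illustrative usage (not part of the original module):
#
#     import datetime
#     prettydate(datetime.datetime.now() - datetime.timedelta(hours=3))
#     # -> '3 hours ago'
#     prettydate(datetime.date.today() + datetime.timedelta(days=1))
#     # -> '1 day from now'
#     prettydate(None)  # -> ''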
def test_thread_separation():
def f():
c = PluginManager()
lock1.acquire()
lock2.acquire()
c.x = 7
lock1.release()
lock2.release()
lock1 = thread.allocate_lock()
lock2 = thread.allocate_lock()
lock1.acquire()
thread.start_new_thread(f, ())
a = PluginManager()
a.x = 5
lock1.release()
lock2.acquire()
return a.x
class PluginManager(object):
"""
Plugin Manager is similar to a storage object but it is a single level
singleton. This means that multiple instances within the same thread share
the same attributes.
Its constructor is also special. The first argument is the name of the
plugin you are defining.
The named arguments are parameters needed by the plugin with default values.
    If the parameters were previously defined, the old values are used.
Example:
in some general configuration file::
plugins = PluginManager()
plugins.me.param1=3
within the plugin model::
_ = PluginManager('me',param1=5,param2=6,param3=7)
where the plugin is used::
>>> print(plugins.me.param1)
3
>>> print(plugins.me.param2)
6
>>> plugins.me.param3 = 8
>>> print(plugins.me.param3)
8
Here are some tests::
>>> a=PluginManager()
>>> a.x=6
>>> b=PluginManager('check')
>>> print(b.x)
6
>>> b=PluginManager() # reset settings
>>> print(b.x)
<Storage {}>
>>> b.x=7
>>> print(a.x)
7
>>> a.y.z=8
>>> print(b.y.z)
8
>>> test_thread_separation()
5
>>> plugins=PluginManager('me',db='mydb')
>>> print(plugins.me.db)
mydb
>>> print('me' in plugins)
True
>>> print(plugins.me.installed)
True
"""
instances = {}
def __new__(cls, *a, **b):
id = thread.get_ident()
lock = thread.allocate_lock()
try:
lock.acquire()
try:
return cls.instances[id]
except KeyError:
instance = object.__new__(cls, *a, **b)
cls.instances[id] = instance
return instance
finally:
lock.release()
def __init__(self, plugin=None, **defaults):
if not plugin:
self.__dict__.clear()
settings = self.__getattr__(plugin)
settings.installed = True
settings.update(
(k, v) for k, v in defaults.items() if k not in settings)
def __getattr__(self, key):
if key not in self.__dict__:
self.__dict__[key] = Storage()
return self.__dict__[key]
def keys(self):
return self.__dict__.keys()
def __contains__(self, key):
return key in self.__dict__
class Expose(object):
def __init__(self, base=None, basename=None, extensions=None,
allow_download=True, follow_symlink_out=False):
"""
Examples:
Use as::
def static():
return dict(files=Expose())
or::
def static():
path = os.path.join(request.folder,'static','public')
return dict(files=Expose(path,basename='public'))
Args:
extensions: an optional list of file extensions for filtering
displayed files: e.g. `['.py', '.jpg']`
allow_download: whether to allow downloading selected files
            follow_symlink_out: whether to follow symbolic links that point
                outside of `base`.
Warning: setting this to `True` might pose a security risk
if you don't also have complete control over writing
and file creation under `base`.
"""
# why would this not be callable? but otherwise tests do not pass
if current.session and callable(current.session.forget):
current.session.forget()
self.follow_symlink_out = follow_symlink_out
self.base = self.normalize_path(
base or os.path.join(current.request.folder, 'static'))
self.basename = basename or current.request.function
if current.request.raw_args:
self.args = [arg for arg in current.request.raw_args.split('/') if arg]
else:
self.args = [arg for arg in current.request.args if arg]
filename = os.path.join(self.base, *self.args)
if not os.path.exists(filename):
raise HTTP(404, "FILE NOT FOUND")
if not self.in_base(filename):
raise HTTP(401, "NOT AUTHORIZED")
if allow_download and not os.path.isdir(filename):
current.response.headers['Content-Type'] = contenttype(filename)
raise HTTP(200, open(filename, 'rb'), **current.response.headers)
self.path = path = os.path.join(filename, '*')
dirname_len = len(path) - 1
allowed = [f for f in sorted(glob.glob(path))
if not any([self.isprivate(f), self.issymlink_out(f)])]
self.folders = [f[dirname_len:]
for f in allowed if os.path.isdir(f)]
self.filenames = [f[dirname_len:]
for f in allowed if not os.path.isdir(f)]
if 'README' in self.filenames:
with open(os.path.join(filename, 'README')) as f:
readme = f.read()
self.paragraph = MARKMIN(readme)
else:
self.paragraph = None
if extensions:
self.filenames = [f for f in self.filenames
if os.path.splitext(f)[-1] in extensions]
def breadcrumbs(self, basename):
path = []
span = SPAN()
span.append(A(basename, _href=URL()))
for arg in self.args:
span.append('/')
path.append(arg)
span.append(A(arg, _href=URL(args='/'.join(path))))
return span
def table_folders(self):
if self.folders:
return SPAN(H3('Folders'),
TABLE(*[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
for folder in self.folders], **dict(_class="table")))
return ''
@staticmethod
def __in_base(subdir, basedir, sep=os.path.sep):
"""True if subdir/ is under basedir/"""
s = lambda f: '%s%s' % (f.rstrip(sep), sep) # f -> f/
# The trailing '/' is for the case of '/foobar' in_base of '/foo':
        # - because '/foobar' starts with '/foo'
# - but '/foobar/' doesn't start with '/foo/'
return s(subdir).startswith(s(basedir))
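    # Illustrative behaviour (not part of the original module): the trailing
    # separator prevents sibling directories sharing a prefix from matching.
    #
    #     Expose._Expose__in_base('/foo/bar', '/foo', sep='/')  # -> True
    #     Expose._Expose__in_base('/foobar', '/foo', sep='/')   # -> False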
def in_base(self, f):
"""True if f/ is under self.base/
        where f and self.base are normalized paths
"""
return self.__in_base(self.normalize_path(f), self.base)
def normalize_path(self, f):
if self.follow_symlink_out:
return os.path.normpath(f)
else:
return os.path.realpath(f)
def issymlink_out(self, f):
"""True if f is a symlink and is pointing outside of self.base"""
return os.path.islink(f) and not self.in_base(f)
@staticmethod
def isprivate(f):
# remove '/private' prefix to deal with symbolic links on OSX
if f.startswith('/private/'):
f = f[8:]
return 'private' in f or f.startswith('.') or f.endswith('~')
@staticmethod
def isimage(f):
return os.path.splitext(f)[-1].lower() in (
'.png', '.jpg', '.jpeg', '.gif', '.tiff')
def table_files(self, width=160):
if self.filenames:
return SPAN(H3('Files'),
TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
TD(IMG(_src=URL(args=self.args + [f]),
_style='max-width:%spx' % width)
if width and self.isimage(f) else ''))
for f in self.filenames], **dict(_class="table")))
return ''
def xml(self):
return DIV(
H2(self.breadcrumbs(self.basename)),
self.paragraph or '',
self.table_folders(),
self.table_files()).xml()
class Wiki(object):
everybody = 'everybody'
rows_page = 25
def markmin_base(self, body):
return MARKMIN(body, extra=self.settings.extra,
url=True, environment=self.env,
autolinks=lambda link: expand_one(link, {})).xml()
def render_tags(self, tags):
return DIV(
_class='w2p_wiki_tags',
*[A(t.strip(), _href=URL(args='_search', vars=dict(q=t)))
for t in tags or [] if t.strip()])
def markmin_render(self, page):
return self.markmin_base(page.body) + self.render_tags(page.tags).xml()
def html_render(self, page):
html = page.body
# @///function -> http://..../function
html = replace_at_urls(html, URL)
# http://...jpg -> <img src="http://...jpg/> or embed
html = replace_autolinks(html, lambda link: expand_one(link, {}))
# @{component:name} -> <script>embed component name</script>
html = replace_components(html, self.env)
html = html + self.render_tags(page.tags).xml()
return html
@staticmethod
def component(text):
"""
In wiki docs allows `@{component:controller/function/args}`
which renders as a `LOAD(..., ajax=True)`
"""
items = text.split('/')
controller, function, args = items[0], items[1], items[2:]
return LOAD(controller, function, args=args, ajax=True).xml()
def get_renderer(self):
if isinstance(self.settings.render, basestring):
r = getattr(self, "%s_render" % self.settings.render)
elif callable(self.settings.render):
r = self.settings.render
elif isinstance(self.settings.render, dict):
def custom_render(page):
if page.render:
if page.render in self.settings.render.keys():
my_render = self.settings.render[page.render]
else:
my_render = getattr(self, "%s_render" % page.render)
else:
my_render = self.markmin_render
return my_render(page)
r = custom_render
else:
raise ValueError(
"Invalid render type %s" % type(self.settings.render))
return r
def __init__(self, auth, env=None, render='markmin',
manage_permissions=False, force_prefix='',
restrict_search=False, extra=None,
menu_groups=None, templates=None, migrate=True,
controller=None, function=None, groups=None):
settings = self.settings = auth.settings.wiki
"""
Args:
render:
- "markmin"
- "html"
- `<function>` : Sets a custom render function
- `dict(html=<function>, markmin=...)`: dict(...) allows
multiple custom render functions
- "multiple" : Is the same as `{}`. It enables per-record
formats using builtins
"""
engines = set(['markmin', 'html'])
show_engine = False
if render == "multiple":
render = {}
if isinstance(render, dict):
[engines.add(key) for key in render]
show_engine = True
settings.render = render
perms = settings.manage_permissions = manage_permissions
settings.force_prefix = force_prefix
settings.restrict_search = restrict_search
settings.extra = extra or {}
settings.menu_groups = menu_groups
settings.templates = templates
settings.controller = controller
settings.function = function
settings.groups = auth.user_groups.values() \
if groups is None else groups
db = auth.db
self.env = env or {}
self.env['component'] = Wiki.component
self.auth = auth
self.wiki_menu_items = None
if self.auth.user:
self.settings.force_prefix = force_prefix % self.auth.user
else:
self.settings.force_prefix = force_prefix
self.host = current.request.env.http_host
table_definitions = [
('wiki_page', {
'args': [
Field('slug',
requires=[IS_SLUG(),
IS_NOT_IN_DB(db, 'wiki_page.slug')],
writable=False),
Field('title', length=255, unique=True),
Field('body', 'text', notnull=True),
Field('tags', 'list:string'),
Field('can_read', 'list:string',
writable=perms,
readable=perms,
default=[Wiki.everybody]),
Field('can_edit', 'list:string',
writable=perms, readable=perms,
default=[Wiki.everybody]),
Field('changelog'),
Field('html', 'text',
compute=self.get_renderer(),
readable=False, writable=False),
Field('render', default="markmin",
readable=show_engine,
writable=show_engine,
requires=IS_EMPTY_OR(
IS_IN_SET(engines))),
auth.signature],
'vars': {'format': '%(title)s', 'migrate': migrate}}),
('wiki_tag', {
'args': [
Field('name'),
Field('wiki_page', 'reference wiki_page'),
auth.signature],
'vars':{'format': '%(title)s', 'migrate': migrate}}),
('wiki_media', {
'args': [
Field('wiki_page', 'reference wiki_page'),
Field('title', required=True),
Field('filename', 'upload', required=True),
auth.signature],
'vars': {'format': '%(title)s', 'migrate': migrate}}),
]
# define only non-existent tables
for key, value in table_definitions:
args = []
if key not in db.tables():
# look for wiki_ extra fields in auth.settings
extra_fields = auth.settings.extra_fields
if extra_fields:
if key in extra_fields:
if extra_fields[key]:
for field in extra_fields[key]:
args.append(field)
args += value['args']
db.define_table(key, *args, **value['vars'])
if self.settings.templates is None and not self.settings.manage_permissions:
self.settings.templates = \
db.wiki_page.tags.contains('template') & db.wiki_page.can_read.contains('everybody')
def update_tags_insert(page, id, db=db):
for tag in page.tags or []:
tag = tag.strip().lower()
if tag:
db.wiki_tag.insert(name=tag, wiki_page=id)
def update_tags_update(dbset, page, db=db):
page = dbset.select(limitby=(0, 1)).first()
db(db.wiki_tag.wiki_page == page.id).delete()
for tag in page.tags or []:
tag = tag.strip().lower()
if tag:
db.wiki_tag.insert(name=tag, wiki_page=page.id)
db.wiki_page._after_insert.append(update_tags_insert)
db.wiki_page._after_update.append(update_tags_update)
if (auth.user and
check_credentials(current.request, gae_login=False) and
'wiki_editor' not in auth.user_groups.values() and
self.settings.groups == auth.user_groups.values()):
group = db.auth_group(role='wiki_editor')
gid = group.id if group else db.auth_group.insert(
role='wiki_editor')
auth.add_membership(gid)
settings.lock_keys = True
# WIKI ACCESS POLICY
def not_authorized(self, page=None):
raise HTTP(401)
def can_read(self, page):
if 'everybody' in page.can_read or not self.settings.manage_permissions:
return True
elif self.auth.user:
groups = self.settings.groups
if ('wiki_editor' in groups or
set(groups).intersection(set(page.can_read + page.can_edit)) or
page.created_by == self.auth.user.id):
return True
return False
def can_edit(self, page=None):
if not self.auth.user:
redirect(self.auth.settings.login_url)
groups = self.settings.groups
return ('wiki_editor' in groups or
(page is None and 'wiki_author' in groups) or
page is not None and (set(groups).intersection(set(page.can_edit)) or
page.created_by == self.auth.user.id))
def can_manage(self):
if not self.auth.user:
return False
groups = self.settings.groups
return 'wiki_editor' in groups
def can_search(self):
return True
def can_see_menu(self):
if self.auth.user:
if self.settings.menu_groups is None:
return True
else:
groups = self.settings.groups
if any(t in self.settings.menu_groups for t in groups):
return True
return False
# END POLICY
def automenu(self):
"""adds the menu if not present"""
if (not self.wiki_menu_items and self.settings.controller and self.settings.function):
self.wiki_menu_items = self.menu(self.settings.controller,
self.settings.function)
current.response.menu += self.wiki_menu_items
def __call__(self):
request = current.request
settings = self.settings
settings.controller = settings.controller or request.controller
settings.function = settings.function or request.function
self.automenu()
zero = request.args(0) or 'index'
if zero and zero.isdigit():
return self.media(int(zero))
elif not zero or not zero.startswith('_'):
return self.read(zero)
elif zero == '_edit':
return self.edit(request.args(1) or 'index', request.args(2) or 0)
elif zero == '_editmedia':
return self.editmedia(request.args(1) or 'index')
elif zero == '_create':
return self.create()
elif zero == '_pages':
return self.pages()
elif zero == '_search':
return self.search()
elif zero == '_recent':
ipage = int(request.vars.page or 0)
query = self.auth.db.wiki_page.created_by == request.args(
1, cast=int)
return self.search(query=query,
orderby=~self.auth.db.wiki_page.created_on,
limitby=(ipage * self.rows_page,
(ipage + 1) * self.rows_page),
)
elif zero == '_cloud':
return self.cloud()
elif zero == '_preview':
return self.preview(self.get_renderer())
def first_paragraph(self, page):
        if self.can_read(page):
mm = (page.body or '').replace('\r', '')
ps = [p for p in mm.split('\n\n') if not p.startswith('#') and p.strip()]
if ps:
return ps[0]
return ''
def fix_hostname(self, body):
return (body or '').replace('://HOSTNAME', '://%s' % self.host)
def read(self, slug, force_render=False):
if slug in '_cloud':
return self.cloud()
elif slug in '_search':
return self.search()
page = self.auth.db.wiki_page(slug=slug)
if page and (not self.can_read(page)):
return self.not_authorized(page)
if current.request.extension == 'html':
if not page:
url = URL(args=('_create', slug))
return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn"))
else:
html = page.html if not force_render else self.get_renderer()(page)
content = XML(self.fix_hostname(html))
return dict(title=page.title,
slug=page.slug,
page=page,
content=content,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
elif current.request.extension == 'load':
return self.fix_hostname(page.html) if page else ''
else:
if not page:
raise HTTP(404)
else:
return dict(title=page.title,
slug=page.slug,
page=page,
content=page.body,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
def edit(self, slug, from_template=0):
auth = self.auth
db = auth.db
page = db.wiki_page(slug=slug)
if not self.can_edit(page):
return self.not_authorized(page)
title_guess = ' '.join(c.capitalize() for c in slug.split('-'))
if not page:
if not (self.can_manage() or
slug.startswith(self.settings.force_prefix)):
current.session.flash = 'slug must have "%s" prefix' \
% self.settings.force_prefix
redirect(URL(args=('_create')))
db.wiki_page.can_read.default = [Wiki.everybody]
db.wiki_page.can_edit.default = [auth.user_group_role()]
db.wiki_page.title.default = title_guess
db.wiki_page.slug.default = slug
if slug == 'wiki-menu':
db.wiki_page.body.default = \
'- Menu Item > @////index\n- - Submenu > http://web2py.com'
else:
db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \
if int(from_template) > 0 else '## %s\n\npage content' % title_guess
vars = current.request.post_vars
if vars.body:
vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME')
form = SQLFORM(db.wiki_page, page, deletable=True,
formstyle='table2cols', showid=False).process()
if form.deleted:
current.session.flash = 'page deleted'
redirect(URL())
elif form.accepted:
current.session.flash = 'page created'
redirect(URL(args=slug))
script = """
jQuery(function() {
if (!jQuery('#wiki_page_body').length) return;
var pagecontent = jQuery('#wiki_page_body');
pagecontent.css('font-family',
'Monaco,Menlo,Consolas,"Courier New",monospace');
var prevbutton = jQuery('<button class="btn nopreview">Preview</button>');
var preview = jQuery('<div id="preview"></div>').hide();
var previewmedia = jQuery('<div id="previewmedia"></div>');
var form = pagecontent.closest('form');
preview.insertBefore(form);
prevbutton.insertBefore(form);
if(%(link_media)s) {
var mediabutton = jQuery('<button class="btn nopreview">Media</button>');
mediabutton.insertBefore(form);
previewmedia.insertBefore(form);
mediabutton.click(function() {
if (mediabutton.hasClass('nopreview')) {
web2py_component('%(urlmedia)s', 'previewmedia');
} else {
previewmedia.empty();
}
mediabutton.toggleClass('nopreview');
});
}
prevbutton.click(function(e) {
e.preventDefault();
if (prevbutton.hasClass('nopreview')) {
prevbutton.addClass('preview').removeClass(
'nopreview').html('Edit Source');
try{var wiki_render = jQuery('#wiki_page_render').val()}
catch(e){var wiki_render = null;}
web2py_ajax_page('post', \
'%(url)s', {body: jQuery('#wiki_page_body').val(), \
render: wiki_render}, 'preview');
form.fadeOut('fast', function() {preview.fadeIn()});
} else {
prevbutton.addClass(
'nopreview').removeClass('preview').html('Preview');
preview.fadeOut('fast', function() {form.fadeIn()});
}
})
})
""" % dict(url=URL(args=('_preview', slug)), link_media=('true' if page else 'false'),
urlmedia=URL(extension='load',
args=('_editmedia', slug),
vars=dict(embedded=1)))
return dict(content=TAG[''](form, SCRIPT(script)))
def editmedia(self, slug):
auth = self.auth
db = auth.db
page = db.wiki_page(slug=slug)
if not (page and self.can_edit(page)):
return self.not_authorized(page)
self.auth.db.wiki_media.id.represent = lambda id, row: \
id if not row.filename else \
SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1]))
self.auth.db.wiki_media.wiki_page.default = page.id
self.auth.db.wiki_media.wiki_page.writable = False
links = []
csv = True
create = True
if current.request.vars.embedded:
script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;"
fragment = self.auth.db.wiki_media.id.represent
csv = False
create = False
links = [lambda row: A('copy into source', _href='#', _onclick=script % (fragment(row.id, row)))]
content = SQLFORM.grid(
self.auth.db.wiki_media.wiki_page == page.id,
orderby=self.auth.db.wiki_media.title,
links=links,
csv=csv,
create=create,
args=['_editmedia', slug],
user_signature=False)
return dict(content=content)
def create(self):
if not self.can_edit():
return self.not_authorized()
db = self.auth.db
slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id, db.wiki_page.slug)
options = [OPTION(row.slug, _value=row.id) for row in slugs]
options.insert(0, OPTION('', _value=''))
fields = [Field("slug", default=current.request.args(1) or
self.settings.force_prefix,
requires=(IS_SLUG(), IS_NOT_IN_DB(db, db.wiki_page.slug))), ]
if self.settings.templates:
fields.append(
Field("from_template", "reference wiki_page",
requires=IS_EMPTY_OR(IS_IN_DB(db(self.settings.templates), db.wiki_page._id, '%(slug)s')),
comment=current.T("Choose Template or empty for new Page")))
form = SQLFORM.factory(*fields, **dict(_class="well"))
form.element("[type=submit]").attributes["_value"] = \
current.T("Create Page from Slug")
if form.process().accepted:
form.vars.from_template = 0 if not form.vars.from_template else form.vars.from_template
redirect(URL(args=('_edit', form.vars.slug, form.vars.from_template or 0))) # added param
return dict(content=form)
def pages(self):
if not self.can_manage():
return self.not_authorized()
self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN(
'@////%s' % slug)
self.auth.db.wiki_page.title.represent = lambda title, row: \
A(title, _href=URL(args=row.slug))
wiki_table = self.auth.db.wiki_page
content = SQLFORM.grid(
wiki_table,
fields=[wiki_table.slug,
wiki_table.title, wiki_table.tags,
wiki_table.can_read, wiki_table.can_edit],
links=[
lambda row:
A('edit', _href=URL(args=('_edit', row.slug)), _class='btn'),
lambda row:
A('media', _href=URL(args=('_editmedia', row.slug)), _class='btn')],
details=False, editable=False, deletable=False, create=False,
orderby=self.auth.db.wiki_page.title,
args=['_pages'],
user_signature=False)
return dict(content=content)
def media(self, id):
request, response, db = current.request, current.response, self.auth.db
media = db.wiki_media(id)
if media:
if self.settings.manage_permissions:
page = db.wiki_page(media.wiki_page)
if not self.can_read(page):
return self.not_authorized(page)
request.args = [media.filename]
m = response.download(request, db)
current.session.forget() # get rid of the cookie
response.headers['Last-Modified'] = \
request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
if 'Content-Disposition' in response.headers:
del response.headers['Content-Disposition']
response.headers['Pragma'] = 'cache'
response.headers['Cache-Control'] = 'private'
return m
else:
raise HTTP(404)
def menu(self, controller='default', function='index'):
db = self.auth.db
request = current.request
menu_page = db.wiki_page(slug='wiki-menu')
menu = []
if menu_page:
tree = {'': menu}
            regex = re.compile(r'[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')
for match in regex.finditer(self.fix_hostname(menu_page.body)):
base = match.group('base').replace(' ', '')
title = match.group('title')
link = match.group('link')
title_page = None
if link.startswith('@'):
items = link[2:].split('/')
if len(items) > 3:
title_page = items[3]
link = URL(a=items[0] or None, c=items[1] or controller,
f=items[2] or function, args=items[3:])
parent = tree.get(base[1:], tree[''])
subtree = []
tree[base] = subtree
parent.append((current.T(title),
request.args(0) == title_page,
link, subtree))
if self.can_see_menu():
submenu = []
menu.append((current.T('[Wiki]'), None, None, submenu))
if URL() == URL(controller, function):
if not str(request.args(0)).startswith('_'):
slug = request.args(0) or 'index'
mode = 1
elif request.args(0) == '_edit':
slug = request.args(1) or 'index'
mode = 2
elif request.args(0) == '_editmedia':
slug = request.args(1) or 'index'
mode = 3
else:
mode = 0
if mode in (2, 3):
submenu.append((current.T('View Page'), None,
URL(controller, function, args=slug)))
if mode in (1, 3):
submenu.append((current.T('Edit Page'), None,
URL(controller, function, args=('_edit', slug))))
if mode in (1, 2):
submenu.append((current.T('Edit Page Media'), None,
URL(controller, function, args=('_editmedia', slug))))
submenu.append((current.T('Create New Page'), None,
URL(controller, function, args=('_create'))))
# Moved next if to inside self.auth.user check
if self.can_manage():
submenu.append((current.T('Manage Pages'), None,
URL(controller, function, args=('_pages'))))
submenu.append((current.T('Edit Menu'), None,
URL(controller, function, args=('_edit', 'wiki-menu'))))
# Also moved inside self.auth.user check
submenu.append((current.T('Search Pages'), None,
URL(controller, function, args=('_search'))))
return menu
def search(self, tags=None, query=None, cloud=True, preview=True,
limitby=(0, 100), orderby=None):
if not self.can_search():
return self.not_authorized()
request = current.request
content = CAT()
if tags is None and query is None:
form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
value=request.vars.q),
INPUT(_type="submit", _value=current.T('Search')),
_method='GET')
content.append(DIV(form, _class='w2p_wiki_form'))
if request.vars.q:
tags = [v.strip() for v in request.vars.q.split(',')]
tags = [v.lower() for v in tags if v]
if tags or query is not None:
db = self.auth.db
count = db.wiki_tag.wiki_page.count()
fields = [db.wiki_page.id, db.wiki_page.slug,
db.wiki_page.title, db.wiki_page.tags,
db.wiki_page.can_read, db.wiki_page.can_edit]
if preview:
fields.append(db.wiki_page.body)
if query is None:
query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
(db.wiki_tag.name.belongs(tags))
query = query | db.wiki_page.title.contains(request.vars.q)
if self.settings.restrict_search and not self.can_manage():
query = query & (db.wiki_page.created_by == self.auth.user_id)
pages = db(query).select(count,
*fields, **dict(orderby=orderby or ~count,
groupby=reduce(lambda a, b: a | b, fields),
distinct=True,
limitby=limitby))
if request.extension in ('html', 'load'):
if not pages:
content.append(DIV(current.T("No results"),
_class='w2p_wiki_form'))
def link(t):
return A(t, _href=URL(args='_search', vars=dict(q=t)))
items = [DIV(H3(A(p.wiki_page.title, _href=URL(
args=p.wiki_page.slug))),
MARKMIN(self.first_paragraph(p.wiki_page))
if preview else '',
DIV(_class='w2p_wiki_tags',
*[link(t.strip()) for t in
p.wiki_page.tags or [] if t.strip()]),
_class='w2p_wiki_search_item')
for p in pages]
content.append(DIV(_class='w2p_wiki_pages', *items))
else:
cloud = False
content = [p.wiki_page.as_dict() for p in pages]
elif cloud:
content.append(self.cloud()['content'])
if request.extension == 'load':
return content
return dict(content=content)
def cloud(self):
db = self.auth.db
count = db.wiki_tag.wiki_page.count(distinct=True)
ids = db(db.wiki_tag).select(
db.wiki_tag.name, count,
distinct=True,
groupby=db.wiki_tag.name,
orderby=~count, limitby=(0, 20))
if ids:
a, b = ids[0](count), ids[-1](count)
def style(c):
STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem'
size = (1.5 * (c - b) / max(a - b, 1) + 1.3)
return STYLE % (1.3, size)
items = []
for item in ids:
items.append(A(item.wiki_tag.name,
_style=style(item(count)),
_href=URL(args='_search',
vars=dict(q=item.wiki_tag.name))))
items.append(' ')
return dict(content=DIV(_class='w2p_cloud', *items))
def preview(self, render):
request = current.request
# FIXME: This is an ugly hack to ensure a default render
# engine if not specified (with multiple render engines)
if 'render' not in request.post_vars:
request.post_vars.render = None
return render(request.post_vars)
class Config(object):
def __init__(
self,
filename,
section,
default_values={}
):
self.config = configparser.ConfigParser(default_values)
self.config.read(filename)
if not self.config.has_section(section):
self.config.add_section(section)
self.section = section
self.filename = filename
def read(self):
        if not isinstance(current.session['settings_%s' % self.section], dict):
settings = dict(self.config.items(self.section))
else:
settings = current.session['settings_%s' % self.section]
return settings
    def save(self, options):
        for option, value in options:
            self.config.set(self.section, option, value)
        try:
            with open(self.filename, 'w') as configfile:
                self.config.write(configfile)
            result = True
        except Exception:
            current.session['settings_%s' % self.section] = dict(self.config.items(self.section))
            result = False
        return result
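# Minimal usage sketch for Config (editor's addition; the file and section names
# below are hypothetical):
#
#   cfg = Config('settings.ini', 'editor', default_values={'theme': 'plain'})
#   settings = cfg.read()          # dict of options, cached in the web2py session
#   cfg.save([('theme', 'dark')])  # returns False if the file could not be written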
if __name__ == '__main__':
import doctest
doctest.testmod()
| [] | [] | ["GNUPGHOME"] | [] | ["GNUPGHOME"] | python | 1 | 0 | |
pkg/operator/staticpod/installerpod/cmd.go | package installerpod
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/openshift/library-go/pkg/config/client"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
"github.com/openshift/library-go/pkg/operator/resource/retry"
"github.com/openshift/library-go/pkg/operator/staticpod"
"github.com/openshift/library-go/pkg/operator/staticpod/internal/flock"
)
type InstallOptions struct {
// TODO replace with genericclioptions
KubeConfig string
KubeClient kubernetes.Interface
Revision string
NodeName string
Namespace string
PodConfigMapNamePrefix string
SecretNamePrefixes []string
OptionalSecretNamePrefixes []string
ConfigMapNamePrefixes []string
OptionalConfigMapNamePrefixes []string
CertSecretNames []string
OptionalCertSecretNamePrefixes []string
CertConfigMapNamePrefixes []string
OptionalCertConfigMapNamePrefixes []string
CertDir string
ResourceDir string
PodManifestDir string
Timeout time.Duration
// StaticPodManifestsLockFile used to coordinate work between multiple processes when writing static pod manifests
StaticPodManifestsLockFile string
PodMutationFns []PodMutationFunc
}
// PodMutationFunc is a function that has a chance at changing the pod before it is created
type PodMutationFunc func(pod *corev1.Pod) error
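// Example (editor's illustration, not part of the original file): a PodMutationFunc
// that adds a hypothetical annotation before the static pod manifest is written.
//
//	opts := NewInstallOptions().WithPodMutationFn(func(pod *corev1.Pod) error {
//		if pod.Annotations == nil {
//			pod.Annotations = map[string]string{}
//		}
//		pod.Annotations["example.openshift.io/installed-by"] = "installer-pod"
//		return nil
//	})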
func NewInstallOptions() *InstallOptions {
return &InstallOptions{}
}
func (o *InstallOptions) WithPodMutationFn(podMutationFn PodMutationFunc) *InstallOptions {
o.PodMutationFns = append(o.PodMutationFns, podMutationFn)
return o
}
func NewInstaller() *cobra.Command {
o := NewInstallOptions()
cmd := &cobra.Command{
Use: "installer",
Short: "Install static pod and related resources",
Run: func(cmd *cobra.Command, args []string) {
klog.V(1).Info(cmd.Flags())
klog.V(1).Info(spew.Sdump(o))
if err := o.Complete(); err != nil {
klog.Exit(err)
}
if err := o.Validate(); err != nil {
klog.Exit(err)
}
ctx, cancel := context.WithTimeout(context.TODO(), o.Timeout)
defer cancel()
if err := o.Run(ctx); err != nil {
klog.Exit(err)
}
},
}
o.AddFlags(cmd.Flags())
return cmd
}
func (o *InstallOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty")
fs.StringVar(&o.Revision, "revision", o.Revision, "identifier for this particular installation instance. For example, a counter or a hash")
fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to retrieve all resources from and create the static pod in")
fs.StringVar(&o.PodConfigMapNamePrefix, "pod", o.PodConfigMapNamePrefix, "name of configmap that contains the pod to be created")
fs.StringSliceVar(&o.SecretNamePrefixes, "secrets", o.SecretNamePrefixes, "list of secret names to be included")
fs.StringSliceVar(&o.ConfigMapNamePrefixes, "configmaps", o.ConfigMapNamePrefixes, "list of configmaps to be included")
fs.StringSliceVar(&o.OptionalSecretNamePrefixes, "optional-secrets", o.OptionalSecretNamePrefixes, "list of optional secret names to be included")
fs.StringSliceVar(&o.OptionalConfigMapNamePrefixes, "optional-configmaps", o.OptionalConfigMapNamePrefixes, "list of optional configmaps to be included")
fs.StringVar(&o.ResourceDir, "resource-dir", o.ResourceDir, "directory for all files supporting the static pod manifest")
fs.StringVar(&o.PodManifestDir, "pod-manifest-dir", o.PodManifestDir, "directory for the static pod manifest")
fs.DurationVar(&o.Timeout, "timeout-duration", 120*time.Second, "maximum time in seconds to wait for the copying to complete (default: 2m)")
fs.StringVar(&o.StaticPodManifestsLockFile, "pod-manifests-lock-file", o.StaticPodManifestsLockFile, "path to a file that will be used to coordinate writing static pod manifests between multiple processes")
fs.StringSliceVar(&o.CertSecretNames, "cert-secrets", o.CertSecretNames, "list of secret names to be included")
fs.StringSliceVar(&o.CertConfigMapNamePrefixes, "cert-configmaps", o.CertConfigMapNamePrefixes, "list of configmaps to be included")
fs.StringSliceVar(&o.OptionalCertSecretNamePrefixes, "optional-cert-secrets", o.OptionalCertSecretNamePrefixes, "list of optional secret names to be included")
fs.StringSliceVar(&o.OptionalCertConfigMapNamePrefixes, "optional-cert-configmaps", o.OptionalCertConfigMapNamePrefixes, "list of optional configmaps to be included")
fs.StringVar(&o.CertDir, "cert-dir", o.CertDir, "directory for all certs")
}
func (o *InstallOptions) Complete() error {
clientConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfig, nil)
if err != nil {
return err
}
// Use protobuf to fetch configmaps and secrets and create pods.
protoConfig := rest.CopyConfig(clientConfig)
protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
protoConfig.ContentType = "application/vnd.kubernetes.protobuf"
o.KubeClient, err = kubernetes.NewForConfig(protoConfig)
if err != nil {
return err
}
// set via downward API
o.NodeName = os.Getenv("NODE_NAME")
return nil
}
func (o *InstallOptions) Validate() error {
if len(o.Revision) == 0 {
return fmt.Errorf("--revision is required")
}
if len(o.NodeName) == 0 {
return fmt.Errorf("env var NODE_NAME is required")
}
if len(o.Namespace) == 0 {
return fmt.Errorf("--namespace is required")
}
if len(o.PodConfigMapNamePrefix) == 0 {
return fmt.Errorf("--pod is required")
}
if len(o.ConfigMapNamePrefixes) == 0 {
return fmt.Errorf("--configmaps is required")
}
if o.Timeout == 0 {
return fmt.Errorf("--timeout-duration cannot be 0")
}
if o.KubeClient == nil {
return fmt.Errorf("missing client")
}
return nil
}
func (o *InstallOptions) nameFor(prefix string) string {
return fmt.Sprintf("%s-%s", prefix, o.Revision)
}
func (o *InstallOptions) prefixFor(name string) string {
return name[0 : len(name)-len(fmt.Sprintf("-%s", o.Revision))]
}
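// Illustrative note (editor's addition): with Revision "7", nameFor("kube-apiserver-pod")
// returns "kube-apiserver-pod-7", and prefixFor("kube-apiserver-pod-7") reverses it back
// to "kube-apiserver-pod". The configmap name used here is a hypothetical example.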
func (o *InstallOptions) copySecretsAndConfigMaps(ctx context.Context, resourceDir string,
secretNames, optionalSecretNames, configNames, optionalConfigNames sets.String, prefixed bool) error {
klog.Infof("Creating target resource directory %q ...", resourceDir)
if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) {
return err
}
// Gather secrets. If we get API server error, retry getting until we hit the timeout.
// Retrying will prevent temporary API server blips or networking issues.
// We return when all "required" secrets are gathered, optional secrets are not checked.
klog.Infof("Getting secrets ...")
secrets := []*corev1.Secret{}
for _, name := range append(secretNames.List(), optionalSecretNames.List()...) {
secret, err := o.getSecretWithRetry(ctx, name, optionalSecretNames.Has(name))
if err != nil {
return err
}
// secret is nil means the secret was optional and we failed to get it.
if secret != nil {
secrets = append(secrets, o.substituteSecret(secret))
}
}
klog.Infof("Getting config maps ...")
configs := []*corev1.ConfigMap{}
for _, name := range append(configNames.List(), optionalConfigNames.List()...) {
config, err := o.getConfigMapWithRetry(ctx, name, optionalConfigNames.Has(name))
if err != nil {
return err
}
// config is nil means the config was optional and we failed to get it.
if config != nil {
configs = append(configs, o.substituteConfigMap(config))
}
}
for _, secret := range secrets {
secretBaseName := secret.Name
if prefixed {
secretBaseName = o.prefixFor(secret.Name)
}
contentDir := path.Join(resourceDir, "secrets", secretBaseName)
klog.Infof("Creating directory %q ...", contentDir)
if err := os.MkdirAll(contentDir, 0755); err != nil {
return err
}
for filename, content := range secret.Data {
if err := writeSecret(content, path.Join(contentDir, filename)); err != nil {
return err
}
}
}
for _, configmap := range configs {
configMapBaseName := configmap.Name
if prefixed {
configMapBaseName = o.prefixFor(configmap.Name)
}
contentDir := path.Join(resourceDir, "configmaps", configMapBaseName)
klog.Infof("Creating directory %q ...", contentDir)
if err := os.MkdirAll(contentDir, 0755); err != nil {
return err
}
for filename, content := range configmap.Data {
if err := writeConfig([]byte(content), path.Join(contentDir, filename)); err != nil {
return err
}
}
}
return nil
}
func (o *InstallOptions) copyContent(ctx context.Context) error {
resourceDir := path.Join(o.ResourceDir, o.nameFor(o.PodConfigMapNamePrefix))
klog.Infof("Creating target resource directory %q ...", resourceDir)
if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) {
return err
}
secretPrefixes := sets.NewString()
optionalSecretPrefixes := sets.NewString()
configPrefixes := sets.NewString()
optionalConfigPrefixes := sets.NewString()
for _, prefix := range o.SecretNamePrefixes {
secretPrefixes.Insert(o.nameFor(prefix))
}
for _, prefix := range o.OptionalSecretNamePrefixes {
optionalSecretPrefixes.Insert(o.nameFor(prefix))
}
for _, prefix := range o.ConfigMapNamePrefixes {
configPrefixes.Insert(o.nameFor(prefix))
}
for _, prefix := range o.OptionalConfigMapNamePrefixes {
optionalConfigPrefixes.Insert(o.nameFor(prefix))
}
if err := o.copySecretsAndConfigMaps(ctx, resourceDir, secretPrefixes, optionalSecretPrefixes, configPrefixes, optionalConfigPrefixes, true); err != nil {
return err
}
// Copy the current state of the certs as we see them. This primes us once and allows a kube-apiserver to start once
if len(o.CertDir) > 0 {
if err := o.copySecretsAndConfigMaps(ctx, o.CertDir,
sets.NewString(o.CertSecretNames...),
sets.NewString(o.OptionalCertSecretNamePrefixes...),
sets.NewString(o.CertConfigMapNamePrefixes...),
sets.NewString(o.OptionalCertConfigMapNamePrefixes...),
false,
); err != nil {
return err
}
}
// Gather the config map that holds pods to be installed
var podsConfigMap *corev1.ConfigMap
err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) {
klog.Infof("Getting pod configmaps/%s -n %s", o.nameFor(o.PodConfigMapNamePrefix), o.Namespace)
podConfigMap, err := o.KubeClient.CoreV1().ConfigMaps(o.Namespace).Get(ctx, o.nameFor(o.PodConfigMapNamePrefix), metav1.GetOptions{})
if err != nil {
return false, err
}
if _, exists := podConfigMap.Data["pod.yaml"]; !exists {
return true, fmt.Errorf("required 'pod.yaml' key does not exist in configmap")
}
podsConfigMap = o.substituteConfigMap(podConfigMap)
return true, nil
})
if err != nil {
return err
}
// at this point we know that the required key is present in the config map, just make sure the manifest dir actually exists
klog.Infof("Creating directory for static pod manifest %q ...", o.PodManifestDir)
if err := os.MkdirAll(o.PodManifestDir, 0755); err != nil {
return err
}
// check to see if we need to acquire a file based lock to coordinate work.
// since writing to disk is fast and we need to write at most 2 files it is okay to hold a lock here
// note that in case of unplanned disaster the Linux kernel is going to release the lock when the process exits
if len(o.StaticPodManifestsLockFile) > 0 {
installerLock := flock.New(o.StaticPodManifestsLockFile)
klog.Infof("acquiring an exclusive lock on a %s", o.StaticPodManifestsLockFile)
if err := installerLock.Lock(ctx); err != nil {
return fmt.Errorf("failed to acquire an exclusive lock on %s, due to %v", o.StaticPodManifestsLockFile, err)
}
defer installerLock.Unlock()
}
// then write the required pod and all optional
// the key must be pod.yaml or has a -pod.yaml suffix to be considered
for rawPodKey, rawPod := range podsConfigMap.Data {
var manifestFileName = rawPodKey
if manifestFileName == "pod.yaml" {
// TODO: update kas-o to update the key to a fully qualified name
manifestFileName = o.PodConfigMapNamePrefix + ".yaml"
} else if !strings.HasSuffix(manifestFileName, "-pod.yaml") {
continue
}
klog.Infof("Writing a pod under %q key \n%s", manifestFileName, rawPod)
err := o.writePod([]byte(rawPod), manifestFileName, resourceDir)
if err != nil {
return err
}
}
return nil
}
func (o *InstallOptions) substituteConfigMap(obj *corev1.ConfigMap) *corev1.ConfigMap {
ret := obj.DeepCopy()
for k, oldContent := range obj.Data {
newContent := strings.ReplaceAll(oldContent, "REVISION", o.Revision)
newContent = strings.ReplaceAll(newContent, "NODE_NAME", o.NodeName)
newContent = strings.ReplaceAll(newContent, "NODE_ENVVAR_NAME", strings.ReplaceAll(strings.ReplaceAll(o.NodeName, "-", "_"), ".", "_"))
ret.Data[k] = newContent
}
return ret
}
func (o *InstallOptions) substituteSecret(obj *corev1.Secret) *corev1.Secret {
ret := obj.DeepCopy()
for k, oldContent := range obj.Data {
newContent := strings.ReplaceAll(string(oldContent), "REVISION", o.Revision)
newContent = strings.ReplaceAll(newContent, "NODE_NAME", o.NodeName)
newContent = strings.ReplaceAll(newContent, "NODE_ENVVAR_NAME", strings.ReplaceAll(strings.ReplaceAll(o.NodeName, "-", "_"), ".", "_"))
ret.Data[k] = []byte(newContent)
}
return ret
}
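// Illustrative note (editor's addition): given Revision "7" and NodeName
// "master-0.example.com" (a hypothetical node), the substitutions above replace
// "REVISION" with "7", "NODE_NAME" with "master-0.example.com", and
// "NODE_ENVVAR_NAME" with "master_0_example_com".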
func (o *InstallOptions) Run(ctx context.Context) error {
var eventTarget *corev1.ObjectReference
err := retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) {
var clientErr error
eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(o.KubeClient, o.Namespace, nil)
if clientErr != nil {
return false, clientErr
}
return true, nil
})
if err != nil {
klog.Warningf("unable to get owner reference (falling back to namespace): %v", err)
}
recorder := events.NewRecorder(o.KubeClient.CoreV1().Events(o.Namespace), "static-pod-installer", eventTarget)
if err := o.copyContent(ctx); err != nil {
recorder.Warningf("StaticPodInstallerFailed", "Installing revision %s: %v", o.Revision, err)
return fmt.Errorf("failed to copy: %v", err)
}
recorder.Eventf("StaticPodInstallerCompleted", "Successfully installed revision %s", o.Revision)
return nil
}
func (o *InstallOptions) writePod(rawPodBytes []byte, manifestFileName, resourceDir string) error {
// the kubelet has a bug that prevents graceful termination from working on static pods with the same name, filename
// and uuid. By setting the pod UID we can work around the kubelet bug and get our graceful termination honored.
// Per the node team, this is hard to fix in the kubelet, though it will affect all static pods.
pod, err := resourceread.ReadPodV1(rawPodBytes)
if err != nil {
return err
}
pod.UID = uuid.NewUUID()
for _, fn := range o.PodMutationFns {
klog.V(2).Infof("Customizing static pod ...")
pod = pod.DeepCopy()
if err := fn(pod); err != nil {
return err
}
}
finalPodBytes := resourceread.WritePodV1OrDie(pod)
// Write secrets, config maps and pod to disk
// This does not need timeout, instead we should fail hard when we are not able to write.
klog.Infof("Writing pod manifest %q ...", path.Join(resourceDir, manifestFileName))
if err := ioutil.WriteFile(path.Join(resourceDir, manifestFileName), []byte(finalPodBytes), 0644); err != nil {
return err
}
// remove the existing file to ensure kubelet gets "create" event from inotify watchers
if err := os.Remove(path.Join(o.PodManifestDir, manifestFileName)); err == nil {
klog.Infof("Removed existing static pod manifest %q ...", path.Join(o.PodManifestDir, manifestFileName))
} else if !os.IsNotExist(err) {
return err
}
klog.Infof("Writing static pod manifest %q ...\n%s", path.Join(o.PodManifestDir, manifestFileName), finalPodBytes)
if err := ioutil.WriteFile(path.Join(o.PodManifestDir, manifestFileName), []byte(finalPodBytes), 0644); err != nil {
return err
}
return nil
}
func writeConfig(content []byte, fullFilename string) error {
klog.Infof("Writing config file %q ...", fullFilename)
filePerms := os.FileMode(0644)
if strings.HasSuffix(fullFilename, ".sh") {
filePerms = 0755
}
return staticpod.WriteFileAtomic(content, filePerms, fullFilename)
}
func writeSecret(content []byte, fullFilename string) error {
klog.Infof("Writing secret manifest %q ...", fullFilename)
filePerms := os.FileMode(0600)
if strings.HasSuffix(fullFilename, ".sh") {
filePerms = 0700
}
return staticpod.WriteFileAtomic(content, filePerms, fullFilename)
}
| ["\"NODE_NAME\""] | [] | ["NODE_NAME"] | [] | ["NODE_NAME"] | go | 1 | 0 | |
main.go | // This is the entry point for mouthful, the self hosted commenting engine.
//
// Upon providing a config, the main program will parse it and start an API.
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/apex/gateway"
"github.com/vkuznecovas/mouthful/global"
"github.com/fatih/color"
"github.com/vkuznecovas/mouthful/api"
"github.com/vkuznecovas/mouthful/config"
"github.com/vkuznecovas/mouthful/db"
"github.com/vkuznecovas/mouthful/job"
)
func main() {
// Print a warning if user is running as root.
if os.Geteuid() == 0 {
color.Set(color.FgYellow)
log.Println("WARNING: Mouthful is running as root. For security reasons please consider creating a non root user for mouthful. Mouthful does not need root permissions to run.")
color.Unset()
}
configFlag := flag.String("config", "./data/config.json", "File to read configuration")
lambdaFlag := flag.Bool("l", false, "Run as lambda")
lambdaFlagLong := flag.Bool("lambda", false, "Run as lambda")
helpFlag := flag.Bool("h", false, "Show help")
helpFlagLong := flag.Bool("help", false, "Show help")
flag.Parse()
if *helpFlag || *helpFlagLong {
howto()
}
// read config.json
if _, err := os.Stat(*configFlag); os.IsNotExist(err) {
fmt.Fprintln(os.Stderr, "Couldn't find config file:", *configFlag)
os.Exit(1)
}
contents, err := ioutil.ReadFile(*configFlag)
if err != nil {
fmt.Fprintln(os.Stderr, "Couldn't read the config file")
panic(err)
}
// unmarshal config
config, err := config.ParseConfig(contents)
if err != nil {
fmt.Fprintln(os.Stderr, "Couldn't parse the config file")
panic(err)
}
// check if we're gonna need to override the path in static admin html
if config.Moderation.Path != nil {
err := global.RewriteAdminPanelScripts(*config.Moderation.Path)
if err != nil {
panic(err)
}
}
// set up db according to config
database, err := db.GetDBInstance(config.Database)
if err != nil {
panic(err)
}
// startup cleanup, if enabled
err = job.StartCleanupJobs(database, config.Moderation.PeriodicCleanUp)
if err != nil {
panic(err)
}
// get GIN server
service, err := api.GetServer(&database, config)
if err != nil {
panic(err)
}
// set GIN port
port := global.DefaultPort
if config.API.Port != nil {
port = *config.API.Port
}
// add GIN bind address, serving on all by default
bindAddress := global.DefaultBindAddress
if config.API.BindAddress != nil {
bindAddress = *config.API.BindAddress
}
// run the server
fullAddress := fmt.Sprintf("%v:%v", bindAddress, port)
color.Set(color.FgGreen)
log.Println("Running mouthful server on ", fullAddress)
color.Unset()
if *lambdaFlag || *lambdaFlagLong {
log.Fatal(gateway.ListenAndServe(":"+os.Getenv("PORT"), service))
} else {
service.Run(fullAddress)
}
}
func howto() {
fmt.Println(`
Welcome to Mouthful
Mouthful is a lightweight commenting server written in GO and Preact. It's a self hosted alternative to disqus that's ad free.
Parameters:
-config Location of config.json file (Searches in current directory as default)
-help Show this screen
`)
os.Exit(0)
}
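// Example invocation (editor's addition; the config path is hypothetical):
//
//	./mouthful -config ./data/config.json
//
// With -l/-lambda the handler is served through the API Gateway adapter and the
// listening port is read from the PORT environment variable instead of the config.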
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
fabfile.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
fabfile for Django:
originally derived from http://morethanseven.net/2009/07/27/fabric-django-git-apache-mod_wsgi-virtualenv-and-p/
TODO: adapt to current Invoke/Fabric!
TODO: change workflow from local git + remote "releases" to checkout from remote git
"""
#from __future__ import unicode_literals, print_function
import os
import time
from fabric.api import env, sudo, local, require, settings, run, prompt, cd, put
# Fabric setup
env.colorize_errors = True
# globals
env.prj_name = 'project_name' # no spaces!
env.prj_dir = 'django_project' # subdir under git root that contains the deployable part
env.sudoers_group = 'admin' # we don't use sudo any more, just certificate login as root!
env.use_feincms = True
env.use_medialibrary = True # feincms.medialibrary or similar
env.use_supervisor = True
env.use_daemontools = False  # referenced below; assumed False unless you run the app under daemontools
env.use_celery = False
env.use_memcached = False
env.webserver = 'nginx' # nginx (directory name below /etc!), nothing else ATM
env.dbserver = 'mysql' # mysql or postgresql
# environments
def localhost():
"Use the local virtual server"
env.hosts = ['localhost']
env.requirements = 'local'
env.user = env.prj_name # used by ssh
# env.adminuser = 'you'
env.homepath = '/Users/%(adminuser)s' % env # User home on OSX, TODO: check local OS
env.prj_path = '%(homepath)s/workspace/%(prj_name)s' % env
env.virtualhost_path = env.prj_path
env.pysp = '%(virtualhost_path)s/lib/python3.7/site-packages' % env
env.tmppath = '/var/tmp/django_cache/%(prj_name)s' % env
def webserver():
"Use the actual webserver"
env.hosts = ['webserver.example.com'] # Change to your server name!
env.requirements = 'webserver'
env.user = env.prj_name # You must create and sudo-enable the user first!
# env.adminuser = 'root' # This user is used to create the other user on first setup
env.homepath = '/home/%(user)s' % env # User home on Linux
env.prj_path = '/var/www/%(prj_name)s' % env
env.virtualhost_path = env.prj_path
env.pysp = '%(virtualhost_path)s/lib/python3.6/site-packages' % env
env.tmppath = '/var/tmp/django_cache/%(prj_name)s' % env
env.cryptdomain = 'www.project_name.de'
if not _is_host_up(env.hosts[0], 22):
import sys
sys.exit(1)
# helpers
def _is_host_up(host, port):
import socket
import paramiko
original_timeout = socket.getdefaulttimeout()
new_timeout = 3
socket.setdefaulttimeout(new_timeout)
host_status = False
try:
paramiko.Transport((host, port))
host_status = True
except:
print('***Warning*** Host {host} on port {port} is down.'.format(
host=host, port=port)
)
socket.setdefaulttimeout(original_timeout)
return host_status
# tasks
def test():
"Run the test suite and bail out if it fails"
local("cd %(prj_path)s/releases/current/%(prj_name)s; python manage.py test" % env) # , fail="abort")
def setup():
"""
Setup a fresh virtualenv as well as a few useful directories, then run
a full deployment
"""
require('hosts', provided_by=[webserver])
require('prj_path')
if env.requirements == 'local':
return local_setup()
with settings(user=env.adminuser):
# install Python environment and version control
sudo('apt-get install -y build-essential python3-dev python3-setuptools python3-imaging python3-virtualenv libyaml-dev python3-yaml git-core certbot')
# If you need Django modules in development, install more version control systems
# sudo('apt-get install -y subversion git-core mercurial', pty=False)
# install more Python stuff
# Don't install setuptools or virtualenv on Ubuntu with easy_install or pip! Only Ubuntu packages work!
# sudo('easy_install pip') # maybe broken
if env.use_supervisor:
sudo('pip install supervisor')
# sudo('echo; if [ ! -f /etc/supervisord.conf ]; then echo_supervisord_conf > /etc/supervisord.conf; fi', pty=True) # configure that!
sudo('echo; if [ ! -d /etc/supervisor ]; then mkdir /etc/supervisor; fi', pty=True)
if env.use_celery:
sudo('apt-get install -y rabbitmq-server') # needs additional deb-repository, see tools/README.rst!
if env.use_supervisor:
local('echo "CHECK: You want to use celery under supervisor. Please check your celery configuration in supervisor-celery.conf!"', pty=True)
if env.use_memcached:
sudo('apt-get install -y memcached python3-memcache')
# install webserver and database server
if env.webserver == 'nginx':
sudo('apt-get remove -y apache2 apache2-mpm-prefork apache2-utils') # is mostly pre-installed
sudo('apt-get install -y nginx-full python3-certbot-nginx')
else:
local('echo "WARNING: Your webserver «%s» is not supported!"' % env.webserver, pty=True) # other webservers?
if env.dbserver == 'mysql':
sudo('apt-get install -y mysql-server python3-mysqldb libmysqlclient-dev')
elif env.dbserver == 'postgresql':
sudo('apt-get install -y postgresql python3-psycopg2') # is psycopg2 still the way to go?
with settings(warn_only=True, pty=True):
# disable default site
sudo('cd /etc/%(webserver)s/sites-enabled/; rm default;' % env)
# install certbot scripts
#sudo('git clone https://github.com/certbot/certbot /opt/letsencrypt; cd /opt/letsencrypt; ./certbot-auto')
#sudo('cp tools/renew-letsencrypt.sh /etc/cron-monthly/')
# new project setup
setup_user()
deploy('first')
def setup_user():
"""
Create a new Linux user, set it up for certificate login.
Call `setup_passwords`.
"""
require('hosts', provided_by=[webserver])
require('adminuser')
env.new_user = env.user
with settings(user=env.adminuser, pty=True):
# create user and add it to admin group
sudo('adduser "%(new_user)s" --disabled-password --gecos ""' % env)
#' && adduser "%(new_user)s" %(sudoers_group)s' % env)
# copy authorized_keys from root for certificate login
sudo('mkdir %(homepath)s/.ssh && cp /root/.ssh/authorized_keys %(homepath)s/.ssh/' % env)
# Now we should be able to login with that new user
with settings(warn_only=True):
# create web and temp dirs
sudo('mkdir -p %(prj_path)s; chown %(new_user)s:%(new_user)s %(prj_path)s;' % env)
sudo('mkdir -p %(tmppath)s; chown %(new_user)s:%(new_user)s %(tmppath)s;' % env)
# symlink web dir in home
run('cd ~; ln -s %(prj_path)s www;' % env)
env.user = env.new_user
# cd to web dir and activate virtualenv on login
run('echo "\ncd %(prj_path)s && source bin/activate\n" >> %(homepath)s/.profile\n' % env, pty=True)
setup_passwords()
def setup_passwords():
"""
create .env and MySQL user; to be called from `setup` or `local_setup`
"""
print('I will now ask for the passwords to use for database and email account access. If one is empty, I’ll use the non-empty for both. If you leave both empty, I won’t create an database user.')
prompt('Please enter DATABASE_PASSWORD for user %(prj_name)s:' % env, key='database_password')
prompt('Please enter EMAIL_PASSWORD for user %(user)s:' % env, key='email_password')
if env.database_password and not env.email_password:
env.email_password = env.database_password
if env.email_password and not env.database_password:
env.database_password = env.email_password
# TODO: check input for need of quoting!
with settings(user=env.adminuser, pty=True):
# create .env and set database and email passwords
from django.utils.crypto import get_random_string
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#^&*(-_=+)' # without % and $
env.secret_key = get_random_string(50, chars)
        run("echo; if [ ! -f %(prj_path)s/.env ]; then echo \"DJANGO_SETTINGS_MODULE=settings\nDATABASE_PASSWORD=%(database_password)s\nEMAIL_PASSWORD=%(email_password)s\nSECRET_KEY='%(secret_key)s'\n\" > %(prj_path)s/.env; fi" % env)
# create MySQL user
if env.dbserver == 'mysql' and env.database_password:
env.dbuserscript = '%(homepath)s/userscript.sql' % env
run('''echo "\ncreate user '%(prj_name)s'@'localhost' identified by '%(database_password)s';
create database %(prj_name)s character set 'utf8';\n
grant all privileges on %(prj_name)s.* to '%(prj_name)s'@'localhost';\n
flush privileges;\n" > %(dbuserscript)s''' % env)
print('Setting up %(prj_name)s in MySQL. Please enter password for MySQL root:')
run('mysql -u root -p -D mysql < %(dbuserscript)s' % env)
run('rm %(dbuserscript)s' % env)
# TODO: add setup for PostgreSQL
setup_paths()
def setup_paths():
with cd(env.prj_path):
run('virtualenv .') # activate with 'source ~/www/bin/activate', perhaps add that to your .bashrc or .profile
with settings(warn_only=True):
# create necessary directories
for folder in 'logs run releases shared packages backup letsencrypt ssl'.split():
run('mkdir %s' % folder, pty=True)
run('chmod a+w logs', pty=True)
with settings(user=env.adminuser):
run('chown www-data:www-data letsencrypt && chown www-data:www-data ssl')
if env.use_medialibrary:
run('mkdir medialibrary', pty=True)
run('cd releases; ln -s . current; ln -s . previous;', pty=True)
def check_dotenv(local=True):
"""
Check if there is a .env file, otherwise create it.
Works ATM only locally.
"""
require('prj_name')
require('prj_path')
require('user')
dotenv_filename = '%(prj_path)s/%(prj_name)s/.env' % env
if not os.path.isfile(dotenv_filename):
print('I will now ask for the passwords to use for ' +
('local ' if local else '') + 'database and ' +
'email account access. If one is empty, I’ll use the non-empty ' +
'for both. If you leave both empty, I won’t create a database ' +
'user.')
prompt('Please enter DATABASE_PASSWORD for user %(prj_name)s:' % env, key='database_password')
prompt('Please enter EMAIL_PASSWORD for user %(user)s:' % env, key='email_password')
if env.database_password and not env.email_password:
env.email_password = env.database_password
if env.email_password and not env.database_password:
env.database_password = env.email_password
# TODO: check input for need of quoting!
# create .env and set database and email passwords
from django.utils.crypto import get_random_string
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#^&*(-_=+)' # without % and $
dotenv = 'SECRET_KEY="%s"\n' % get_random_string(50, chars)
dotenv += 'DJANGO_SETTINGS_MODULE=settings%s\n' % ('.local' if local else '')
dotenv += 'DATABASE_PASSWORD="%(database_password)s"\n' % env
dotenv += 'EMAIL_PASSWORD="%(email_password)s"\n' % env
try:
dotenv_file = open(dotenv_filename, 'x', encoding='utf-8')
dotenv_file.write(dotenv)
except TypeError: # Python 2.x
dotenv_file = open(dotenv_filename, 'w')
dotenv_file.write(dotenv.encode('utf-8'))
dotenv_file.close()
else:
print('Reading existing .env file...')
import dotenv
dotenv.read_dotenv(dotenv_filename)
env.database_password = os.environ['DATABASE_PASSWORD']
def local_setup():
"""
user setup on localhost
"""
require('hosts', provided_by=[localhost])
require('prj_path')
with cd(env.prj_path):
if not (os.path.isdir(os.path.join(env.prj_path, 'bin')) and
os.path.isdir(os.path.join(env.prj_path, 'lib')) and
os.path.isdir(os.path.join(env.prj_path, 'include'))):
with settings(warn_only=True):
# TODO: add Python version
local('virtualenv . && source bin/activate')
local('source bin/activate && pip install -U -r ./requirements/%(requirements)s.txt' % env)
check_dotenv(local=True)
# create MySQL user
if env.dbserver == 'mysql' and env.database_password:
# check MySQL:
print('Checking database connection...')
try:
import _mysql, _mysql_exceptions
except ImportError as ex:
print(ex)
print('MySQL module not installed!')
try:
db = _mysql.connect(host=env.hosts[0], user=env.user, passwd=env.database_password, db=env.prj_name)
print('Database connection successful.')
del db
except Exception as ex:
print(ex)
env.dbuserscript = '%(prj_path)s/userscript.sql' % env
dbs = open(env.dbuserscript, 'w')
dbs.write('''create user '%(prj_name)s'@'localhost' identified by '%(database_password)s';
create database %(prj_name)s character set 'utf8';
grant all privileges on %(prj_name)s.* to '%(prj_name)s'@'localhost';
flush privileges;\n''' % env)
dbs.close()
print('Setting up %(prj_name)s in MySQL. Please enter password for MySQL root:' % env)
local('mysql -u root -p -D mysql -e "source %(dbuserscript)s"' % env)
os.unlink(env.dbuserscript)
def deploy(param=''):
"""
Deploy the latest version of the site to the servers, install any
required third party modules, install the virtual host and
then restart the webserver
"""
require('hosts', provided_by=[localhost, webserver])
require('prj_path')
env.release = time.strftime('%Y%m%d%H%M%S')
upload_tar_from_git()
if param == 'first': install_requirements()
install_site()
symlink_current_release()
migrate(param)
restart_webserver()
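# Usage sketch (editor's addition, assuming Fabric 1.x task invocation):
#
#   fab webserver setup       # first-time provisioning of the remote host
#   fab webserver deploy      # roll out the current git master
#   fab localhost local_setup # prepare a local development environment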
def deploy_version(version):
"Specify a specific version to be made live"
require('hosts', provided_by=[localhost, webserver])
require('prj_path')
env.version = version
with cd(env.prj_path):
run('rm -rf releases/previous; mv releases/current releases/previous;', pty=True)
run('ln -s %(version)s releases/current' % env, pty=True)
restart_webserver()
def rollback():
"""
Limited rollback capability. Simply loads the previously current
version of the code. Rolling back again will swap between the two.
"""
require('hosts', provided_by=[localhost, webserver])
require('prj_path')
with cd(env.prj_path):
run('mv releases/current releases/_previous;', pty=True)
run('mv releases/previous releases/current;', pty=True)
run('mv releases/_previous releases/previous;', pty=True)
# TODO: check Django migrations for rollback
restart_webserver()
# Helpers. These are called by other functions rather than directly
def upload_tar_from_git():
"Create an archive from the current Git master branch and upload it"
require('release', provided_by=[deploy, setup])
local('git archive --format=tar master | gzip > %(release)s.tar.gz' % env)
run('mkdir -p %(prj_path)s/releases/%(release)s' % env) # , pty=True)
put('%(release)s.tar.gz' % env, '%(prj_path)s/packages/' % env)
run('cd %(prj_path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz' % env, pty=True)
local('rm %(release)s.tar.gz' % env)
def install_site():
"Add the virtualhost config file to the webserver's config, activate logrotate"
require('release', provided_by=[deploy, setup])
with cd('%(prj_path)s/releases/%(release)s' % env):
with settings(user=env.adminuser, pty=True):
run('cp server-setup/%(webserver)s.conf /etc/%(webserver)s/sites-available/%(prj_name)s' % env)
if env.use_daemontools: # activate new service runner
run('cp server-setup/service-run.sh /etc/service/%(prj_name)s/run; chmod a+x /etc/service/%(prj_name)s/run;' % env)
else: # delete old service dir
run('echo; if [ -d /etc/service/%(prj_name)s ]; then rm -rf /etc/service/%(prj_name)s; fi' % env)
if env.use_supervisor: # activate new supervisor.conf
run('cp server-setup/supervisor.conf /etc/supervisor/conf.d/%(prj_name)s.conf' % env)
if env.use_celery:
run('cp server-setup/supervisor-celery.conf /etc/supervisor/conf.d/%(prj_name)s-celery.conf' % env)
else: # delete old config file
# if you set a process name in supervisor.ini, then you must add it like %(prj_name):appserver
run('echo; if [ -f /etc/supervisor/%(prj_name)s.ini ]; then supervisorctl %(prj_name)s stop rm /etc/supervisor/%(prj_name)s.ini; fi' % env)
run('echo; if [ -f /etc/supervisor/conf.d/%(prj_name)s.conf ]; then supervisorctl %(prj_name)s stop rm /etc/supervisor/conf.d/%(prj_name)s.conf; fi' % env)
if env.use_celery:
run('echo; if [ -f /etc/supervisor/%(prj_name)s-celery.ini ]; then supervisorctl celery celerybeat stop rm /etc/supervisor/%(prj_name)s-celery.ini; fi' % env)
run('echo; if [ -f /etc/supervisor/conf.d/%(prj_name)s-celery.conf ]; then supervisorctl celery celerybeat stop rm /etc/supervisor/conf.d/%(prj_name)s-celery.conf; fi' % env)
if env.use_celery and env.use_daemontools:
run('cp server-setup/service-run-celeryd.sh /etc/service/%(prj_name)s-celery/run; chmod a+x /etc/service/%(prj_name)s-celery/run;' % env)
# try logrotate
with settings(warn_only=True):
run('cp server-setup/logrotate.conf /etc/logrotate.d/website-%(prj_name)s' % env)
if env.use_celery:
run('cp server-setup/logrotate-celery.conf /etc/logrotate.d/celery' % env)
run('cp server-setup/letsencrypt.conf /etc/letsencrypt/configs/%(cryptdomain)s.conf' % env)
with settings(warn_only=True):
run('cd /etc/%(webserver)s/sites-enabled/; ln -s ../sites-available/%(prj_name)s %(prj_name)s' % env)
def install_requirements():
"Install the required packages from the requirements file using pip"
require('release', provided_by=[deploy, setup])
require('requirements', provided_by=[localhost, webserver])
run('cd %(prj_path)s; pip install -U -r ./releases/%(release)s/requirements/%(requirements)s.txt' % env, pty=True)
def symlink_current_release():
"Symlink our current release"
require('release', provided_by=[deploy, setup])
with cd(env.prj_path):
run('rm releases/previous; mv releases/current releases/previous;', pty=True)
run('ln -s %(release)s releases/current' % env, pty=True)
# copy South migrations from previous release, if there are any
run('cd releases/previous/%(prj_name)s; if [ -d migrations ]; then cp -r migrations ../../current/%(prj_name)s/; fi' % env, pty=True)
# collect static files
with cd('releases/current/%(prj_name)s' % env):
run('rm settings/local.*') # delete local settings, could also copy webserver to local
run('mkdir ../logs', warn_only=True) # needed at start, while it stays empty
run('%(prj_path)s/bin/python manage.py collectstatic -v0 --noinput' % env, pty=True)
def migrate(param=''):
"Update the database"
require('prj_name')
require('prj_path')
env.southparam = '--auto'
if param == 'first':
if env.use_feincms:
# FeinCMS 1.9 doesn’t yet have migrations
run('cd %(prj_path)s/releases/current/%(prj_name)s; %(prj_path)s/bin/python manage.py makemigrations page medialibrary' % env, pty=True)
run('cd %(prj_path)s/releases/current/%(prj_name)s; %(prj_path)s/bin/python manage.py makemigrations %(prj_name)s' % env, pty=True)
run('cd %(prj_path)s/releases/current/%(prj_name)s; %(prj_path)s/bin/python manage.py migrate --noinput' % env, pty=True)
# with cd('%(prj_path)s/releases/current/%(prj_name)s' % env):
# run('%(prj_path)s/bin/python manage.py schemamigration %(prj_name)s %(southparam)s && %(prj_path)s/bin/python manage.py migrate %(prj_name)s' % env)
# # TODO: should also migrate other apps! get migrations from previous releases
def restart_webserver():
"Restart the web server"
require('webserver')
with settings(user=env.adminuser, warn_only=True, pty=True):
if env.webserver == 'nginx':
require('prj_path')
if env.use_daemontools:
run('kill `cat %(prj_path)s/logs/django.pid`' % env) # kill process, daemontools will start it again, see service-run.sh
if env.use_supervisor:
# if you set a process name in supervisor.ini, then you must add it like %(prj_name):appserver
if env.use_celery:
run('supervisorctl restart %(prj_name)s celery celerybeat' % env)
else:
run('supervisorctl restart %(prj_name)s' % env)
# require('prj_name')
# run('cd %(prj_path)s; bin/python releases/current/manage.py runfcgi method=threaded maxchildren=6 maxspare=4 minspare=2 host=127.0.0.1 port=%(webport)s pidfile=./logs/django.pid' % env)
run('service %(webserver)s reload' % env)
| [] | [] | ["DATABASE_PASSWORD"] | [] | ["DATABASE_PASSWORD"] | python | 1 | 0 | |
src/aks-preview/azext_aks_preview/custom.py | # pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
import colorama # pylint: disable=import-error
import yaml # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
InvalidArgumentValueError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id,
)
from azure.cli.core.util import (
get_file_json,
in_cloud_console,
read_file_content,
sdk_no_wait,
shell_safe_json_parse,
)
from azure.graphrbac.models import (
ApplicationCreateParameters,
KeyCredential,
PasswordCredential,
ServicePrincipalCreateParameters,
)
from dateutil.parser import parse # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_pass, prompt_y_n
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from six.moves.urllib.error import URLError # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from ._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_nodepool_snapshots_client,
cf_mc_snapshots_client,
cf_storage,
get_auth_management_client,
get_graph_rbac_management_client,
get_msi_client,
get_resource_by_name,
)
from ._consts import (
ADDONS,
ADDONS_DESCRIPTIONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from ._helpers import (
_trim_fqdn_name_containing_hcp,
)
from ._podidentity import (
_ensure_managed_identity_operator_permission,
_ensure_pod_identity_addon_is_enabled,
_fill_defaults_for_pod_identity_profile,
_update_addon_pod_identity,
)
from ._resourcegroup import get_rg_location
from ._roleassignments import (
add_role_assignment,
build_role_scope,
create_role_assignment,
resolve_object_id,
resolve_role_id,
)
from .addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
enable_addons,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
sanitize_loganalytics_ws_resource_id,
)
from .maintenanceconfiguration import (
aks_maintenanceconfiguration_update_internal,
)
from .vendored_sdks.azure_mgmt_preview_aks.v2022_04_02_preview.models import (
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
CreationData,
KubeletConfig,
LinuxOSConfig,
ManagedClusterAddonProfile,
ManagedClusterHTTPProxyConfig,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
PowerState,
Snapshot,
ManagedClusterSnapshot,
SysctlConfig,
UserAssignedIdentity,
)
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
    # retry until the URL answers (e.g. a local proxy coming up), then open it
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
            continue
        break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
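# Illustrative note (editor's addition): for a hypothetical cluster "my-AKS_cluster" in
# resource group "my-rg" under subscription "00000000-0000-0000-0000-000000000000",
# the helper above strips characters other than letters, digits and '-', truncates each
# part, and returns "my-AKSclus-my-rg-000000".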
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
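# Illustrative note (editor's addition): acsServicePrincipal.json under the CLI config
# dir maps subscription ids to the cached credentials, roughly:
#
#   {"00000000-0000-0000-0000-000000000000":
#       {"client_secret": "<secret>", "service_principal": "<appId>"}}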
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def subnet_role_assignment_exists(cli_ctx, scope):
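# Checks whether the built-in "Network Contributor" role (definition id below)
# is already assigned directly at the given subnet scope.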
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
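"""Parse the subscription, resource group and identity name out of a
user-assigned identity resource id and fetch the identity via the MSI client.
"""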
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
_re_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
flags=re.IGNORECASE)
_re_mc_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/managedclustersnapshots/(.*)',
flags=re.IGNORECASE)
def _get_snapshot(cli_ctx, snapshot_id):
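"""Resolve a node pool snapshot resource id to the snapshot object, raising
InvalidArgumentValueError when the id cannot be parsed or the snapshot does
not exist. See _get_cluster_snapshot for the managed cluster snapshot
equivalent.
"""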
snapshot_id = snapshot_id.lower()
match = _re_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_nodepool_snapshots_client(
cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError(
"Snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def _get_cluster_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_mc_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_mc_snapshots_client(
cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError(
"Managed cluster snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
from azure.cli.command_modules.acs.custom import _aks_browse
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser,
listen_address,
listen_port,
CUSTOM_MGMT_AKS_PREVIEW,
)
def _trim_nodepoolname(nodepool_name):
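# Default to "nodepool1" and truncate to 12 characters to stay within the
# node pool name length limit.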
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
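"""Add a new maintenance configuration; fails if one with the same name
already exists, otherwise delegates to the shared update helper.
"""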
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
# pylint: disable=unused-argument,too-many-locals
def aks_create(cmd,
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
pod_cidrs=None,
service_cidrs=None,
ip_families=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
disable_public_fqdn=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
http_proxy_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
# NOTE: for workload identity flags, we need to know if it's set to True/False or not set (None)
enable_workload_identity=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
edge_zone=None,
enable_secret_rotation=False,
disable_disk_driver=None,
disable_file_driver=None,
disable_snapshot_controller=None,
rotation_poll_interval=None,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
workload_runtime=None,
gpu_instance_profile=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
snapshot_id=None,
cluster_snapshot_id=None,
enable_oidc_issuer=False,
host_group_id=None,
crg_id=None,
message_of_the_day=None,
enable_azure_keyvault_kms=False,
azure_keyvault_kms_key_id=None,
enable_apiserver_vnet_integration=False,
apiserver_subnet_id=None,
yes=False):
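"""Create a managed cluster. All keyword arguments are captured via locals()
immediately below and handed to AKSPreviewCreateDecorator, which builds the
ManagedCluster profile and sends the create request.
"""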
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewCreateDecorator
# decorator pattern
aks_create_decorator = AKSPreviewCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc_preview(mc)
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
assign_kubelet_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
# NOTE: for workload identity flags, we need to know if it's set to True/False or not set (None)
enable_workload_identity=None,
disable_workload_identity=None,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
enable_disk_driver=None,
disable_disk_driver=None,
enable_file_driver=None,
disable_file_driver=None,
enable_snapshot_controller=None,
disable_snapshot_controller=None,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
nodepool_labels=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
enable_oidc_issuer=False,
http_proxy_config=None,
enable_azure_keyvault_kms=False,
azure_keyvault_kms_key_id=None,
enable_apiserver_vnet_integration=False,
apiserver_subnet_id=None):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewUpdateDecorator
# decorator pattern
aks_update_decorator = AKSPreviewUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# update mc profile
mc = aks_update_decorator.update_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc_preview(mc)
# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None,
public_fqdn=False,
credential_format=None):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if credential_format:
credential_format = credential_format.lower()
if admin:
raise InvalidArgumentValueError("--format can only be specified when requesting clusterUser credential.")
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType, credential_format)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name, serverType)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
resource_id)
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
print('If you share access to that storage account to Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
# Form containerName from fqdn, as it was previously jsut the location of code is changed.
# https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
maxContainerNameLength = 63
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_container_name = fqdn.replace('.', '-')
# use find() so that a missing "-hcp-" yields -1 (index() would raise ValueError)
len_of_container_name = normalized_container_name.find("-hcp-")
if len_of_container_name == -1:
len_of_container_name = maxContainerNameLength
container_name = normalized_container_name[:len_of_container_name]
sas_token = sas_token.strip('?')
deployment_yaml = _read_periscope_yaml()
deployment_yaml = deployment_yaml.replace(
"# <accountName, string>", storage_account_name)
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace(
"# <containerName, string>", container_name)
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def _read_periscope_yaml():
curr_dir = os.path.dirname(os.path.realpath(__file__))
periscope_yaml_file = os.path.join(
curr_dir, "deploymentyaml", "aks-periscope.yaml")
yaml_file = open(periscope_yaml_file, "r")
data_loaded = yaml_file.read()
return data_loaded
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify the node pool name, or use the "az aks nodepool" command to scale a specific node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster " \
"and might take a while. Do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
# This is only a client-side convenience so that customers can run "az aks upgrade" to upgrade all
# node pools of a cluster. The SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
raise CLIError('This cluster is not using VirtualMachineScaleSets. The node-image-only upgrade operation '
'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name, None)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name, snapshot_id=None):
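# When a snapshot id is supplied it is forwarded as the "AKSSnapshotId" custom
# header alongside the upgrade-node-image-version request.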
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name, headers=headers)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False,
rotation_poll_interval=None,):
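"""Translate the comma-separated --enable-addons string into
ManagedClusterAddonProfile entries, applying addon-specific configuration
(monitoring workspace, appgw settings, secret rotation, ...). Raises on any
addon name it does not recognize.
"""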
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(
workspace_resource_id)
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={
CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
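"""Reuse a service principal cached in aksServicePrincipal.json for this
subscription, or create a new one (with a generated secret and a salted
placeholder URL) and persist it for later runs.
"""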
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
# --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
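# Validates the min/max/node counts and, when the autoscaler is enabled,
# mutates the agent pool profile in place to turn auto scaling on.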
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count can only be used together with --enable-cluster-autoscaler, please specify the flag')
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
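"""Resolve the ACR by resource id or by name across resource groups, then
ensure (or remove, when detach=True) the 'acrpull' role assignment for the
given client id on that registry.
"""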
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
# Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type=None,
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
workload_runtime=None,
gpu_instance_profile=None,
snapshot_id=None,
host_group_id=None,
crg_id=None,
message_of_the_day=None,
no_wait=False):
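"""Add a node pool to an existing managed cluster, optionally seeding the
Kubernetes version, OS type/SKU and VM size from a node pool snapshot.
Typical invocation (illustrative; flag names follow the parameters above):
    az aks nodepool add -g MyResourceGroup --cluster-name MyCluster -n mynodepool --node-count 3
"""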
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version:
kubernetes_version = snapshot.kubernetes_version
if not os_type:
os_type = snapshot.os_type
if not os_sku:
os_sku = snapshot.os_sku
if not node_vm_size:
node_vm_size = snapshot.vm_size
creationData = CreationData(
source_resource_id=snapshot_id
)
if not os_type:
os_type = "Linux"
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
scale_down_mode=scale_down_mode,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode,
workload_runtime=workload_runtime,
gpu_instance_profile=gpu_instance_profile,
creation_data=creationData,
host_group_id=host_group_id,
capacity_reservation_group_id=crg_id
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
if message_of_the_day:
agent_pool.message_of_the_day = _get_message_of_the_day(
message_of_the_day)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,
aks_custom_headers=None,
snapshot_id=None):
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
scale_down_mode=None,
min_count=None, max_count=None,
max_surge=None,
mode=None,
labels=None,
node_taints=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and labels is None and node_taints is None):
reconcilePrompt = 'No argument was specified to update. Would you like to reconcile to the current settings?'
if not prompt_y_n(reconcilePrompt, default="n"):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels" or "--node-taints".')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if node_taints is not None:
taints_array = []
if node_taints != '':
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise InvalidArgumentValueError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
instance.node_taints = taints_array
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if scale_down_mode is not None:
instance.scale_down_mode = scale_down_mode
if mode is not None:
instance.mode = mode
if labels is not None:
instance.node_labels = labels
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_stop(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Stopped")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_start(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Running")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
ignore_pod_disruption_budget=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name, ignore_pod_disruption_budget=ignore_pod_disruption_budget)
def aks_addon_list_available():
available_addons = []
for k, v in ADDONS.items():
available_addons.append({
"name": k,
"description": ADDONS_DESCRIPTIONS[v]
})
return available_addons
def aks_addon_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
current_addons = []
for name, addon in ADDONS.items():
if not addon_profiles or addon not in addon_profiles:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": False
})
else:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": addon_profiles[addon].enabled
})
return current_addons
def aks_addon_show(cmd, client, resource_group_name, name, addon): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return {
"name": addon,
"api_key": addon_key,
"config": addon_profiles[addon_key].config,
"identity": addon_profiles[addon_key].identity
}
def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
return enable_addons(cmd, client, resource_group_name, name, addon, workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)
def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return enable_addons(cmd, client, resource_group_name, name, addon, check_enabled=False,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False):
instance = client.get(resource_group_name, name)
# this is overwritten by _update_addons(), so the value needs to be recorded here
msi_auth = instance.service_principal_profile.client_id == "msi"
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if not msi_auth:
raise ArgumentUsageError(
"--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same VNet, and a later function grants the
            # VNet-level Contributor role, so using a random agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise the cluster is not using a custom VNet; the permission is already granted in the AKS RP,
            # so we don't need to handle it on the client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
    # for each addon argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(
workspace_resource_id)
addon_profile.config = {
logAnalyticsConstName: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    # guard against an empty or unparsable addition before indexing into it
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
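# --- Illustrative sketch (editor addition, not part of the upstream module) ---
# The kubeconfig helpers above are typically chained: _print_or_merge_credentials()
# creates the destination file if needed and stages the new content in a temporary
# file before merge_kubernetes_configurations() combines the two. The default path
# and context name below are hypothetical.
def _example_merge_new_kubeconfig(kubeconfig_text):
    default_path = os.path.join(os.path.expanduser('~'), '.kube', 'config')
    _print_or_merge_credentials(default_path, kubeconfig_text,
                                overwrite_existing=False,
                                context_name='example-context')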
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
for _, diag_setting in enumerate(diag_settings):
if diag_setting:
return diag_setting.storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
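# --- Illustrative sketch (editor addition) ---
# get_aks_custom_headers() expects a comma-separated list of key=value pairs,
# so a hypothetical call such as
#     get_aks_custom_headers("CustomHeader1=Value1,CustomHeader2=Value2")
# returns {"CustomHeader1": "Value1", "CustomHeader2": "Value2"}, while passing
# None or "" yields an empty dict. Any item that is not a key=value pair raises
# a CLIError.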
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require post-cluster-creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'the ACR to it. You can manually grant the identity named '
                               '<CLUSTER_NAME>-agentpool in the MC_ resource group permission '
                               'to pull from the ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_message_of_the_day(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
content = read_file_content(file_path)
if not content:
raise ArgumentUsageError(
"message of the day should point to a non-empty file if specified.")
content = base64.b64encode(bytes(content, 'ascii')).decode('ascii')
return content
def _get_kubelet_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
config_object.container_log_max_files = kubelet_config.get(
"containerLogMaxFiles", None)
config_object.container_log_max_size_mb = kubelet_config.get(
"containerLogMaxSizeMB", None)
config_object.pod_max_pids = kubelet_config.get(
"podMaxPids", None)
return config_object
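# --- Illustrative sketch (editor addition) ---
# _get_kubelet_config() expects a JSON object whose camelCase keys mirror the
# .get() calls above. A hypothetical /path/to/kubeletconfig.json could contain:
#     {"cpuManagerPolicy": "static", "cpuCfsQuota": true,
#      "imageGcHighThreshold": 85, "imageGcLowThreshold": 80,
#      "failSwapOn": false, "podMaxPids": 1024}
# and would be parsed with _get_kubelet_config("/path/to/kubeletconfig.json").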
def _get_linux_os_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
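# --- Illustrative sketch (editor addition) ---
# _get_linux_os_config() expects a top-level JSON object with a nested
# "sysctls" object; the keys mirror the .get() calls above. A hypothetical file:
#     {"transparentHugePageEnabled": "always", "swapFileSizeMB": 1500,
#      "sysctls": {"netCoreSomaxconn": 163849, "vmMaxMapCount": 262144}}
# Note that "sysctls" must be a JSON object, because the function raises a
# CLIError when it is missing or has any other type.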
def _get_http_proxy_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
hp_config = get_file_json(file_path)
if not isinstance(hp_config, dict):
raise CLIError(
"Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
config_object = ManagedClusterHTTPProxyConfig()
config_object.http_proxy = hp_config.get("httpProxy", None)
config_object.https_proxy = hp_config.get("httpsProxy", None)
config_object.no_proxy = hp_config.get("noProxy", None)
config_object.trusted_ca = hp_config.get("trustedCa", None)
return config_object
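# --- Illustrative sketch (editor addition) ---
# _get_http_proxy_config() maps a small JSON object onto
# ManagedClusterHTTPProxyConfig; a hypothetical file could look like:
#     {"httpProxy": "http://proxy.example.com:3128",
#      "httpsProxy": "https://proxy.example.com:3129",
#      "noProxy": ["localhost", "127.0.0.1"],
#      "trustedCa": "<base64-encoded CA certificate>"}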
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError(
'Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
def aks_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
cluster_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=cluster_id
)
snapshot = ManagedClusterSnapshot(
name=name,
tags=tags,
location=location,
creation_data=creationData,
snapshot_type="ManagedCluster",
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, name, snapshot, headers=headers)
def aks_snapshot_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, name)
return snapshot
def aks_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'This will delete the cluster snapshot "{}" in resource group "{}", Are you sure?'.format(
name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
def aks_nodepool_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
snapshot_name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=snapshot_name,
tags=tags,
location=location,
creation_data=creationData
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, snapshot_name, snapshot, headers=headers)
def aks_nodepool_snapshot_show(cmd, client, resource_group_name, snapshot_name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, snapshot_name)
return snapshot
def aks_nodepool_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
snapshot_name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'This will delete the nodepool snapshot "{}" in resource group "{}", Are you sure?'.format(
snapshot_name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, snapshot_name)
def aks_nodepool_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
init.go | package jpush
import (
"os"
"strconv"
)
func init() {
AppKey = os.Getenv("JPUSH_USERNAME")
MasterSecret = os.Getenv("JPUSH_PASSWORD")
APIEndpoint = os.Getenv("JPUSH_URL")
	// JPUSH_MODE toggles whether pushes target the iOS production environment.
	if isProd, err := strconv.ParseBool(os.Getenv("JPUSH_MODE")); err == nil {
		DefaultOption.IsIOSProd = isProd
	}
DefaultClient = NewJPushClient(APIEndpoint, AppKey, MasterSecret)
}
| [
"\"JPUSH_USERNAME\"",
"\"JPUSH_PASSWORD\"",
"\"JPUSH_URL\"",
"\"JPUSH_MODE\""
]
| []
| [
"JPUSH_MODE",
"JPUSH_USERNAME",
"JPUSH_URL",
"JPUSH_PASSWORD"
]
| [] | ["JPUSH_MODE", "JPUSH_USERNAME", "JPUSH_URL", "JPUSH_PASSWORD"] | go | 4 | 0 | |
cmd/main.go | package main
import (
"fmt"
"net/http"
"os"
"github.com/petfinder-com/petfinder-go-sdk/pfapi"
)
var client *http.Client
// main() is used as an example of accessing the Petfinder API
func main() {
//Pull Client ID key and Client Secret Key from environment variables
clientID := os.Getenv("PF_CLIENT_ID")
clientSecret := os.Getenv("PF_CLIENT_SECRET")
//Create pfclient Object
pfclient, err := pfapi.NewClient(clientID, clientSecret)
if err != nil {
fmt.Println("Could not create client")
}
//Create variable based on AnimalType struct
var types []pfapi.AnimalType
	// Retrieve all animal types and load them into the struct
types, _ = pfclient.GetAllTypes()
//Iterate through animal types using struct data
for _, t := range types {
fmt.Println("Name: ", t.Name)
fmt.Println("Colors: ", t.Colors)
fmt.Println("Self Link: ", t.Links.Self.Href)
}
//Get a specific type
myType, err := pfclient.GetType("dog")
if err != nil {
fmt.Println(err)
}
fmt.Println(myType.Name)
//Get a particular animal by id
myAnimal, err := pfclient.GetAnimalById("39140238")
if err != nil {
fmt.Println(err)
}
fmt.Println(myAnimal.ID, myAnimal.Species, myAnimal.Breeds)
myParams := pfapi.NewPetSearchParams()
myParams.AddParam("type", "Dog")
myParams.AddParam("coat", "Medium")
myAnimals, err := pfclient.GetAnimals(myParams)
if err != nil {
fmt.Println(err)
}
for _, a := range myAnimals.Animals {
fmt.Println(a.Name)
fmt.Println(a.Photos)
}
fmt.Println(myAnimals.Pagination.TotalCount)
//Orgs
myOrgs, err := pfclient.GetOrganizations()
if err != nil {
fmt.Println(err)
}
for _, a := range myOrgs.Organizations {
fmt.Println("Org Name: " + a.ID)
}
fmt.Println(myOrgs.Pagination.TotalCount)
//Orgs By ID
myOrg, err := pfclient.GetOrganizationsByID("NJ333")
if err != nil {
fmt.Println(err)
}
//for _, a := range myOrgs.Organizations {
// fmt.Println(a)
// }
fmt.Println("Org filter:")
fmt.Println(myOrg)
}
| [
"\"PF_CLIENT_ID\"",
"\"PF_CLIENT_SECRET\""
]
| []
| [
"PF_CLIENT_ID",
"PF_CLIENT_SECRET"
]
| [] | ["PF_CLIENT_ID", "PF_CLIENT_SECRET"] | go | 2 | 0 | |
check_certs/plugins/ses_notifier.py | #!/usr/bin/env python
from botocore.exceptions import ClientError
import boto3
import logging
class SesNotifier:
def __init__(self, params):
# This address must be verified with Amazon SES.
self.sender = params.get("sender", "")
# If your account is still in the sandbox, this address must be verified.
self.recipient = params.get("recipient", "")
self.aws_region = params.get("aws_region", "us-west-2")
# The character encoding for the email.
self.charset = "UTF-8"
# Create a new SES resource and specify a region.
self.client = boto3.client('ses', region_name=self.aws_region)
def send(self, info):
# The subject line for the email.
subject = "%s expires on %s, and in %d days" % (
info.get("site", "SITE_NOT_PROVIDED"),
info.get("not_valid_after", "NOT_VALID_AFTER_NOT_PROVIDED"),
info.get("expires_in_days", "EXPIRES_IN_DAYS_NOT_PROVIDED"),
)
body_text = "%s expires on %s, and in %d days" % (
info.get("site", "SITE_NOT_PROVIDED"),
info.get("not_valid_after", "NOT_VALID_AFTER_NOT_PROVIDED"),
info.get("expires_in_days", "EXPIRES_IN_DAYS_NOT_PROVIDED"),
)
try:
#Provide the contents of the email.
response = self.client.send_email(
Destination={
'ToAddresses': [
self.recipient,
],
},
Message={
'Body': {
'Text': {
'Charset': self.charset,
'Data': body_text,
},
},
'Subject': {
'Charset': self.charset,
'Data': subject,
},
},
Source=self.sender,
)
# Display an error if something goes wrong.
except ClientError as e:
logging.error(e.response['Error']['Message'])
raise e
else:
logging.info("Email sent! Message ID: %s" %
response['ResponseMetadata']['RequestId'])
def setup(app, params):
app.register_notifier(SesNotifier(params))
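# --- Illustrative sketch (editor addition, not part of the original plugin) ---
# A hypothetical standalone run of the notifier; the addresses are made up and
# must be verified in SES, and valid AWS credentials are assumed to be
# available to boto3 at runtime.
if __name__ == "__main__":
    notifier = SesNotifier({
        "sender": "alerts@example.com",
        "recipient": "ops@example.com",
        "aws_region": "us-west-2",
    })
    notifier.send({
        "site": "example.com",
        "not_valid_after": "2024-01-01",
        "expires_in_days": 30,
    })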
| []
| []
| []
| [] | [] | python | null | null | null |
frappe/setup_logging.py | import frappe
import logging
import logging.config
import os
import json
from pprint import pformat
class ContextFilter(logging.Filter):
"""
This is a filter which injects request information (if available) into the log.
"""
def filter(self, record):
record.form_dict = pformat(getattr(frappe.local, 'form_dict', None))
record.site = getattr(frappe.local, 'site', None)
record.tb = frappe.utils.get_traceback()
return True
def setup_logging():
conf = frappe.get_site_config(sites_path=os.environ.get('SITES_PATH', '.'))
if conf.logging_conf:
logging_conf = conf.logging_conf
else:
logging_conf = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"context_filter": {
"()": "frappe.setup_logging.ContextFilter"
}
},
"formatters": {
"site_wise": {
"format": "\n%(asctime)s %(message)s \n site: %(site)s\n form: %(form_dict)s\n\n%(tb)s\n--------------"
}
},
"loggers": {
"frappe": {
"level": "INFO",
"propagate": False,
"filters": ["context_filter"],
"handlers": ["request_exception"]
}
},
"handlers": {
"request_exception": {
"level": "ERROR",
"formatter": "site_wise",
"class": "logging.StreamHandler",
}
}
}
if conf.request_exception_log_file:
logging_conf.update({
"handlers": {
"request_exception": {
"level": "ERROR",
"formatter": "site_wise",
"class": "logging.handlers.WatchedFileHandler",
"filename": conf.request_exception_log_file
}
}
})
logging.config.dictConfig(logging_conf)
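# --- Illustrative sketch (editor addition) ---
# The dictConfig above can be overridden per site: a hypothetical
# site_config.json may set "request_exception_log_file" (switching the handler
# to a WatchedFileHandler) or supply a complete "logging_conf" mapping, e.g.:
#     {"request_exception_log_file": "/var/log/frappe/request_exceptions.log"}
# With SITES_PATH pointing at the bench's sites directory, calling
# setup_logging() applies the resulting configuration.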
| []
| []
| [
"SITES_PATH"
]
| [] | ["SITES_PATH"] | python | 1 | 0 | |
template_settings_postgres.py | """
Django settings for TA project.
Generated by 'django-admin startproject' using Django 1.11.24.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import logging.config # this is for logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = '5jpu71#_4j4jaorh+_llj45p$gno7@)+!04n!#q_27b+4cv%4('
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# os.getenv returns strings, so parse the flag explicitly instead of relying on truthiness
DEBUG = os.getenv('DJANGO_DEBUG', 'False').lower() in ('true', '1', 'yes')
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS').split(' ')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'TA.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TA.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'], # database name
'USER': os.environ['DB_USER'], # user name
'PASSWORD': os.environ['DB_PASSWORD'], # user password
'HOST': os.environ['DB_HOST'], # postgres server
'PORT': os.environ['DB_PORT'],
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Logging Configuration
# Clear prev config
LOGGING_CONFIG = None
# Get loglevel from env
LOGLEVEL = os.getenv('DJANGO_LOGLEVEL', 'info').upper()
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
},
},
'loggers': {
'': {
'level': LOGLEVEL,
'handlers': ['console',],
},
'django.utils.autoreload': {
'level': 'ERROR',
# ...
}
},
})
# Prevent creating pyc files (note: this only takes effect when
# PYTHONDONTWRITEBYTECODE is exported as an environment variable before the
# interpreter starts; the assignment below is kept for documentation purposes)
PYTHONDONTWRITEBYTECODE = 1
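# --- Illustrative sketch (editor addition) ---
# Every setting above is read from the environment; for local development one
# hypothetical way to satisfy the required variables before importing this
# module is:
#     os.environ.setdefault('DJANGO_SECRET_KEY', 'dev-only-secret')
#     os.environ.setdefault('DJANGO_DEBUG', 'true')
#     os.environ.setdefault('ALLOWED_HOSTS', 'localhost 127.0.0.1')
#     for key, value in {'DB_NAME': 'ta', 'DB_USER': 'ta', 'DB_PASSWORD': 'ta',
#                        'DB_HOST': 'localhost', 'DB_PORT': '5432'}.items():
#         os.environ.setdefault(key, value)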
| []
| []
| [
"ALLOWED_HOSTS",
"DJANGO_SECRET_KEY",
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DJANGO_LOGLEVEL",
"DJANGO_DEBUG",
"DB_USER"
]
| [] | ["ALLOWED_HOSTS", "DJANGO_SECRET_KEY", "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DJANGO_LOGLEVEL", "DJANGO_DEBUG", "DB_USER"] | python | 9 | 0 | |
src/toil/provisioners/gceProvisioner.py | # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import os
import time
import threading
import json
import requests
import uuid
import logging
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.drivers.gce import GCEFailedNode
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners import NoSuchClusterException
from toil.jobStores.googleJobStore import GoogleJobStore
from toil.provisioners.node import Node
logger = logging.getLogger(__name__)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
class GCEProvisioner(AbstractProvisioner):
"""
Implements a Google Compute Engine Provisioner using libcloud.
"""
NODE_BOTO_PATH = "/root/.boto" # boto file path on instances
SOURCE_IMAGE = (b'https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/'
b'images/coreos-stable-1576-4-0-v20171206')
def __init__(self, clusterName, zone, nodeStorage, sseKey):
super(GCEProvisioner, self).__init__(clusterName, zone, nodeStorage)
self.cloud = 'gce'
self._sseKey = sseKey
# If the clusterName is not given, then Toil must be running on the leader
# and should read the settings from the instance meta-data.
if clusterName:
self._readCredentials()
else:
self._readClusterSettings()
def _readClusterSettings(self):
"""
Read the cluster settings from the instance, which should be the leader.
See https://cloud.google.com/compute/docs/storing-retrieving-metadata for details about
reading the metadata.
"""
metadata_server = "http://metadata/computeMetadata/v1/instance/"
metadata_flavor = {'Metadata-Flavor': 'Google'}
zone = requests.get(metadata_server + 'zone', headers = metadata_flavor).text
self._zone = zone.split('/')[-1]
project_metadata_server = "http://metadata/computeMetadata/v1/project/"
self._projectId = requests.get(project_metadata_server + 'project-id', headers = metadata_flavor).text
# From a GCE instance, these values can be blank. Only the projectId is needed
self._googleJson = ''
self._clientEmail = ''
self._tags = requests.get(metadata_server + 'description', headers = metadata_flavor).text
tags = json.loads(self._tags)
self.clusterName = tags['clusterName']
self._gceDriver = self._getDriver()
self._instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
leader = self.getLeader()
self._leaderPrivateIP = leader.privateIP
# generate a public key for the leader, which is used to talk to workers
self._masterPublicKey = self._setSSH()
# The location of the Google credentials file on instances.
self._credentialsPath = GoogleJobStore.nodeServiceAccountJson
        self._keyName = 'core'  # key name the leader uses to communicate with workers
self._botoPath = self.NODE_BOTO_PATH # boto credentials (used if reading an AWS bucket)
def _readCredentials(self):
"""
Get the credentials from the file specified by GOOGLE_APPLICATION_CREDENTIALS.
"""
self._googleJson = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
if not self._googleJson:
raise RuntimeError('GOOGLE_APPLICATION_CREDENTIALS not set.')
try:
with open(self._googleJson) as jsonFile:
self.googleConnectionParams = json.loads(jsonFile.read())
except:
raise RuntimeError('GCEProvisioner: Could not parse the Google service account json file %s'
% self._googleJson)
self._projectId = self.googleConnectionParams['project_id']
self._clientEmail = self.googleConnectionParams['client_email']
self._credentialsPath = self._googleJson
self._masterPublicKey = None
self._gceDriver = self._getDriver()
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs):
"""
In addition to the parameters inherited from the abstractProvisioner,
the Google launchCluster takes the following parameters:
keyName: The key used to communicate with instances
botoPath: Boto credentials for reading an AWS jobStore (optional).
vpcSubnet: A subnet (optional).
"""
if 'keyName' not in kwargs:
raise RuntimeError("A keyPairName is required for the GCE provisioner.")
self._keyName = kwargs['keyName']
if 'botoPath' in kwargs:
self._botoPath = kwargs['botoPath']
self._vpcSubnet = kwargs['vpcSubnet'] if 'vpcSubnet' in kwargs else None
# Throws an error if cluster exists
self._instanceGroup = self._gceDriver.ex_create_instancegroup(self.clusterName, self._zone)
logger.debug('Launching leader')
# GCE doesn't have a dictionary tags field. The tags field is just a string list.
# Therefore, dumping tags into the description.
tags = {'Owner': self._keyName, 'clusterName': self.clusterName}
if 'userTags' in kwargs:
tags.update(kwargs['userTags'])
self._tags = json.dumps(tags)
userData = self._getCloudConfigUserData('leader')
metadata = {'items': [{'key': 'user-data', 'value': userData}]}
imageType = 'coreos-stable'
sa_scopes = [{'scopes': ['compute', 'storage-full']}]
disk = {}
disk['initializeParams'] = {
'sourceImage': self.SOURCE_IMAGE,
'diskSizeGb' : leaderStorage }
disk.update({'boot': True,
'autoDelete': True })
name= 'l' + str(uuid.uuid4())
leader = self._gceDriver.create_node(name, leaderNodeType, imageType,
location=self._zone,
ex_service_accounts=sa_scopes,
ex_metadata=metadata,
ex_subnetwork=self._vpcSubnet,
ex_disks_gce_struct = [disk],
description=self._tags,
ex_preemptible=False)
self._instanceGroup.add_instances([leader])
self._leaderPrivateIP = leader.private_ips[0] # needed if adding workers
#self.subnetID = leader.subnet_id #TODO: get subnetID
# Wait for the appliance to start and inject credentials.
leaderNode = Node(publicIP=leader.public_ips[0], privateIP=leader.private_ips[0],
name=leader.name, launchTime=leader.created_at, nodeType=leader.size,
preemptable=False, tags=self._tags)
leaderNode.waitForNode('toil_leader', keyName=self._keyName)
leaderNode.copySshKeys(self._keyName)
leaderNode.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, 'toil_leader')
if self._botoPath:
leaderNode.injectFile(self._botoPath, self.NODE_BOTO_PATH, 'toil_leader')
logger.debug('Launched leader')
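    # --- Illustrative sketch (editor addition) ---
    # A hypothetical driver script could provision a small cluster roughly like
    # this (zone, machine type, and key name are made-up values):
    #     p = GCEProvisioner(clusterName='demo', zone='us-west1-a',
    #                        nodeStorage=50, sseKey=None)
    #     p.launchCluster('n1-standard-2', leaderStorage=50, owner='me',
    #                     keyName='core')
    #     p.addNodes('n1-standard-2', numNodes=2, preemptable=False)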
def getNodeShape(self, nodeType, preemptable=False):
# TODO: read this value only once
sizes = self._gceDriver.list_sizes(location=self._zone)
sizes = [x for x in sizes if x.name == nodeType]
assert len(sizes) == 1
instanceType = sizes[0]
disk = 0 #instanceType.disks * instanceType.disk_capacity * 2 ** 30
if disk == 0:
# This is an EBS-backed instance. We will use the root
            # volume, so add the amount of EBS storage requested for the root volume
disk = self._nodeStorage * 2 ** 30
        # instanceType.ram is reported in MB.
        # Underestimate memory by 100M to prevent the autoscaler from disagreeing with
        # Mesos about whether a job can run on a particular node type.
        memory = (instanceType.ram / 1000 - 0.1) * 2 ** 30
return Shape(wallTime=60 * 60,
memory=memory,
cores=instanceType.extra['guestCpus'],
disk=disk,
preemptable=preemptable)
@staticmethod
def retryPredicate(e):
""" Not used by GCE """
return False
def destroyCluster(self):
"""
Try a few times to terminate all of the instances in the group.
"""
logger.debug("Destroying cluster %s" % self.clusterName)
instancesToTerminate = self._getNodesInCluster()
attempts = 0
while instancesToTerminate and attempts < 3:
self._terminateInstances(instances=instancesToTerminate)
instancesToTerminate = self._getNodesInCluster()
attempts += 1
# remove group
instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
instanceGroup.destroy()
def terminateNodes(self, nodes):
nodeNames = [n.name for n in nodes]
instances = self._getNodesInCluster()
instancesToKill = [i for i in instances if i.name in nodeNames]
self._terminateInstances(instancesToKill)
def addNodes(self, nodeType, numNodes, preemptable, spotBid=None):
assert self._leaderPrivateIP
# If keys are rsynced, then the mesos-slave needs to be started after the keys have been
# transferred. The waitForKey.sh script loops on the new VM until it finds the keyPath file, then it starts the
# mesos-slave. If there are multiple keys to be transferred, then the last one to be transferred must be
# set to keyPath.
keyPath = None
botoExists = False
if self._botoPath is not None and os.path.exists(self._botoPath):
keyPath = self.NODE_BOTO_PATH
botoExists = True
elif self._sseKey:
keyPath = self._sseKey
if not preemptable:
logger.debug('Launching %s non-preemptable nodes', numNodes)
else:
logger.debug('Launching %s preemptable nodes', numNodes)
#kwargs["subnet_id"] = self.subnetID if self.subnetID else self._getClusterInstance(self.instanceMetaData).subnet_id
userData = self._getCloudConfigUserData('worker', self._masterPublicKey, keyPath, preemptable)
metadata = {'items': [{'key': 'user-data', 'value': userData}]}
imageType = 'coreos-stable'
sa_scopes = [{'scopes': ['compute', 'storage-full']}]
disk = {}
disk['initializeParams'] = {
'sourceImage': self.SOURCE_IMAGE,
'diskSizeGb' : self._nodeStorage }
disk.update({'boot': True,
'autoDelete': True })
# TODO:
        # - bug in gce.py for ex_create_multiple_nodes (erroneously, doesn't allow image and disk to be specified)
# - ex_create_multiple_nodes is limited to 1000 nodes
# - use a different function
# - or write a loop over the rest of this function, with 1K nodes max on each iteration
#instancesLaunched = driver.ex_create_multiple_nodes(
retries = 0
workersCreated = 0
# Try a few times to create the requested number of workers
while numNodes-workersCreated > 0 and retries < 3:
instancesLaunched = self.ex_create_multiple_nodes(
'', nodeType, imageType, numNodes-workersCreated,
location=self._zone,
ex_service_accounts=sa_scopes,
ex_metadata=metadata,
ex_disks_gce_struct = [disk],
description=self._tags,
ex_preemptible = preemptable
)
failedWorkers = []
for instance in instancesLaunched:
if isinstance(instance, GCEFailedNode):
logger.error("Worker failed to launch with code %s. Error message: %s"
% (instance.code, instance.error))
continue
node = Node(publicIP=instance.public_ips[0], privateIP=instance.private_ips[0],
name=instance.name, launchTime=instance.created_at, nodeType=instance.size,
preemptable=False, tags=self._tags) #FIXME: what should tags be set to?
try:
self._injectWorkerFiles(node, botoExists)
logger.debug("Created worker %s" % node.publicIP)
self._instanceGroup.add_instances([instance])
workersCreated += 1
except Exception as e:
logger.error("Failed to configure worker %s. Error message: %s" % (node.name, e))
failedWorkers.append(instance)
if failedWorkers:
logger.error("Terminating %d failed workers" % len(failedWorkers))
self._terminateInstances(failedWorkers)
retries += 1
logger.debug('Launched %d new instance(s)', numNodes)
if numNodes != workersCreated:
logger.error("Failed to launch %d worker(s)", numNodes-workersCreated)
return workersCreated
def getProvisionedWorkers(self, nodeType, preemptable):
assert self._leaderPrivateIP
entireCluster = self._getNodesInCluster(nodeType=nodeType)
logger.debug('All nodes in cluster: %s', entireCluster)
workerInstances = []
for instance in entireCluster:
scheduling = instance.extra.get('scheduling')
# If this field is not found in the extra meta-data, assume the node is not preemptable.
if scheduling and scheduling.get('preemptible', False) != preemptable:
continue
isWorker = True
for ip in instance.private_ips:
if ip == self._leaderPrivateIP:
isWorker = False
break # don't include the leader
if isWorker and instance.state == 'running':
workerInstances.append(instance)
logger.debug('All workers found in cluster: %s', workerInstances)
return [Node(publicIP=i.public_ips[0], privateIP=i.private_ips[0],
name=i.name, launchTime=i.created_at, nodeType=i.size,
preemptable=preemptable, tags=None)
for i in workerInstances]
def getLeader(self):
instances = self._getNodesInCluster()
instances.sort(key=lambda x: x.created_at)
try:
leader = instances[0] # assume leader was launched first
except IndexError:
raise NoSuchClusterException(self.clusterName)
return Node(publicIP=leader.public_ips[0], privateIP=leader.private_ips[0],
name=leader.name, launchTime=leader.created_at, nodeType=leader.size,
preemptable=False, tags=None)
def _injectWorkerFiles(self, node, botoExists):
"""
Set up the credentials on the worker.
"""
node.waitForNode('toil_worker', keyName=self._keyName)
node.copySshKeys(self._keyName)
node.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, 'toil_worker')
if self._sseKey:
node.injectFile(self._sseKey, self._sseKey, 'toil_worker')
if botoExists:
node.injectFile(self._botoPath, self.NODE_BOTO_PATH, 'toil_worker')
def _getNodesInCluster(self, nodeType=None):
instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
instances = instanceGroup.list_instances()
if nodeType:
instances = [instance for instance in instances if instance.size == nodeType]
return instances
def _getDriver(self):
""" Connect to GCE """
driverCls = get_driver(Provider.GCE)
return driverCls(self._clientEmail,
self._googleJson,
project=self._projectId,
datacenter=self._zone)
def _terminateInstances(self, instances):
def worker(driver, instance):
logger.debug('Terminating instance: %s', instance.name)
driver.destroy_node(instance)
threads = []
for instance in instances:
t = threading.Thread(target=worker, args=(self._gceDriver,instance))
threads.append(t)
t.start()
logger.debug('... Waiting for instance(s) to shut down...')
for t in threads:
t.join()
    # MONKEY PATCH - This function was copied from libcloud to fix a bug.
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
def ex_create_multiple_nodes(
self, base_name, size, image, number, location=None,
ex_network='default', ex_subnetwork=None, ex_tags=None,
ex_metadata=None, ignore_errors=True, use_existing_disk=True,
poll_interval=2, external_ip='ephemeral',
ex_disk_type='pd-standard', ex_disk_auto_delete=True,
ex_service_accounts=None, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT,
description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None,
ex_nic_gce_struct=None, ex_on_host_maintenance=None,
ex_automatic_restart=None, ex_image_family=None,
ex_preemptible=None):
"""
Monkey patch to gce.py in libcloud to allow disk and images to be specified.
Also changed name to a uuid below.
The prefix 'wp' identifies preemptable nodes and 'wn' non-preemptable nodes.
"""
# if image and ex_disks_gce_struct:
# raise ValueError("Cannot specify both 'image' and "
# "'ex_disks_gce_struct'.")
driver = self._getDriver()
if image and ex_image_family:
raise ValueError("Cannot specify both 'image' and "
"'ex_image_family'")
location = location or driver.zone
if not hasattr(location, 'name'):
location = driver.ex_get_zone(location)
if not hasattr(size, 'name'):
size = driver.ex_get_size(size, location)
if not hasattr(ex_network, 'name'):
ex_network = driver.ex_get_network(ex_network)
if ex_subnetwork and not hasattr(ex_subnetwork, 'name'):
ex_subnetwork = \
driver.ex_get_subnetwork(ex_subnetwork,
region=driver._get_region_from_zone(
location))
if ex_image_family:
image = driver.ex_get_image_from_family(ex_image_family)
if image and not hasattr(image, 'name'):
image = driver.ex_get_image(image)
if not hasattr(ex_disk_type, 'name'):
ex_disk_type = driver.ex_get_disktype(ex_disk_type, zone=location)
node_attrs = {'size': size,
'image': image,
'location': location,
'network': ex_network,
'subnetwork': ex_subnetwork,
'tags': ex_tags,
'metadata': ex_metadata,
'ignore_errors': ignore_errors,
'use_existing_disk': use_existing_disk,
'external_ip': external_ip,
'ex_disk_type': ex_disk_type,
'ex_disk_auto_delete': ex_disk_auto_delete,
'ex_service_accounts': ex_service_accounts,
'description': description,
'ex_can_ip_forward': ex_can_ip_forward,
'ex_disks_gce_struct': ex_disks_gce_struct,
'ex_nic_gce_struct': ex_nic_gce_struct,
'ex_on_host_maintenance': ex_on_host_maintenance,
'ex_automatic_restart': ex_automatic_restart,
'ex_preemptible': ex_preemptible}
# List for holding the status information for disk/node creation.
status_list = []
for i in range(number):
name = 'wp' if ex_preemptible else 'wn'
name += str(uuid.uuid4()) #'%s-%03d' % (base_name, i)
status = {'name': name, 'node_response': None, 'node': None}
status_list.append(status)
start_time = time.time()
complete = False
while not complete:
if (time.time() - start_time >= timeout):
raise Exception("Timeout (%s sec) while waiting for multiple "
"instances")
complete = True
time.sleep(poll_interval)
for status in status_list:
# Create the node or check status if already in progress.
if not status['node']:
if not status['node_response']:
driver._multi_create_node(status, node_attrs)
else:
driver._multi_check_node(status, node_attrs)
# If any of the nodes have not been created (or failed) we are
# not done yet.
if not status['node']:
complete = False
# Return list of nodes
node_list = []
for status in status_list:
node_list.append(status['node'])
return node_list
| [] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 |
services/auth.go | package services
import (
"crypto/sha1"
"errors"
"fmt"
"os"
"time"
"github.com/plattyp/addon/accessor"
"github.com/plattyp/addon/resources"
)
// ValidateSSOToken is used to determine if an SSO token is valid
func ValidateSSOToken(userAccessor accessor.UserAccessor, userID int64, token string, timestamp int64) (*resources.User, int, error) {
herokuSSOSalt := os.Getenv("HEROKU_SSO_SALT")
preToken := fmt.Sprintf("%d:%s:%d", userID, herokuSSOSalt, timestamp)
h := sha1.New()
h.Write([]byte(preToken))
encodedToken := fmt.Sprintf("%x", h.Sum(nil))
// Validate Token
if token != encodedToken {
return nil, 403, errors.New("Token is invalid")
}
// Validate Timestamp
currentTime := time.Now().UTC().Unix()
if currentTime > timestamp {
return nil, 403, errors.New("Token has expired")
}
// Lookup User
user, err := userAccessor.FetchUser(userID)
if err != nil {
return nil, 404, err
}
// Successful Authentication
return user, 200, nil
}
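// Illustrative note only (not part of the add-on code): a test harness could
// build a token exactly the way ValidateSSOToken expects it, assuming
// HEROKU_SSO_SALT is set in the environment, e.g.:
//
//	pre := fmt.Sprintf("%d:%s:%d", userID, os.Getenv("HEROKU_SSO_SALT"), timestamp)
//	sum := sha1.Sum([]byte(pre))
//	token := fmt.Sprintf("%x", sum[:])
//
// Passing that token together with a timestamp in the future should authenticate.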
| ["\"HEROKU_SSO_SALT\""] | [] | ["HEROKU_SSO_SALT"] | [] | ["HEROKU_SSO_SALT"] | go | 1 | 0 |
tests/test_multiprocess.py | from __future__ import unicode_literals
from fcntl import LOCK_EX, LOCK_SH
import glob
import os
import shutil
import sys
import tempfile
import time
from prometheus_client import mmap_dict, values
from prometheus_client.core import (
CollectorRegistry, Counter, Gauge, Histogram, Sample, Summary,
)
from prometheus_client.exposition import generate_latest
import prometheus_client.multiprocess
from prometheus_client.multiprocess import (
advisory_lock, archive_metrics, InMemoryCollector, mark_process_dead,
merge, MultiProcessCollector
)
from prometheus_client.values import MultiProcessValue, MutexValue
if sys.version_info < (2, 7):
# We need the skip decorators from unittest2 on Python 2.6.
import unittest2 as unittest
else:
import unittest
class TestMultiProcess(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
os.environ['prometheus_multiproc_dir'] = self.tempdir
values.ValueClass = MultiProcessValue(lambda: 123)
self.registry = CollectorRegistry()
self.collector = MultiProcessCollector(self.registry, self.tempdir)
@property
def _value_class(self):
return
def tearDown(self):
del os.environ['prometheus_multiproc_dir']
shutil.rmtree(self.tempdir)
values.ValueClass = MutexValue
def test_counter_adds(self):
c1 = Counter('c', 'help', registry=None)
values.ValueClass = MultiProcessValue(lambda: 456)
c2 = Counter('c', 'help', registry=None)
self.assertEqual(0, self.registry.get_sample_value('c_total'))
c1.inc(1)
c2.inc(2)
self.assertEqual(3, self.registry.get_sample_value('c_total'))
def test_summary_adds(self):
s1 = Summary('s', 'help', registry=None)
values.ValueClass = MultiProcessValue(lambda: 456)
s2 = Summary('s', 'help', registry=None)
self.assertEqual(0, self.registry.get_sample_value('s_count'))
self.assertEqual(0, self.registry.get_sample_value('s_sum'))
s1.observe(1)
s2.observe(2)
self.assertEqual(2, self.registry.get_sample_value('s_count'))
self.assertEqual(3, self.registry.get_sample_value('s_sum'))
def test_histogram_adds(self):
h1 = Histogram('h', 'help', registry=None)
values.ValueClass = MultiProcessValue(lambda: 456)
h2 = Histogram('h', 'help', registry=None)
self.assertEqual(0, self.registry.get_sample_value('h_count'))
self.assertEqual(0, self.registry.get_sample_value('h_sum'))
self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
h1.observe(1)
h2.observe(2)
self.assertEqual(2, self.registry.get_sample_value('h_count'))
self.assertEqual(3, self.registry.get_sample_value('h_sum'))
self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
def test_gauge_all(self):
values.ValueClass = MultiProcessValue(lambda: 123)
g1 = Gauge('g', 'help', registry=None, multiprocess_mode='all')
values.ValueClass = MultiProcessValue(lambda: 456)
g2 = Gauge('g', 'help', registry=None, multiprocess_mode='all')
self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '123'}))
self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '456'}))
g1.set(1)
g2.set(2)
archive_metrics()
mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
self.assertEqual(1, self.registry.get_sample_value('g', {'pid': '123'}))
self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'}))
def test_gauge_liveall(self):
g1 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall')
self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '123'}))
g1.set(1)
values.ValueClass = MultiProcessValue(lambda: 456)
g2 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall')
self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '456'}))
g2.set(2)
self.assertEqual(1, self.registry.get_sample_value('g', {'pid': '123'}))
self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'}))
mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
self.assertEqual(None, self.registry.get_sample_value('g', {'pid': '123'}))
self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'}))
def test_gauge_latest(self):
self.assertEqual(None, self.registry.get_sample_value('g'))
g1 = Gauge('g', 'G', registry=None, multiprocess_mode=Gauge.LATEST)
g1.set(0)
self.assertEqual(0, self.registry.get_sample_value('g'))
g1.set(123)
self.assertEqual(123, self.registry.get_sample_value('g'))
t0 = time.time()
g1.set(1, timestamp=t0)
self.assertEqual(1, self.registry.get_sample_value('g'))
archive_metrics()
self.assertEqual(1, self.registry.get_sample_value('g'))
values.ValueClass = MultiProcessValue(lambda: '456789')
g2 = Gauge('g', 'G', registry=None, multiprocess_mode=Gauge.LATEST)
t1 = t0 - time.time()
g2.set(2, timestamp=t1)
self.assertEqual(1, self.registry.get_sample_value('g'))
archive_metrics()
self.assertEqual(1, self.registry.get_sample_value('g'))
def test_gauge_min(self):
g1 = Gauge('g', 'help', registry=None, multiprocess_mode='min')
values.ValueClass = MultiProcessValue(lambda: 456)
g2 = Gauge('g', 'help', registry=None, multiprocess_mode='min')
self.assertEqual(0, self.registry.get_sample_value('g'))
g1.set(1)
g2.set(2)
self.assertEqual(1, self.registry.get_sample_value('g'))
def test_gauge_max(self):
g1 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
values.ValueClass = MultiProcessValue(lambda: 456)
g2 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
self.assertEqual(0, self.registry.get_sample_value('g'))
g1.set(1)
g2.set(2)
self.assertEqual(2, self.registry.get_sample_value('g'))
def test_gauge_livesum(self):
g1 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
values.ValueClass = MultiProcessValue(lambda: 456)
g2 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
self.assertEqual(0, self.registry.get_sample_value('g'))
g1.set(1)
g2.set(2)
self.assertEqual(3, self.registry.get_sample_value('g'))
mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
self.assertEqual(2, self.registry.get_sample_value('g'))
def test_namespace_subsystem(self):
c1 = Counter('c', 'help', registry=None, namespace='ns', subsystem='ss')
c1.inc(1)
self.assertEqual(1, self.registry.get_sample_value('ns_ss_c_total'))
def test_counter_across_forks(self):
pid = 0
values.ValueClass = MultiProcessValue(lambda: pid)
c1 = Counter('c', 'help', registry=None)
self.assertEqual(0, self.registry.get_sample_value('c_total'))
c1.inc(1)
c1.inc(1)
pid = 1
c1.inc(1)
self.assertEqual(3, self.registry.get_sample_value('c_total'))
self.assertEqual(1, c1._value.get())
def test_initialization_detects_pid_change(self):
pid = 0
values.ValueClass = MultiProcessValue(lambda: pid)
# can not inspect the files cache directly, as it's a closure, so we
# check for the actual files themselves
def files():
fs = os.listdir(os.environ['prometheus_multiproc_dir'])
fs.sort()
return fs
c1 = Counter('c1', 'c1', registry=None)
self.assertEqual(files(), ['counter_0.db'])
c2 = Counter('c2', 'c2', registry=None)
self.assertEqual(files(), ['counter_0.db'])
pid = 1
c3 = Counter('c3', 'c3', registry=None)
self.assertEqual(files(), ['counter_0.db', 'counter_1.db'])
@unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
def test_collect(self):
pid = 0
values.ValueClass = MultiProcessValue(lambda: pid)
labels = dict((i, i) for i in 'abcd')
def add_label(key, value):
l = labels.copy()
l[key] = value
return l
c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
g = Gauge('g', 'help', labelnames=labels.keys(), registry=None,
multiprocess_mode='all')
h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
c.labels(**labels).inc(1)
g.labels(**labels).set(1)
h.labels(**labels).observe(1)
pid = 1
c.labels(**labels).inc(1)
g.labels(**labels).set(1)
h.labels(**labels).observe(5)
metrics = dict((m.name, m) for m in self.collector.collect())
self.assertEqual(
metrics['c'].samples, [Sample('c_total', labels, 2.0)]
)
metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
self.assertEqual(metrics['g'].samples, [
Sample('g', add_label('pid', '0'), 1.0),
Sample('g', add_label('pid', '1'), 1.0),
])
metrics['h'].samples.sort(
key=lambda x: (x[0], float(x[1].get('le', 0)))
)
expected_histogram = [
Sample('h_bucket', add_label('le', '0.005'), 0.0),
Sample('h_bucket', add_label('le', '0.01'), 0.0),
Sample('h_bucket', add_label('le', '0.025'), 0.0),
Sample('h_bucket', add_label('le', '0.05'), 0.0),
Sample('h_bucket', add_label('le', '0.075'), 0.0),
Sample('h_bucket', add_label('le', '0.1'), 0.0),
Sample('h_bucket', add_label('le', '0.25'), 0.0),
Sample('h_bucket', add_label('le', '0.5'), 0.0),
Sample('h_bucket', add_label('le', '0.75'), 0.0),
Sample('h_bucket', add_label('le', '1.0'), 1.0),
Sample('h_bucket', add_label('le', '2.5'), 1.0),
Sample('h_bucket', add_label('le', '5.0'), 2.0),
Sample('h_bucket', add_label('le', '7.5'), 2.0),
Sample('h_bucket', add_label('le', '10.0'), 2.0),
Sample('h_bucket', add_label('le', '+Inf'), 2.0),
Sample('h_count', labels, 2.0),
Sample('h_sum', labels, 6.0),
]
self.assertEqual(metrics['h'].samples, expected_histogram)
@unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
def test_merge_no_accumulate(self):
pid = 0
values.ValueClass = MultiProcessValue(lambda: pid)
labels = dict((i, i) for i in 'abcd')
def add_label(key, value):
l = labels.copy()
l[key] = value
return l
h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
h.labels(**labels).observe(1)
pid = 1
h.labels(**labels).observe(5)
path = os.path.join(os.environ['prometheus_multiproc_dir'], '*.db')
files = glob.glob(path)
metrics = dict(
(m.name, m) for m in merge(files, accumulate=False)
)
metrics['h'].samples.sort(
key=lambda x: (x[0], float(x[1].get('le', 0)))
)
expected_histogram = [
Sample('h_bucket', add_label('le', '0.005'), 0.0),
Sample('h_bucket', add_label('le', '0.01'), 0.0),
Sample('h_bucket', add_label('le', '0.025'), 0.0),
Sample('h_bucket', add_label('le', '0.05'), 0.0),
Sample('h_bucket', add_label('le', '0.075'), 0.0),
Sample('h_bucket', add_label('le', '0.1'), 0.0),
Sample('h_bucket', add_label('le', '0.25'), 0.0),
Sample('h_bucket', add_label('le', '0.5'), 0.0),
Sample('h_bucket', add_label('le', '0.75'), 0.0),
Sample('h_bucket', add_label('le', '1.0'), 1.0),
Sample('h_bucket', add_label('le', '2.5'), 0.0),
Sample('h_bucket', add_label('le', '5.0'), 1.0),
Sample('h_bucket', add_label('le', '7.5'), 0.0),
Sample('h_bucket', add_label('le', '10.0'), 0.0),
Sample('h_bucket', add_label('le', '+Inf'), 0.0),
Sample('h_sum', labels, 6.0),
]
self.assertEqual(metrics['h'].samples, expected_histogram)
def test_missing_gauge_file_during_merge(self):
# These files don't exist, just like if mark_process_dead(9999999) had been
# called during self.collector.collect(), after the glob found it
# but before the merge actually happened.
        # This should not raise, and should return no metrics
self.assertFalse(merge([
os.path.join(self.tempdir, 'gauge_liveall_9999999.db'),
os.path.join(self.tempdir, 'gauge_livesum_9999999.db'),
]))
class TestMmapedDict(unittest.TestCase):
def setUp(self):
fd, self.tempfile = tempfile.mkstemp()
os.close(fd)
self.d = mmap_dict.MmapedDict(self.tempfile)
def test_timestamp(self):
t0 = time.time()
self.d.write_value("foo", 3.0, timestamp=t0)
v, t = self.d.read_value_timestamp("foo")
self.assertTrue((v - 3.0) ** 2 < 0.001)
self.assertTrue((t0 - t) ** 2 < 0.001)
def test_process_restart(self):
self.d.write_value('abc', 123.0)
self.d.close()
self.d = mmap_dict.MmapedDict(self.tempfile)
self.assertEqual(123, self.d.read_value('abc'))
self.assertEqual([('abc', 123.0, None)], list(self.d.read_all_values()))
def test_expansion(self):
key = 'a' * mmap_dict._INITIAL_MMAP_SIZE
self.d.write_value(key, 123.0)
self.assertEqual([(key, 123.0, None)], list(self.d.read_all_values()))
def test_multi_expansion(self):
key = 'a' * mmap_dict._INITIAL_MMAP_SIZE * 4
self.d.write_value('abc', 42.0)
self.d.write_value(key, 123.0)
self.d.write_value('def', 17.0)
self.assertEqual(
[('abc', 42.0, None), (key, 123.0, None), ('def', 17.0, None)],
list(self.d.read_all_values()))
def test_corruption_detected(self):
self.d.write_value('abc', 42.0)
# corrupt the written data
self.d._m[8:16] = b'somejunk'
with self.assertRaises(RuntimeError):
list(self.d.read_all_values())
def tearDown(self):
os.unlink(self.tempfile)
class TestUnsetEnv(unittest.TestCase):
def setUp(self):
self.registry = CollectorRegistry()
fp, self.tmpfl = tempfile.mkstemp()
os.close(fp)
def test_unset_syncdir_env(self):
self.assertRaises(
ValueError, MultiProcessCollector, self.registry)
def test_file_syncpath(self):
registry = CollectorRegistry()
self.assertRaises(
ValueError, MultiProcessCollector, registry, self.tmpfl)
def tearDown(self):
os.remove(self.tmpfl)
class TestAdvisoryLock(unittest.TestCase):
"""
    These tests use lock acquisition as a proxy for cleanup/collect operations,
    the former using exclusive locks, the latter shared locks.
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
os.environ['prometheus_multiproc_dir'] = self.tempdir
values.ValueClass = MultiProcessValue(lambda: 123)
self.registry = CollectorRegistry()
self.collector = MultiProcessCollector(self.registry, self.tempdir)
def test_cleanup_waits_for_collectors(self):
# IOError in python2, OSError in python3
with self.assertRaises(EnvironmentError):
with advisory_lock(LOCK_SH):
archive_metrics(blocking=False)
def test_collect_doesnt_block_other_collects(self):
values.ValueClass = MultiProcessValue(lambda: 0)
labels = dict((i, i) for i in 'abcd')
c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
c.labels(**labels).inc(1)
with advisory_lock(LOCK_SH):
metrics = dict((m.name, m) for m in self.collector.collect(blocking=False))
self.assertEqual(
metrics['c'].samples, [Sample('c_total', labels, 1.0)]
)
def test_collect_waits_for_cleanup(self):
values.ValueClass = MultiProcessValue(lambda: 0)
labels = dict((i, i) for i in 'abcd')
c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
c.labels(**labels).inc(1)
with self.assertRaises(EnvironmentError):
with advisory_lock(LOCK_EX):
self.collector.collect(blocking=False)
def test_exceptions_release_lock(self):
with self.assertRaises(ValueError):
with advisory_lock(LOCK_EX):
raise ValueError
# Do an operation which requires acquiring the lock
archive_metrics(blocking=False)
def tearDown(self):
del os.environ['prometheus_multiproc_dir']
shutil.rmtree(self.tempdir)
values.ValueClass = MutexValue
class TestInMemoryCollector(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
os.environ['prometheus_multiproc_dir'] = self.tempdir
values.ValueClass = MultiProcessValue(lambda: 123)
self.registry = CollectorRegistry()
self.collector = InMemoryCollector(self.registry)
def tearDown(self):
del os.environ['prometheus_multiproc_dir']
shutil.rmtree(self.tempdir)
values.ValueClass = MutexValue
prometheus_client.multiprocess._metrics_cache = prometheus_client.multiprocess.MetricsCache()
def test_serves_empty_metrics_if_no_metrics_written(self):
self.assertEqual(self.collector.collect(), [])
def test_serves_empty_metrics_if_not_processed(self):
c1 = Counter('c', 'help', registry=None)
# The cleanup/archiver task hasn't run yet, no metrics
self.assertEqual(None, self.registry.get_sample_value('c_total'))
c1.inc(1)
# Still no metrics
self.assertEqual(self.collector.collect(), [])
def test_serves_metrics(self):
labels = dict((i, i) for i in 'abcd')
c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
c.labels(**labels).inc(1)
self.assertEqual(None, self.registry.get_sample_value('c_total', labels))
archive_metrics()
self.assertEqual(self.collector.collect()[0].samples, [Sample('c_total', labels, 1.0)])
def test_displays_archive_stats(self):
output = generate_latest(self.registry)
self.assertIn("archive_duration_seconds", output)
def test_aggregates_live_and_archived_metrics(self):
pid = 456
values.ValueClass = MultiProcessValue(lambda: pid)
def files():
fs = os.listdir(os.environ['prometheus_multiproc_dir'])
fs.sort()
return fs
c1 = Counter('c1', 'c1', registry=None)
c1.inc(1)
self.assertIn('counter_456.db', files())
archive_metrics()
self.assertNotIn('counter_456.db', files())
self.assertEqual(1, self.registry.get_sample_value('c1_total'))
pid = 789
values.ValueClass = MultiProcessValue(lambda: pid)
c1 = Counter('c1', 'c1', registry=None)
c1.inc(2)
g1 = Gauge('g1', 'g1', registry=None, multiprocess_mode="liveall")
g1.set(5)
self.assertIn('counter_789.db', files())
# Pretend that pid 789 is live
archive_metrics(aggregate_only=True)
# The live counter should be merged with the archived counter, and the
# liveall gauge should be included
self.assertIn('counter_789.db', files())
self.assertIn('gauge_liveall_789.db', files())
self.assertEqual(3, self.registry.get_sample_value('c1_total'))
self.assertEqual(5, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))
# Now pid 789 is dead
archive_metrics()
# The formerly live counter's value should be archived, and the
# liveall gauge should be removed completely
self.assertNotIn('counter_789.db', files())
self.assertNotIn('gauge_liveall_789.db', files())
self.assertEqual(3, self.registry.get_sample_value('c1_total'))
self.assertEqual(None, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))
| [] | [] | ["prometheus_multiproc_dir"] | [] | ["prometheus_multiproc_dir"] | python | 1 | 0 |
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
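# Illustrative note (not from the original generator): given the conventions
# above, NinjaWriter.ExpandSpecial later rewrites these placeholders, e.g.
# (assuming the cwd is already the product dir):
#   '$!PRODUCT_DIR/gen'        -> 'gen'
#   '$!INTERMEDIATE_DIR'       -> a per-target dir such as 'obj/<base_dir>/<target>.gen'
#   'foo/$|CONFIGURATION_NAME' -> 'foo/Debug' for the Debug config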
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
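# Example (illustrative): Define('FOO=1', 'linux') returns '-DFOO=1' unchanged,
# while on the 'win' flavor a define containing '#' is octal-escaped first
# (because cl.exe would otherwise turn '#' into '='), so '#' is rewritten to
# the four characters \043 before quoting.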
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
# so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
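# A worked example (illustrative, with made-up paths): for a gyp file foo/bar.gyp
# built into out/Debug, build_to_base is '../../foo', so
#   GypPathToNinja('baz/quux.cc') -> '../../foo/baz/quux.cc'
# while GypPathToUniqueOutput namespaces outputs under 'obj/' as described in
# its docstring below.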
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
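  # Illustrative only: with name='actions_depends' and two dependency paths,
  # this emits something like
  #   build obj/<base_dir>/<target>.actions_depends.stamp: stamp dep1 dep2
  # and returns that single stamp path for downstream edges to depend on.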
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
    This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the asset catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, then the build will probably be done
at each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there's two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
        # If an rspfile contains spaces, ninja surrounds the filename with
        # quotes and then passes it to open(), creating a file with quotes in
        # its name (and when looking for the rsp file, the name makes it
        # through bash, which strips the quotes) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-null if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
        'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
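  # Illustrative example: ComputeExportEnvString([('FOO', 'a b')]) returns
  # 'export FOO="a b";' -- the quoting comes from EncodePOSIXShellArgument and
  # any '$' characters are additionally escaped for ninja by ninja_syntax.escape.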
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
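  # Illustrative example: a 'shared_library' target named 'foo' becomes
  # 'libfoo.so' on Linux, 'libfoo.dylib' on Mac and 'foo.dll' on Windows, while
  # a 'none' target becomes 'foo.stamp'.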
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
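  # Illustrative example: on Linux a 'shared_library' named 'foo' built with the
  # 'target' toolset lands at 'lib/libfoo.so' inside the build directory, and
  # the 'host' variant at 'lib/host/libfoo.so'.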
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
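    # Illustrative example: a target 'my-lib' (toolset 'target') with a rule
    # named 'gen-files' produces the ninja rule name 'my_lib_target_gen_files'.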
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
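# Illustrative example: CommandWithWrapper('CC', {'CC': 'ccache'}, 'clang')
# returns 'ccache clang'; with no matching wrapper the program is returned
# unchanged.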
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
          # Allow 8 GB per link on Linux because Gold is quite memory hungry
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
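# Illustrative example: with the heuristics above, a 32 GB Linux machine allows
# 4 concurrent links (8 GB each) and a 64 GB Windows machine allows 12 (5 GB
# each); GYP_LINK_CONCURRENCY overrides the heuristic entirely and
# GYP_LINK_CONCURRENCY_MAX caps the Windows value.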
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, then the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host and no
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'LDXX':
ldxx = os.path.join(build_to_root, value)
if key == 'LDXX.host':
ldxx_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
      # Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
  # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
master_ninja.variable('ldxx_host', ldxx_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %(suffix)s which
    # is used in the final substitution below.
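    # Illustrative example: if libfoo.so is relinked but its exported interface
    # is unchanged, the cmp/mv dance below leaves libfoo.so.TOC untouched, so
    # (together with restat on the solink rules) dependents that only read the
    # TOC are not relinked.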
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in $solibs $libs -Wl,--end-group',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs $libs -Wl,--end-group'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
if flavor == 'win':
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
elif flavor == 'zos':
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -fRP $in $out')
else:
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
  # Short names of targets that were skipped because they didn't contain
  # anything interesting.
  # NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
  # NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
    # short names are not necessarily unique, only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| []
| []
| [
"CFLAGS",
"GYP_LINK_CONCURRENCY",
"CFLAGS_host",
"GYP_USE_SEPARATE_MSPDBSRV",
"GYP_LINK_CONCURRENCY_MAX",
"CXXFLAGS_host",
"CPPFLAGS",
"CPPFLAGS_host",
"LDFLAGS",
"CXXFLAGS"
]
| [] | ["CFLAGS", "GYP_LINK_CONCURRENCY", "CFLAGS_host", "GYP_USE_SEPARATE_MSPDBSRV", "GYP_LINK_CONCURRENCY_MAX", "CXXFLAGS_host", "CPPFLAGS", "CPPFLAGS_host", "LDFLAGS", "CXXFLAGS"] | python | 10 | 0 | |
api.go | package main
import (
"fmt"
"os"
"strconv"
"github.com/claudiocandio/gemini-api"
"github.com/claudiocandio/gemini-api/logger"
"gopkg.in/yaml.v2"
)
type gemini_yml struct {
Gemini_api_credentials struct {
Gemini_api_key string `yaml:"gemini_api_key"`
Gemini_api_secret string `yaml:"gemini_api_secret"`
Gemini_api_production string `yaml:"gemini_api_production"`
} `yaml:"gemini_api_credentials"`
}
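// Illustrative config file matching the struct above (values are placeholders):
//
//	gemini_api_credentials:
//	  gemini_api_key: "my-key"
//	  gemini_api_secret: "my-secret"
//	  gemini_api_production: "false"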
func start_api(gemini_config_yml string) (*gemini.Api, error) {
var gc gemini_yml
var gemini_api_production bool
var err error
if gemini_config_yml != "" {
fp, err := os.Open(gemini_config_yml)
if err != nil {
logger.Debug("Open configuration file error",
fmt.Sprintf("gemini_config_yml:%s", gemini_config_yml),
fmt.Sprintf("error:%s", err))
return nil, fmt.Errorf("Cannot open configuration file: %s", gemini_config_yml)
}
decoder := yaml.NewDecoder(fp)
err = decoder.Decode(&gc)
if err != nil {
return nil, err
}
fp.Close()
}
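	// Any credential not supplied in the YAML file falls back to the
	// corresponding environment variable below.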
if gc.Gemini_api_credentials.Gemini_api_key == "" {
gc.Gemini_api_credentials.Gemini_api_key = os.Getenv("GEMINI_API_KEY")
if gc.Gemini_api_credentials.Gemini_api_key == "" {
errlog.Println("Missing GEMINI_API_KEY")
}
}
if gc.Gemini_api_credentials.Gemini_api_secret == "" {
gc.Gemini_api_credentials.Gemini_api_secret = os.Getenv("GEMINI_API_SECRET")
if gc.Gemini_api_credentials.Gemini_api_secret == "" {
errlog.Println("Missing GEMINI_API_SECRET")
}
}
if gc.Gemini_api_credentials.Gemini_api_production != "" {
gemini_api_production, err = strconv.ParseBool(gc.Gemini_api_credentials.Gemini_api_production)
if gc.Gemini_api_credentials.Gemini_api_production != "" && err != nil {
errlog.Println("GEMINI_API_PRODUCTION environment variable must be set as true or false")
gc.Gemini_api_credentials.Gemini_api_production = ""
}
} else {
gc.Gemini_api_credentials.Gemini_api_production = os.Getenv("GEMINI_API_PRODUCTION")
if gc.Gemini_api_credentials.Gemini_api_production == "" {
errlog.Println("Missing GEMINI_API_PRODUCTION")
}
gemini_api_production, err = strconv.ParseBool(gc.Gemini_api_credentials.Gemini_api_production)
if gc.Gemini_api_credentials.Gemini_api_production != "" && err != nil {
errlog.Println("GEMINI_API_PRODUCTION environment variable must be set as true or false")
gc.Gemini_api_credentials.Gemini_api_production = ""
}
}
if gc.Gemini_api_credentials.Gemini_api_key == "" ||
gc.Gemini_api_credentials.Gemini_api_secret == "" ||
gc.Gemini_api_credentials.Gemini_api_production == "" {
return nil, fmt.Errorf("Set Gemini API credentials.")
}
if gemini_api_production {
logger.Debug("Connecting to Gemini Production site.")
} else {
logger.Debug("Connecting to Gemini Sandbox site.")
}
api := gemini.New(gemini_api_production,
gc.Gemini_api_credentials.Gemini_api_key,
gc.Gemini_api_credentials.Gemini_api_secret)
	// will show gemini api key & secret !!!
logger.Trace("Gemini",
fmt.Sprintf("api:%v", api),
fmt.Sprintf("gc:%v", gc))
return api, nil
}
| [
"\"GEMINI_API_KEY\"",
"\"GEMINI_API_SECRET\"",
"\"GEMINI_API_PRODUCTION\""
]
| []
| [
"GEMINI_API_SECRET",
"GEMINI_API_PRODUCTION",
"GEMINI_API_KEY"
]
| [] | ["GEMINI_API_SECRET", "GEMINI_API_PRODUCTION", "GEMINI_API_KEY"] | go | 3 | 0 | |
train.py | #!/usr/bin/env python3
"""Train L-CNN
Usage:
train.py [options] <yaml-config>
train.py (-h | --help )
Arguments:
<yaml-config> Path to the yaml hyper-parameter file
Options:
-h --help Show this screen.
   -d --devices <devices>            Comma separated GPU devices [default: 0]
-i --identifier <identifier> Folder identifier [default: default-identifier]
"""
import datetime
import glob
import os
import os.path as osp
import platform
import pprint
import random
import shlex
import shutil
import signal
import subprocess
import sys
import threading
import numpy as np
import torch
import yaml
from docopt import docopt
import scipy.io as sio
import lcnn
from lcnn.config import C, M
from lcnn.datasets import WireframeDataset, collate
from lcnn.models.line_vectorizer import LineVectorizer
from lcnn.models.multitask_learner import MultitaskHead, MultitaskLearner
from lcnn.models.HT import hough_transform
def get_outdir(identifier):
# load config
name = str(datetime.datetime.now().strftime("%y%m%d-%H%M%S"))
name += "-%s" % identifier
outdir = osp.join(osp.expanduser(C.io.logdir), name)
if not osp.exists(outdir):
os.makedirs(outdir)
C.io.resume_from = outdir
C.to_yaml(osp.join(outdir, "config.yaml"))
return outdir
def main():
args = docopt(__doc__)
config_file = args["<yaml-config>"] or "config/wireframe.yaml"
C.update(C.from_yaml(filename=config_file))
M.update(C.model)
pprint.pprint(C, indent=4)
resume_from = C.io.resume_from
# WARNING: L-CNN is still not deterministic
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
device_name = "cpu"
os.environ["CUDA_VISIBLE_DEVICES"] = args["--devices"]
if torch.cuda.is_available():
device_name = "cuda"
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed(0)
print("Let's use", torch.cuda.device_count(), "GPU(s)!")
else:
print("CUDA is not available")
device = torch.device(device_name)
# 1. dataset
# uncomment for debug DataLoader
# wireframe.datasets.WireframeDataset(datadir, split="train")[0]
# sys.exit(0)
datadir = C.io.datadir
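    # Shared DataLoader settings; multi-process data loading is disabled on Windows (os.name == "nt").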
kwargs = {
"collate_fn": collate,
"num_workers": C.io.num_workers if os.name != "nt" else 0,
"pin_memory": True,
}
train_loader = torch.utils.data.DataLoader(
WireframeDataset(datadir, split="train"),
shuffle=True,
batch_size=M.batch_size,
**kwargs,
)
val_loader = torch.utils.data.DataLoader(
WireframeDataset(datadir, split="valid"),
shuffle=False,
batch_size=M.batch_size_eval,
**kwargs,
)
epoch_size = len(train_loader)
# print("epoch_size (train):", epoch_size)
# print("epoch_size (valid):", len(val_loader))
if resume_from:
checkpoint = torch.load(osp.join(resume_from, "checkpoint_latest.pth"))
# 2. model
### load vote_index matrix for Hough transform
    ### default settings: (128, 128, 3, 1)
if os.path.isfile(C.io.vote_index):
print('load vote_index ... ')
vote_index = sio.loadmat(C.io.vote_index)['vote_index']
else:
print('compute vote_index ... ')
vote_index = hough_transform(rows=128, cols=128, theta_res=3, rho_res=1)
sio.savemat(C.io.vote_index, {'vote_index': vote_index})
vote_index = torch.from_numpy(vote_index).float().contiguous().to(device)
print('vote_index loaded', vote_index.shape)
if M.backbone == "stacked_hourglass":
model = lcnn.models.hg(
depth=M.depth,
head=MultitaskHead,
num_stacks=M.num_stacks,
num_blocks=M.num_blocks,
num_classes=sum(sum(M.head_size, [])),
vote_index=vote_index,
)
else:
raise NotImplementedError
model = MultitaskLearner(model)
model = LineVectorizer(model)
print("model:", model)
train_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('num of total parameters', train_params)
if resume_from:
model.load_state_dict(checkpoint["model_state_dict"])
model = model.to(device)
# 3. optimizer
if C.optim.name == "Adam":
optim = torch.optim.Adam(
model.parameters(),
lr=C.optim.lr,
weight_decay=C.optim.weight_decay,
amsgrad=C.optim.amsgrad,
)
elif C.optim.name == "SGD":
optim = torch.optim.SGD(
model.parameters(),
lr=C.optim.lr,
weight_decay=C.optim.weight_decay,
momentum=C.optim.momentum,
)
else:
raise NotImplementedError
if resume_from:
optim.load_state_dict(checkpoint["optim_state_dict"])
outdir = resume_from or get_outdir(args["--identifier"])
print("outdir:", outdir)
try:
trainer = lcnn.trainer.Trainer(
device=device,
model=model,
optimizer=optim,
train_loader=train_loader,
val_loader=val_loader,
out=outdir,
)
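        # When resuming, restore the iteration counter (snapped back to an epoch boundary) and the best mean loss.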
if resume_from:
trainer.iteration = checkpoint["iteration"]
if trainer.iteration % epoch_size != 0:
print("WARNING: iteration is not a multiple of epoch_size, reset it")
trainer.iteration -= trainer.iteration % epoch_size
trainer.best_mean_loss = checkpoint["best_mean_loss"]
del checkpoint
trainer.train()
except BaseException:
if len(glob.glob(f"{outdir}/viz/*")) <= 1:
shutil.rmtree(outdir)
raise
if __name__ == "__main__":
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
usaspending_api/download/filestreaming/download_generation.py | import json
import logging
import multiprocessing
import os
from pathlib import Path
from typing import Optional, Tuple, List
import psutil as ps
import re
import shutil
import subprocess
import tempfile
import time
import traceback
from datetime import datetime, timezone
from django.conf import settings
from usaspending_api.awards.v2.filters.filter_helpers import add_date_range_comparison_types
from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, assistance_type_mapping, idv_type_mapping
from usaspending_api.common.csv_helpers import count_rows_in_delimited_file, partition_large_delimited_file
from usaspending_api.common.exceptions import InvalidParameterException
from usaspending_api.common.helpers.orm_helpers import generate_raw_quoted_query
from usaspending_api.common.helpers.s3_helpers import multipart_upload
from usaspending_api.common.helpers.text_helpers import slugify_text_for_file_names
from usaspending_api.common.retrieve_file_from_uri import RetrieveFileFromUri
from usaspending_api.download.download_utils import construct_data_date_range
from usaspending_api.download.filestreaming import NAMING_CONFLICT_DISCRIMINATOR
from usaspending_api.download.filestreaming.download_source import DownloadSource
from usaspending_api.download.filestreaming.file_description import build_file_description, save_file_description
from usaspending_api.download.filestreaming.zip_file import append_files_to_zip_file
from usaspending_api.download.helpers import verify_requested_columns_available, write_to_download_log as write_to_log
from usaspending_api.download.lookups import JOB_STATUS_DICT, VALUE_MAPPINGS, FILE_FORMATS
from usaspending_api.download.models import DownloadJob
DOWNLOAD_VISIBILITY_TIMEOUT = 60 * 10
MAX_VISIBILITY_TIMEOUT = 60 * 60 * settings.DOWNLOAD_DB_TIMEOUT_IN_HOURS
EXCEL_ROW_LIMIT = 1000000
WAIT_FOR_PROCESS_SLEEP = 5
logger = logging.getLogger(__name__)
def generate_download(download_job: DownloadJob, origination: Optional[str] = None):
"""Create data archive files from the download job object"""
# Parse data from download_job
json_request = json.loads(download_job.json_request)
columns = json_request.get("columns", None)
limit = json_request.get("limit", None)
piid = json_request.get("piid", None)
award_id = json_request.get("award_id")
assistance_id = json_request.get("assistance_id")
file_format = json_request.get("file_format")
file_name = start_download(download_job)
working_dir = None
try:
# Create temporary files and working directory
zip_file_path = settings.CSV_LOCAL_PATH + file_name
if not settings.IS_LOCAL and os.path.exists(zip_file_path):
# Clean up a zip file that might exist from a prior attempt at this download
os.remove(zip_file_path)
working_dir = os.path.splitext(zip_file_path)[0]
if not os.path.exists(working_dir):
os.mkdir(working_dir)
write_to_log(message=f"Generating {file_name}", download_job=download_job)
# Generate sources from the JSON request object
sources = get_download_sources(json_request, origination)
for source in sources:
# Parse and write data to the file; if there are no matching columns for a source then add an empty file
source_column_count = len(source.columns(columns))
if source_column_count == 0:
create_empty_data_file(
source, download_job, working_dir, piid, assistance_id, zip_file_path, file_format
)
else:
download_job.number_of_columns += source_column_count
parse_source(
source, columns, download_job, working_dir, piid, assistance_id, zip_file_path, limit, file_format
)
include_data_dictionary = json_request.get("include_data_dictionary")
if include_data_dictionary:
add_data_dictionary_to_zip(working_dir, zip_file_path)
include_file_description = json_request.get("include_file_description")
if include_file_description:
write_to_log(message="Adding file description to zip file")
file_description = build_file_description(include_file_description["source"], sources)
file_description = file_description.replace("[AWARD_ID]", str(award_id))
file_description_path = save_file_description(
working_dir, include_file_description["destination"], file_description
)
append_files_to_zip_file([file_description_path], zip_file_path)
download_job.file_size = os.stat(zip_file_path).st_size
except InvalidParameterException as e:
exc_msg = "InvalidParameterException was raised while attempting to process the DownloadJob"
fail_download(download_job, e, exc_msg)
raise InvalidParameterException(e)
except Exception as e:
# Set error message; job_status_id will be set in download_sqs_worker.handle()
exc_msg = "An exception was raised while attempting to process the DownloadJob"
fail_download(download_job, e, exc_msg)
raise Exception(download_job.error_message) from e
finally:
# Remove working directory
if working_dir and os.path.exists(working_dir):
shutil.rmtree(working_dir)
_kill_spawned_processes(download_job)
try:
# push file to S3 bucket, if not local
if not settings.IS_LOCAL:
bucket = settings.BULK_DOWNLOAD_S3_BUCKET_NAME
region = settings.USASPENDING_AWS_REGION
start_uploading = time.perf_counter()
multipart_upload(bucket, region, zip_file_path, os.path.basename(zip_file_path))
write_to_log(
message=f"Uploading took {time.perf_counter() - start_uploading:.2f}s", download_job=download_job
)
except Exception as e:
# Set error message; job_status_id will be set in download_sqs_worker.handle()
exc_msg = "An exception was raised while attempting to upload the file"
fail_download(download_job, e, exc_msg)
if isinstance(e, InvalidParameterException):
raise InvalidParameterException(e)
else:
raise Exception(download_job.error_message) from e
finally:
# Remove generated file
if not settings.IS_LOCAL and os.path.exists(zip_file_path):
os.remove(zip_file_path)
_kill_spawned_processes(download_job)
return finish_download(download_job)
def get_download_sources(json_request: dict, origination: Optional[str] = None):
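    """Build the list of DownloadSource objects (award, account, or disaster sources) described by the JSON request."""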
download_sources = []
for download_type in json_request["download_types"]:
agency_id = json_request.get("agency", "all")
filter_function = VALUE_MAPPINGS[download_type]["filter_function"]
download_type_table = VALUE_MAPPINGS[download_type]["table"]
if VALUE_MAPPINGS[download_type]["source_type"] == "award":
# Award downloads
# Use correct date range columns for advanced search
            # (Will not change anything for keyword search since "time_period" is not provided)
filters = add_date_range_comparison_types(
json_request["filters"],
is_subaward=download_type != "awards",
gte_date_type="action_date",
lte_date_type="date_signed",
)
queryset = filter_function(filters)
if filters.get("prime_and_sub_award_types") is not None:
award_type_codes = set(filters["prime_and_sub_award_types"][download_type])
else:
award_type_codes = set(filters["award_type_codes"])
if (
award_type_codes & (set(contract_type_mapping.keys()) | set(idv_type_mapping.keys()))
or "procurement" in award_type_codes
):
# only generate d1 files if the user is asking for contract data
d1_source = DownloadSource(VALUE_MAPPINGS[download_type]["table_name"], "d1", download_type, agency_id)
d1_filters = {f"{VALUE_MAPPINGS[download_type]['contract_data']}__isnull": False}
d1_source.queryset = queryset & download_type_table.objects.filter(**d1_filters)
download_sources.append(d1_source)
if award_type_codes & set(assistance_type_mapping.keys()) or ("grant" in award_type_codes):
# only generate d2 files if the user is asking for assistance data
d2_source = DownloadSource(VALUE_MAPPINGS[download_type]["table_name"], "d2", download_type, agency_id)
d2_filters = {f"{VALUE_MAPPINGS[download_type]['assistance_data']}__isnull": False}
d2_source.queryset = queryset & download_type_table.objects.filter(**d2_filters)
download_sources.append(d2_source)
elif VALUE_MAPPINGS[download_type]["source_type"] == "account":
# Account downloads
account_source = DownloadSource(
VALUE_MAPPINGS[download_type]["table_name"], json_request["account_level"], download_type, agency_id
)
account_source.queryset = filter_function(
download_type,
VALUE_MAPPINGS[download_type]["table"],
json_request["filters"],
json_request["account_level"],
)
download_sources.append(account_source)
elif VALUE_MAPPINGS[download_type]["source_type"] == "disaster":
# Disaster Page downloads
disaster_source = DownloadSource(
VALUE_MAPPINGS[download_type]["source_type"],
VALUE_MAPPINGS[download_type]["table_name"],
download_type,
agency_id,
)
disaster_source.award_category = json_request["award_category"]
disaster_source.queryset = filter_function(
json_request["filters"], download_type, VALUE_MAPPINGS[download_type]["base_fields"]
)
download_sources.append(disaster_source)
verify_requested_columns_available(tuple(download_sources), json_request.get("columns", []))
return download_sources
def build_data_file_name(source, download_job, piid, assistance_id):
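    """Build the data file name from the download type's naming pattern (monthly archives reuse the zip file name)."""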
d_map = {"d1": "Contracts", "d2": "Assistance", "treasury_account": "TAS", "federal_account": "FA"}
if download_job and download_job.monthly_download:
# For monthly archives, use the existing detailed zip filename for the data files
# e.g. FY(All)-012_Contracts_Delta_20191108.zip -> FY(All)-012_Contracts_Delta_20191108_%.csv
return strip_file_extension(download_job.file_name)
file_name_pattern = VALUE_MAPPINGS[source.source_type]["download_name"]
timestamp = datetime.strftime(datetime.now(timezone.utc), "%Y-%m-%d_H%HM%MS%S")
if source.is_for_idv or source.is_for_contract:
data_file_name = file_name_pattern.format(piid=slugify_text_for_file_names(piid, "UNKNOWN", 50))
elif source.is_for_assistance:
data_file_name = file_name_pattern.format(
assistance_id=slugify_text_for_file_names(assistance_id, "UNKNOWN", 50)
)
elif source.source_type == "disaster_recipient":
data_file_name = file_name_pattern.format(award_category=source.award_category, timestamp=timestamp)
else:
if source.agency_code == "all":
agency = "All"
else:
agency = str(source.agency_code)
request = json.loads(download_job.json_request)
filters = request["filters"]
if request.get("limit"):
agency = ""
elif source.file_type not in ("treasury_account", "federal_account"):
agency = f"{agency}_"
data_file_name = file_name_pattern.format(
agency=agency,
data_quarters=construct_data_date_range(filters),
level=d_map[source.file_type],
timestamp=timestamp,
type=d_map[source.file_type],
)
return data_file_name
def parse_source(source, columns, download_job, working_dir, piid, assistance_id, zip_file_path, limit, file_format):
"""Write to delimited text file(s) and zip file(s) using the source data"""
data_file_name = build_data_file_name(source, download_job, piid, assistance_id)
source_query = source.row_emitter(columns)
extension = FILE_FORMATS[file_format]["extension"]
source.file_name = f"{data_file_name}.{extension}"
source_path = os.path.join(working_dir, source.file_name)
write_to_log(message=f"Preparing to download data as {source.file_name}", download_job=download_job)
# Generate the query file; values, limits, dates fixed
export_query = generate_export_query(source_query, limit, source, columns, file_format)
temp_file, temp_file_path = generate_export_query_temp_file(export_query, download_job)
start_time = time.perf_counter()
try:
# Create a separate process to run the PSQL command; wait
psql_process = multiprocessing.Process(target=execute_psql, args=(temp_file_path, source_path, download_job))
psql_process.start()
wait_for_process(psql_process, start_time, download_job)
delim = FILE_FORMATS[file_format]["delimiter"]
# Log how many rows we have
write_to_log(message="Counting rows in delimited text file", download_job=download_job)
try:
download_job.number_of_rows += count_rows_in_delimited_file(
filename=source_path, has_header=True, delimiter=delim
)
except Exception:
write_to_log(
message="Unable to obtain delimited text file line count", is_error=True, download_job=download_job
)
download_job.save()
# Create a separate process to split the large data files into smaller file and write to zip; wait
zip_process = multiprocessing.Process(
target=split_and_zip_data_files,
args=(zip_file_path, source_path, data_file_name, file_format, download_job),
)
zip_process.start()
wait_for_process(zip_process, start_time, download_job)
download_job.save()
except Exception as e:
raise e
finally:
# Remove temporary files
os.close(temp_file)
os.remove(temp_file_path)
def split_and_zip_data_files(zip_file_path, source_path, data_file_name, file_format, download_job=None):
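    """Partition the source file into EXCEL_ROW_LIMIT-row pieces and append them to the zip archive."""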
try:
# Split data files into separate files
# e.g. `Assistance_prime_transactions_delta_%s.csv`
log_time = time.perf_counter()
delim = FILE_FORMATS[file_format]["delimiter"]
extension = FILE_FORMATS[file_format]["extension"]
output_template = f"{data_file_name}_%s.{extension}"
write_to_log(message="Beginning the delimited text file partition", download_job=download_job)
list_of_files = partition_large_delimited_file(
file_path=source_path, delimiter=delim, row_limit=EXCEL_ROW_LIMIT, output_name_template=output_template
)
msg = f"Partitioning data into {len(list_of_files)} files took {time.perf_counter() - log_time:.4f}s"
write_to_log(message=msg, download_job=download_job)
# Zip the split files into one zipfile
write_to_log(message="Beginning zipping and compression", download_job=download_job)
log_time = time.perf_counter()
append_files_to_zip_file(list_of_files, zip_file_path)
write_to_log(
message=f"Writing to zipfile took {time.perf_counter() - log_time:.4f}s", download_job=download_job
)
except Exception as e:
message = "Exception while partitioning text file"
if download_job:
fail_download(download_job, e, message)
write_to_log(message=message, download_job=download_job, is_error=True)
logger.error(e)
raise e
def start_download(download_job):
# Update job attributes
download_job.job_status_id = JOB_STATUS_DICT["running"]
download_job.number_of_rows = 0
download_job.number_of_columns = 0
download_job.file_size = 0
download_job.save()
write_to_log(message=f"Starting to process DownloadJob {download_job.download_job_id}", download_job=download_job)
return download_job.file_name
def finish_download(download_job):
download_job.job_status_id = JOB_STATUS_DICT["finished"]
download_job.save()
write_to_log(message=f"Finished processing DownloadJob {download_job.download_job_id}", download_job=download_job)
return download_job.file_name
def wait_for_process(process, start_time, download_job):
"""Wait for the process to complete, throw errors for timeouts or Process exceptions"""
log_time = time.perf_counter()
# Let the thread run until it finishes (max MAX_VISIBILITY_TIMEOUT), with a buffer of DOWNLOAD_VISIBILITY_TIMEOUT
sleep_count = 0
while process.is_alive():
if (
download_job
and not download_job.monthly_download
and (time.perf_counter() - start_time) > MAX_VISIBILITY_TIMEOUT
):
break
if sleep_count < 10:
time.sleep(WAIT_FOR_PROCESS_SLEEP / 5)
else:
time.sleep(WAIT_FOR_PROCESS_SLEEP)
sleep_count += 1
over_time = (time.perf_counter() - start_time) >= MAX_VISIBILITY_TIMEOUT
if download_job and (not download_job.monthly_download and over_time) or process.exitcode != 0:
if process.is_alive():
# Process is running for longer than MAX_VISIBILITY_TIMEOUT, kill it
write_to_log(
message=f"Attempting to terminate process (pid {process.pid})", download_job=download_job, is_error=True
)
process.terminate()
e = TimeoutError(
f"DownloadJob {download_job.download_job_id} lasted longer than {MAX_VISIBILITY_TIMEOUT / 3600} hours"
)
else:
# An error occurred in the process
e = Exception("Command failed. Please see the logs for details.")
raise e
return time.perf_counter() - log_time
def generate_export_query(source_query, limit, source, columns, file_format):
if limit:
source_query = source_query[:limit]
query_annotated = apply_annotations_to_sql(generate_raw_quoted_query(source_query), source.columns(columns))
options = FILE_FORMATS[file_format]["options"]
return r"\COPY ({}) TO STDOUT {}".format(query_annotated, options)
def generate_export_query_temp_file(export_query, download_job, temp_dir=None):
write_to_log(message=f"Saving PSQL Query: {export_query}", download_job=download_job, is_debug=True)
dir_name = "/tmp"
if temp_dir:
dir_name = temp_dir
# Create a unique temporary file to hold the raw query, using \copy
(temp_sql_file, temp_sql_file_path) = tempfile.mkstemp(prefix="bd_sql_", dir=dir_name)
with open(temp_sql_file_path, "w") as file:
file.write(export_query)
return temp_sql_file, temp_sql_file_path
def apply_annotations_to_sql(raw_query, aliases):
"""
    Django's ORM understandably doesn't allow aliases to share names with other available fields. However, if we
    want to use the efficiency of psql's COPY method and keep the column names, we need to allow these scenarios. This
    function simply outputs a modified raw SQL query which performs the aliasing, allowing these scenarios.
"""
cte_sql, select_statements = _select_columns(raw_query)
DIRECT_SELECT_QUERY_REGEX = r'^[^ ]*\."[^"]*"$' # Django is pretty consistent with how it prints out queries
# Create a list from the non-derived values between SELECT and FROM
selects_list = [str for str in select_statements if re.search(DIRECT_SELECT_QUERY_REGEX, str)]
# Create a list from the derived values between SELECT and FROM
aliased_list = [str for str in select_statements if not re.search(DIRECT_SELECT_QUERY_REGEX, str.strip())]
deriv_dict = {}
for str in aliased_list:
split_string = _top_level_split(str, " AS ")
alias = split_string[1].replace('"', "").replace(",", "").strip()
if alias not in aliases:
raise Exception(f'alias "{alias}" not found!')
deriv_dict[alias] = split_string[0]
# Match aliases with their values
values_list = [
f'{deriv_dict[alias] if alias in deriv_dict else selects_list.pop(0)} AS "{alias}"' for alias in aliases
]
sql = raw_query.replace(_top_level_split(raw_query, "FROM")[0], "SELECT " + ", ".join(values_list), 1)
if cte_sql:
sql = f"{cte_sql} {sql}"
# Now that we've converted the queryset to SQL, cleaned up aliasing for non-annotated fields, and sorted
    # the SELECT columns, there's one final step. The Django ORM does not allow alias names to conflict with
# column/field names on the underlying model. For annotated fields, naming conflict exceptions occur at
# the time they are applied to the queryset which means they never get to this function. To work around
# this, we give them a temporary name that cannot conflict with a field name on the model by appending
# the suffix specified by NAMING_CONFLICT_DISCRIMINATOR. Now that we have the "final" SQL, we must remove
# that suffix.
return sql.replace(NAMING_CONFLICT_DISCRIMINATOR, "")
def _select_columns(sql: str) -> Tuple[str, List[str]]:
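    """
    Scan the raw SQL character by character (tracking quotes and parenthesis depth) and return any leading
    CTE plus the list of top-level SELECT expressions, stopping at the first top-level FROM.
    """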
in_quotes = False
in_cte = False
parens_depth = 0
last_processed_index = 0
cte_sql = None
retval = []
for index, char in enumerate(sql):
if char == '"':
in_quotes = not in_quotes
if in_quotes:
continue
if char == "(":
parens_depth = parens_depth + 1
if in_cte:
continue
if char == ")":
parens_depth = parens_depth - 1
if in_cte and parens_depth == 0:
in_cte = False
cte_sql = sql[: index + 1]
last_processed_index = index
if parens_depth == 0 and not in_cte:
# Set flag to ignore the CTE
if sql[index : index + 5] == "WITH ":
in_cte = True
# Ignore the SELECT statement
if sql[index : index + 6] == "SELECT":
last_processed_index = index + 6
# If there is a FROM at the bottom level, we have all the values we need and can return
if sql[index : index + 4] == "FROM":
retval.append(sql[last_processed_index:index].strip())
return cte_sql, retval
# If there is a comma on the bottom level, add another select value and start parsing a new one
if char == ",":
retval.append(sql[last_processed_index:index].strip())
last_processed_index = index + 1 # skips the comma by design
return cte_sql, retval # this will almost certainly error out later.
def _top_level_split(sql, splitter):
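    """Split sql on the first occurrence of splitter that appears outside of quotes and parentheses."""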
in_quotes = False
parens_depth = 0
for index, char in enumerate(sql):
if char == '"':
in_quotes = not in_quotes
if in_quotes:
continue
if char == "(":
parens_depth = parens_depth + 1
if char == ")":
parens_depth = parens_depth - 1
if parens_depth == 0:
if sql[index : index + len(splitter)] == splitter:
return [sql[:index], sql[index + len(splitter) :]]
raise Exception(f"SQL string ${sql} cannot be split on ${splitter}")
def execute_psql(temp_sql_file_path, source_path, download_job):
"""Executes a single PSQL command within its own Subprocess"""
try:
log_time = time.perf_counter()
temp_env = os.environ.copy()
if not download_job.monthly_download:
            # Since terminating the process isn't guaranteed to end the DB statement, add a timeout to the client connection
temp_env["PGOPTIONS"] = f"--statement-timeout={settings.DOWNLOAD_DB_TIMEOUT_IN_HOURS}h"
cat_command = subprocess.Popen(["cat", temp_sql_file_path], stdout=subprocess.PIPE)
subprocess.check_output(
["psql", "-q", "-o", source_path, retrieve_db_string(), "-v", "ON_ERROR_STOP=1"],
stdin=cat_command.stdout,
stderr=subprocess.STDOUT,
env=temp_env,
)
duration = time.perf_counter() - log_time
write_to_log(
message=f"Wrote {os.path.basename(source_path)}, took {duration:.4f} seconds", download_job=download_job
)
except Exception as e:
if not settings.IS_LOCAL:
# Not logging the command as it can contain the database connection string
e.cmd = "[redacted psql command]"
logger.error(e)
sql = subprocess.check_output(["cat", temp_sql_file_path]).decode()
logger.error(f"Faulty SQL: {sql}")
raise e
def retrieve_db_string():
"""It is necessary for this to be a function so the test suite can mock the connection string"""
return settings.DOWNLOAD_DATABASE_URL
def strip_file_extension(file_name):
return os.path.splitext(os.path.basename(file_name))[0]
def fail_download(download_job, exception, message):
stack_trace = "".join(
traceback.format_exception(etype=type(exception), value=exception, tb=exception.__traceback__)
)
download_job.error_message = f"{message}:\n{stack_trace}"
download_job.job_status_id = JOB_STATUS_DICT["failed"]
download_job.save()
def add_data_dictionary_to_zip(working_dir, zip_file_path):
write_to_log(message="Adding data dictionary to zip file")
data_dictionary_file_name = "Data_Dictionary_Crosswalk.xlsx"
data_dictionary_file_path = os.path.join(working_dir, data_dictionary_file_name)
data_dictionary_url = settings.DATA_DICTIONARY_DOWNLOAD_URL
RetrieveFileFromUri(data_dictionary_url).copy(data_dictionary_file_path)
append_files_to_zip_file([data_dictionary_file_path], zip_file_path)
def _kill_spawned_processes(download_job=None):
"""Cleanup (kill) any spawned child processes during this job run"""
job = ps.Process(os.getpid())
for spawn_of_job in job.children(recursive=True):
write_to_log(
message=f"Attempting to terminate child process with PID [{spawn_of_job.pid}] and name "
f"[{spawn_of_job.name}]",
download_job=download_job,
is_error=True,
)
try:
spawn_of_job.kill()
except ps.NoSuchProcess:
pass
def create_empty_data_file(
source: DownloadSource,
download_job: DownloadJob,
working_dir: str,
piid: str,
assistance_id: str,
zip_file_path: str,
file_format: str,
) -> None:
data_file_name = build_data_file_name(source, download_job, piid, assistance_id)
extension = FILE_FORMATS[file_format]["extension"]
source.file_name = f"{data_file_name}.{extension}"
source_path = os.path.join(working_dir, source.file_name)
write_to_log(
message=f"Skipping download of {source.file_name} due to no valid columns provided", download_job=download_job
)
Path(source_path).touch()
append_files_to_zip_file([source_path], zip_file_path)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | package imagebuildah
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/buildah/util"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
securejoin "github.com/cyphar/filepath-securejoin"
docker "github.com/fsouza/go-dockerclient"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// StageExecutor bundles up what we need to know when executing one stage of a
// (possibly multi-stage) build.
// Each stage may need to produce an image to be used as the base in a later
// stage (with the last stage's image being the end product of the build), and
// it may need to leave its working container in place so that the container's
// root filesystem's contents can be used as the source for a COPY instruction
// in a later stage.
// Each stage has its own base image, so it starts with its own configuration
// and set of volumes.
// If we're naming the result of the build, only the last stage will apply that
// name to the image that it produces.
type StageExecutor struct {
executor *Executor
index int
stages int
name string
builder *buildah.Builder
preserved int
volumes imagebuilder.VolumeSet
volumeCache map[string]string
volumeCacheInfo map[string]os.FileInfo
mountPoint string
copyFrom string // Used to keep track of the --from flag from COPY and ADD
output string
containerIDs []string
}
// Preserve informs the stage executor that from this point on, it needs to
// ensure that only COPY and ADD instructions can modify the contents of this
// directory or anything below it.
// The StageExecutor handles this by caching the contents of directories which
// have been marked this way before executing a RUN instruction, invalidating
// that cache when an ADD or COPY instruction sets any location under the
// directory as the destination, and using the cache to reset the contents of
// the directory tree after processing each RUN instruction.
// It would be simpler if we could just mark the directory as a read-only bind
// mount of itself during Run(), but the directory is expected to be remain
// writeable while the RUN instruction is being handled, even if any changes
// made within the directory are ultimately discarded.
func (s *StageExecutor) Preserve(path string) error {
logrus.Debugf("PRESERVE %q", path)
if s.volumes.Covers(path) {
// This path is already a subdirectory of a volume path that
// we're already preserving, so there's nothing new to be done
// except ensure that it exists.
archivedPath := filepath.Join(s.mountPoint, path)
if err := os.MkdirAll(archivedPath, 0755); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
}
if err := s.volumeCacheInvalidate(path); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
}
return nil
}
// Figure out where the cache for this volume would be stored.
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return errors.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
// Save info about the top level of the location that we'll be archiving.
var archivedPath string
// Try and resolve the symlink (if one exists)
// Set archivedPath and path based on whether a symlink is found or not
if symLink, err := resolveSymlink(s.mountPoint, path); err == nil {
archivedPath = filepath.Join(s.mountPoint, symLink)
path = symLink
} else {
return errors.Wrapf(err, "error reading symbolic link to %q", path)
}
st, err := os.Stat(archivedPath)
if os.IsNotExist(err) {
if err = os.MkdirAll(archivedPath, 0755); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
}
st, err = os.Stat(archivedPath)
}
if err != nil {
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
return errors.Wrapf(err, "error reading info about volume path %q", archivedPath)
}
s.volumeCacheInfo[path] = st
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should work.
return errors.Errorf("error adding %q to the volume cache", path)
}
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are now supplanted by this one.
removed := []string{}
for cachedPath := range s.volumeCache {
// Walk our list of cached volumes, and check that they're
// still in the list of locations that we need to cache.
found := false
for _, volume := range s.volumes {
if volume == cachedPath {
// We need to keep this volume's cache.
found = true
break
}
}
if !found {
// We don't need to keep this volume's cache. Make a
// note to remove it.
removed = append(removed, cachedPath)
}
}
// Actually remove the caches that we decided to remove.
for _, cachedPath := range removed {
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath])
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
return errors.Wrapf(err, "error removing %q", s.volumeCache[cachedPath])
}
delete(s.volumeCache, cachedPath)
}
return nil
}
// Remove any volume cache item which will need to be re-saved because we're
// writing to part of it.
func (s *StageExecutor) volumeCacheInvalidate(path string) error {
invalidated := []string{}
for cachedPath := range s.volumeCache {
if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
invalidated = append(invalidated, cachedPath)
}
}
for _, cachedPath := range invalidated {
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
return errors.Wrapf(err, "error removing volume cache %q", s.volumeCache[cachedPath])
}
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, s.volumeCache[cachedPath])
delete(s.volumeCache, cachedPath)
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSave() error {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath := filepath.Join(s.mountPoint, cachedPath)
_, err := os.Stat(cacheFile)
if err == nil {
logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
continue
}
if !os.IsNotExist(err) {
return errors.Wrapf(err, "error checking for cache of %q in %q", archivedPath, cacheFile)
}
if err := os.MkdirAll(archivedPath, 0755); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
return errors.Wrapf(err, "error creating archive at %q", cacheFile)
}
defer cache.Close()
rc, err := archive.Tar(archivedPath, archive.Uncompressed)
if err != nil {
return errors.Wrapf(err, "error archiving %q", archivedPath)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
return errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
}
}
return nil
}
// Restore the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestore() error {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
return errors.Wrapf(err, "error opening archive at %q", cacheFile)
}
defer cache.Close()
if err := os.RemoveAll(archivedPath); err != nil {
return errors.Wrapf(err, "error clearing volume path %q", archivedPath)
}
if err := os.MkdirAll(archivedPath, 0755); err != nil {
return errors.Wrapf(err, "error recreating volume path %q", archivedPath)
}
err = archive.Untar(cache, archivedPath, nil)
if err != nil {
return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
return errors.Wrapf(err, "error restoring permissions on %q", archivedPath)
}
if err := os.Chown(archivedPath, 0, 0); err != nil {
return errors.Wrapf(err, "error setting ownership on %q", archivedPath)
}
if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
return errors.Wrapf(err, "error restoring datestamps on %q", archivedPath)
}
}
}
return nil
}
// digestSpecifiedContent digests any content that the next instruction would add to
// the image, returning its digest if there is any, or an empty string otherwise. We
// don't care about the details of where in the filesystem the content actually
// goes, because we're not actually going to add it here, so this is less
// involved than Copy().
func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []string, envValues []string) (string, error) {
// No instruction: done.
if node == nil {
return "", nil
}
// Not adding content: done.
switch strings.ToUpper(node.Value) {
default:
return "", nil
case "ADD", "COPY":
}
// Pull out everything except the first node (the instruction) and the
// last node (the destination).
var srcs []string
destination := node
for destination.Next != nil {
destination = destination.Next
if destination.Next != nil {
srcs = append(srcs, destination.Value)
}
}
var sources []string
var idMappingOptions *buildah.IDMappingOptions
contextDir := s.executor.contextDir
for _, flag := range node.Flags {
if strings.HasPrefix(flag, "--from=") {
// Flag says to read the content from another
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
from := strings.TrimPrefix(flag, "--from=")
if other, ok := s.executor.stages[from]; ok {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[from]; ok {
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
return "", errors.Errorf("the stage %q has not been built", from)
}
}
}
varValues := append(argValues, envValues...)
for _, src := range srcs {
// If src has an argument within it, resolve it to its
// value. Otherwise just return the value found.
name, err := imagebuilder.ProcessWord(src, varValues)
if err != nil {
return "", errors.Wrapf(err, "unable to resolve source %q", src)
}
src = name
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL. TODO: cache this content
// somewhere, so that we can avoid pulling it down
// again if we end up needing to drop it into the
// filesystem.
sources = append(sources, src)
} else {
// Source is not a URL, so it's a location relative to
// the all-content-comes-from-below-this-directory
// directory. Also raise an error if the src escapes
// the context directory.
contextSrc, err := securejoin.SecureJoin(contextDir, src)
if err == nil && strings.HasPrefix(src, "../") {
err = errors.New("escaping context directory error")
}
if err != nil {
return "", errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
}
sources = append(sources, contextSrc)
}
}
// If the all-content-comes-from-below-this-directory is the build
// context, read its .dockerignore.
var excludes []string
if contextDir == s.executor.contextDir {
var err error
if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir)
}
}
// Restart the digester and have it do a dry-run copy to compute the
// digest information.
options := buildah.AddAndCopyOptions{
Excludes: excludes,
ContextDir: contextDir,
IDMappingOptions: idMappingOptions,
DryRun: true,
}
s.builder.ContentDigester.Restart()
download := strings.ToUpper(node.Value) == "ADD"
// If destination.Value has an argument within it, resolve it to its
// value. Otherwise just return the value found.
destValue, destErr := imagebuilder.ProcessWord(destination.Value, varValues)
if destErr != nil {
return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value)
}
err := s.builder.Add(destValue, download, options, sources...)
if err != nil {
return "", errors.Wrapf(err, "error dry-running %q", node.Original)
}
// Return the formatted version of the digester's result.
contentDigest := ""
prefix, digest := s.builder.ContentDigester.Digest()
if prefix != "" {
prefix += ":"
}
if digest.Validate() == nil {
contentDigest = prefix + digest.Encoded()
}
return contentDigest, nil
}
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
s.builder.ContentDigester.Restart()
for _, copy := range copies {
// Check the file and see if part of it is a symlink.
// Convert it to the target if so. To be ultrasafe
// do the same for the mountpoint.
hadFinalPathSeparator := len(copy.Dest) > 0 && copy.Dest[len(copy.Dest)-1] == os.PathSeparator
secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
if err != nil {
return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
}
finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
if err != nil {
return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
}
if !strings.HasPrefix(finalPath, secureMountPoint) {
return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
}
copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
if len(copy.Dest) == 0 || copy.Dest[len(copy.Dest)-1] != os.PathSeparator {
if hadFinalPathSeparator {
copy.Dest += string(os.PathSeparator)
}
}
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
logrus.Debugf("COPY %#v, %#v", excludes, copy)
}
if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
return err
}
var sources []string
// The From field says to read the content from another
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
var idMappingOptions *buildah.IDMappingOptions
var copyExcludes []string
contextDir := s.executor.contextDir
if len(copy.From) > 0 {
if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL, allowed for ADD but not COPY.
if copy.Download {
sources = append(sources, src)
} else {
// returns an error to be compatible with docker
return errors.Errorf("source can't be a URL for COPY")
}
} else {
// Treat the source, which is not a URL, as a
// location relative to the
// all-content-comes-from-below-this-directory
// directory. Also raise an error if the src
// escapes the context directory.
srcSecure, err := securejoin.SecureJoin(contextDir, src)
if err == nil && strings.HasPrefix(src, "../") {
err = errors.New("escaping context directory error")
}
if err != nil {
return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
}
if hadFinalPathSeparator {
// If destination is a folder, we need to take extra care to
// ensure that files are copied with correct names (since
// resolving a symlink may result in a different name).
_, srcName := filepath.Split(src)
_, srcNameSecure := filepath.Split(srcSecure)
if srcName != srcNameSecure {
options := buildah.AddAndCopyOptions{
Chown: copy.Chown,
ContextDir: contextDir,
Excludes: copyExcludes,
IDMappingOptions: idMappingOptions,
}
// If we've a tar file, it will create a directory using the name of the tar
// file if we don't blank it out.
if strings.HasSuffix(srcName, ".tar") || strings.HasSuffix(srcName, ".gz") {
srcName = ""
}
if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
return err
}
continue
}
}
sources = append(sources, srcSecure)
}
}
options := buildah.AddAndCopyOptions{
Chown: copy.Chown,
ContextDir: contextDir,
Excludes: copyExcludes,
IDMappingOptions: idMappingOptions,
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
return err
}
}
return nil
}
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
logrus.Debugf("RUN %#v, %#v", run, config)
if s.builder == nil {
return errors.Errorf("no build container available")
}
stdin := s.executor.in
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
return errors.Errorf("error opening %q for reading: %v", os.DevNull, err)
}
defer devNull.Close()
stdin = devNull
}
options := buildah.RunOptions{
Hostname: config.Hostname,
Runtime: s.executor.runtime,
Args: s.executor.runtimeArgs,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
Mounts: convertMounts(s.executor.transientMounts),
Env: config.Env,
User: config.User,
WorkingDir: config.WorkingDir,
Entrypoint: config.Entrypoint,
Cmd: config.Cmd,
Stdin: stdin,
Stdout: s.executor.out,
Stderr: s.executor.err,
Quiet: s.executor.quiet,
NamespaceOptions: s.executor.namespaceOptions,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
} else {
options.ConfigureNetwork = buildah.NetworkEnabled
}
args := run.Args
if run.Shell {
if len(config.Shell) > 0 && s.builder.Format == buildah.Dockerv2ImageManifest {
args = append(config.Shell, args...)
} else {
args = append([]string{"/bin/sh", "-c"}, args...)
}
}
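	// Snapshot any declared volumes before running the command and restore them afterwards,
	// so that changes RUN makes under preserved volume paths are discarded.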
if err := s.volumeCacheSave(); err != nil {
return err
}
err := s.builder.Run(args, options)
if err2 := s.volumeCacheRestore(); err2 != nil {
if err == nil {
return err2
}
}
return err
}
// UnrecognizedInstruction is called when we encounter an instruction that the
// imagebuilder parser didn't understand.
func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
err := fmt.Sprintf(errStr+"%#v", step)
if s.executor.ignoreUnrecognizedInstructions {
logrus.Debugf(err)
return nil
}
switch logrus.GetLevel() {
case logrus.ErrorLevel:
logrus.Errorf(errStr)
case logrus.DebugLevel:
logrus.Debugf(err)
default:
logrus.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
}
return errors.Errorf(err)
}
// prepare creates a working container based on the specified image, or if one
// isn't specified, the first argument passed to the first FROM instruction we
// can find in the stage's parsed tree.
func (s *StageExecutor) prepare(ctx context.Context, stage imagebuilder.Stage, from string, initializeIBConfig, rebase bool) (builder *buildah.Builder, err error) {
ib := stage.Builder
node := stage.Node
if from == "" {
base, err := ib.From(node)
if err != nil {
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
return nil, errors.Wrapf(err, "error determining starting point for build")
}
from = base
}
displayFrom := from
// stage.Name will be a numeric string for all stages without an "AS" clause
asImageName := stage.Name
if asImageName != "" {
if _, err := strconv.Atoi(asImageName); err != nil {
displayFrom = from + " AS " + asImageName
}
}
if initializeIBConfig && rebase {
logrus.Debugf("FROM %#v", displayFrom)
if !s.executor.quiet {
s.executor.log("FROM %s", displayFrom)
}
}
builderOptions := buildah.BuilderOptions{
Args: ib.Args,
FromImage: from,
PullPolicy: s.executor.pullPolicy,
Registry: s.executor.registry,
BlobDirectory: s.executor.blobDirectory,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: s.executor.reportWriter,
SystemContext: s.executor.systemContext,
Isolation: s.executor.isolation,
NamespaceOptions: s.executor.namespaceOptions,
ConfigureNetwork: s.executor.configureNetwork,
CNIPluginPath: s.executor.cniPluginPath,
CNIConfigDir: s.executor.cniConfigDir,
IDMappingOptions: s.executor.idmappingOptions,
CommonBuildOpts: s.executor.commonBuildOptions,
DefaultMountsFilePath: s.executor.defaultMountsFilePath,
Format: s.executor.outputFormat,
Capabilities: s.executor.capabilities,
Devices: s.executor.devices,
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
}
// Check and see if the image is a pseudonym for the end result of a
// previous stage, named by an AS clause in the Dockerfile.
if asImageFound, ok := s.executor.imageMap[from]; ok {
builderOptions.FromImage = asImageFound
}
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
return nil, errors.Wrapf(err, "error creating build container")
}
if initializeIBConfig {
volumes := map[string]struct{}{}
for _, v := range builder.Volumes() {
volumes[v] = struct{}{}
}
ports := map[docker.Port]struct{}{}
for _, p := range builder.Ports() {
ports[docker.Port(p)] = struct{}{}
}
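		// Mirror the new builder's settings into docker-style config and image structures so that
		// imagebuilder can apply the base image's configuration to this stage.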
dConfig := docker.Config{
Hostname: builder.Hostname(),
Domainname: builder.Domainname(),
User: builder.User(),
Env: builder.Env(),
Cmd: builder.Cmd(),
Image: from,
Volumes: volumes,
WorkingDir: builder.WorkDir(),
Entrypoint: builder.Entrypoint(),
Labels: builder.Labels(),
Shell: builder.Shell(),
StopSignal: builder.StopSignal(),
OnBuild: builder.OnBuild(),
ExposedPorts: ports,
}
var rootfs *docker.RootFS
if builder.Docker.RootFS != nil {
rootfs = &docker.RootFS{
Type: builder.Docker.RootFS.Type,
}
for _, id := range builder.Docker.RootFS.DiffIDs {
rootfs.Layers = append(rootfs.Layers, id.String())
}
}
dImage := docker.Image{
Parent: builder.FromImage,
ContainerConfig: dConfig,
Container: builder.Container,
Author: builder.Maintainer(),
Architecture: builder.Architecture(),
RootFS: rootfs,
}
dImage.Config = &dImage.ContainerConfig
err = ib.FromImage(&dImage, node)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to update: %v", err2)
}
return nil, errors.Wrapf(err, "error updating build context")
}
}
mountPoint, err := builder.Mount(builder.MountLabel)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
}
return nil, errors.Wrapf(err, "error mounting new container")
}
if rebase {
// Make this our "current" working container.
s.mountPoint = mountPoint
s.builder = builder
}
logrus.Debugln("Container ID:", builder.ContainerID)
return builder, nil
}
// Delete deletes the stage's working container, if we have one.
func (s *StageExecutor) Delete() (err error) {
if s.builder != nil {
err = s.builder.Delete()
s.builder = nil
}
return err
}
// stepRequiresLayer indicates whether or not the step should be followed by
// committing a layer container when creating an intermediate image.
func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool {
switch strings.ToUpper(step.Command) {
case "ADD", "COPY", "RUN":
return true
}
return false
}
// getImageRootfs checks for an image matching the passed-in name in local
// storage. If it isn't found, it pulls down a copy. Then, if we don't have a
// working container root filesystem based on the image, it creates one. Then
// it returns that root filesystem's location.
func (s *StageExecutor) getImageRootfs(ctx context.Context, stage imagebuilder.Stage, image string) (mountPoint string, err error) {
if builder, ok := s.executor.containerMap[image]; ok {
return builder.MountPoint, nil
}
builder, err := s.prepare(ctx, stage, image, false, false)
if err != nil {
return "", err
}
s.executor.containerMap[image] = builder
return builder.MountPoint, nil
}
// Execute runs each of the steps in the stage's parsed tree, in turn.
func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, base string) (imgID string, ref reference.Canonical, err error) {
ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache
moreStages := s.index < s.stages-1
lastStage := !moreStages
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
// If the base image's name corresponds to the result of an earlier
// stage, substitute that image's ID for the base image's name here.
// If not, then go on assuming that it's just a regular image that's
// either in local storage, or one that we have to pull from a
// registry.
if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
base = stageImage
}
// Create the (first) working container for this stage. Reinitializing
// the imagebuilder configuration may alter the list of steps we have,
// so take a snapshot of them *after* that.
if _, err := s.prepare(ctx, stage, base, true, true); err != nil {
return "", nil, err
}
children := stage.Node.Children
// A helper function to only log "COMMIT" as an explicit step if it's
// the very last step of a (possibly multi-stage) build.
logCommit := func(output string, instruction int) {
moreInstructions := instruction < len(children)-1
if moreInstructions || moreStages {
return
}
commitMessage := "COMMIT"
if output != "" {
commitMessage = fmt.Sprintf("%s %s", commitMessage, output)
}
logrus.Debugf(commitMessage)
if !s.executor.quiet {
s.executor.log(commitMessage)
}
}
logCacheHit := func(cacheID string) {
if !s.executor.quiet {
cacheHitMessage := "--> Using cache"
fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID)
}
}
logImageID := func(imgID string) {
if len(imgID) > 11 {
imgID = imgID[0:11]
}
if s.executor.iidfile == "" {
fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
}
}
if len(children) == 0 {
// There are no steps.
if s.builder.FromImageID == "" || s.executor.squash {
// We either don't have a base image, or we need to
// squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil, ""), false, s.output); err != nil {
return "", nil, errors.Wrapf(err, "error committing base container")
}
} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
// The image would be modified by the labels passed
// via the command line, so we need to commit.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(stage.Node, ""), true, s.output); err != nil {
return "", nil, err
}
} else {
// We don't need to squash the base image, and the
// image wouldn't be modified by the command line
// options, so just reuse the base image.
logCommit(s.output, -1)
if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
return "", nil, err
}
}
logImageID(imgID)
}
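	// Walk the stage's instructions one at a time, committing layers as the layer and cache settings require.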
for i, node := range children {
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
// Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
}
logrus.Debugf("Parsed Step: %+v", *step)
if !s.executor.quiet {
s.executor.log("%s", step.Original)
}
		// If the step command is COPY or ADD, check for a --from
		// flag. Set copyFrom to point to either the context directory
		// or the root of the container from the specified stage.
		// Also check the chown flag for validity.
s.copyFrom = s.executor.contextDir
for _, flag := range step.Flags {
command := strings.ToUpper(step.Command)
// chown and from flags should have an '=' sign, '--chown=' or '--from='
if command == "COPY" && (flag == "--chown" || flag == "--from") {
return "", nil, errors.Errorf("COPY only supports the --chown=<uid:gid> and the --from=<image|stage> flags")
}
if command == "ADD" && flag == "--chown" {
return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
}
if strings.Contains(flag, "--from") && command == "COPY" {
var mountPoint string
arr := strings.Split(flag, "=")
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
otherStage, ok := s.executor.stages[arr[1]]
if !ok {
if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil {
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
}
} else {
mountPoint = otherStage.mountPoint
}
s.copyFrom = mountPoint
break
}
}
// Determine if there are any RUN instructions to be run after
// this step. If not, we won't have to bother preserving the
// contents of any volumes declared between now and when we
// finish.
noRunsRemaining := false
if moreInstructions {
noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
}
// If we're doing a single-layer build, just process the
// instruction.
if !s.executor.layers {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments(), ib.Config().Env)
if err != nil {
return "", nil, err
}
if moreInstructions {
// There are still more instructions to process
// for this stage. Make a note of the
// instruction in the history that we'll write
// for the image when we eventually commit it.
now := time.Now()
s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node, addedContentDigest), "", "")
continue
} else {
// This is the last instruction for this stage,
// so we should commit this container to create
// an image, but only if it's the last one, or
// if it's used as the basis for a later stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node, addedContentDigest), false, s.output)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
logImageID(imgID)
} else {
imgID = ""
}
break
}
}
// We're in a multi-layered build.
var (
commitName string
cacheID string
err error
rebase bool
)
// If we have to commit for this instruction, only assign the
// stage's configured output name to the last layer.
if lastInstruction {
commitName = s.output
}
// If we're using the cache, and we've managed to stick with
// cached images so far, look for one that matches what we
// expect to produce for this instruction.
if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments(), ib.Config().Env)
if err != nil {
return "", nil, err
}
cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest)
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
if cacheID != "" {
// Note the cache hit.
logCacheHit(cacheID)
} else {
// We're not going to find any more cache hits.
checkForLayers = false
}
}
if cacheID != "" {
// A suitable cached image was found, so just reuse it.
// If we need to name the resulting image because it's
// the last step in this stage, add the name to the
// image.
imgID = cacheID
if commitName != "" {
logCommit(commitName, i)
if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
return "", nil, err
}
logImageID(imgID)
}
// Update our working container to be based off of the
// cached image, if we might need to use it as a basis
// for the next instruction, or if we need the root
// filesystem to match the image contents for the sake
// of a later stage that wants to copy content from it.
rebase = moreInstructions || rootfsIsUsedLater
// If the instruction would affect our configuration,
// process the configuration change so that, if we fall
// off the cache path, the filesystem changes from the
// last cache image will be all that we need, since we
// still don't want to restart using the image's
// configuration blob.
if !s.stepRequiresLayer(step) {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
}
} else {
// If we didn't find a cached image that we could just reuse,
// process the instruction directly.
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments(), ib.Config().Env)
if err != nil {
return "", nil, err
}
// Create a new image, maybe with a new layer.
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
logImageID(imgID)
// We only need to build a new container rootfs
// using this image if we plan on making
// further changes to it. Subsequent stages
// that just want to use the rootfs as a source
// for COPY or ADD will be content with what we
// already have.
rebase = moreInstructions
}
if rebase {
// Since we either committed the working container or
// are about to replace it with one based on a cached
// image, add the current working container's ID to the
// list of successful intermediate containers that
// we'll clean up later.
s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
// Prepare for the next step or subsequent phases by
// creating a new working container with the
// just-committed or updated cached image as its new
// base image.
if _, err := s.prepare(ctx, stage, imgID, false, true); err != nil {
return "", nil, errors.Wrap(err, "error preparing container for next step")
}
}
}
return imgID, ref, nil
}
// tagExistingImage adds names to an image already in the store
func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
// If we don't need to attach a name to the image, just return the cache ID.
if output == "" {
return cacheID, nil, nil
}
// Get the destination image reference.
dest, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
policyContext, err := util.GetPolicyContext(s.executor.systemContext)
if err != nil {
return "", nil, err
}
defer func() {
if destroyErr := policyContext.Destroy(); destroyErr != nil {
if err == nil {
err = destroyErr
} else {
err = errors.Wrap(err, destroyErr.Error())
}
}
}()
// Look up the source image, expecting it to be in local storage
src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
if err != nil {
return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
}
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, nil)
if err != nil {
return "", nil, errors.Wrapf(err, "error copying image %q", cacheID)
}
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID)
}
img, err := is.Transport.GetStoreImage(s.executor.store, dest)
if err != nil {
return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
var ref reference.Canonical
if dref := dest.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
}
return img.ID, ref, nil
}
// intermediateImageExists returns the ID of a cached intermediate image for currNode if one exists in the image store from a previous build, or an empty string otherwise.
// It verifies a candidate by checking the parent of its top layer against the current build's top layer and by comparing image histories.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string) (string, error) {
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
var baseHistory []v1.History
if s.builder.FromImageID != "" {
baseHistory, err = s.executor.getImageHistory(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
}
for _, image := range images {
var imageTopLayer *storage.Layer
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
}
// If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
// build. Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
if imageTopLayer == nil || (s.builder.TopLayer != "" && (imageTopLayer.Parent == s.builder.TopLayer || imageTopLayer.ID == s.builder.TopLayer)) {
history, err := s.executor.getImageHistory(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
}
// children + currNode is the point of the Dockerfile we are currently at.
if s.executor.historyMatches(baseHistory, currNode, history, addedContentDigest) {
return image.ID, nil
}
}
}
return "", nil
}
// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
var imageRef types.ImageReference
if output != "" {
imageRef2, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
imageRef = imageRef2
}
if ib.Author != "" {
s.builder.SetMaintainer(ib.Author)
}
config := ib.Config()
if createdBy != "" {
s.builder.SetCreatedBy(createdBy)
}
s.builder.SetHostname(config.Hostname)
s.builder.SetDomainname(config.Domainname)
s.builder.SetArchitecture(s.executor.architecture)
s.builder.SetOS(s.executor.os)
s.builder.SetUser(config.User)
s.builder.ClearPorts()
for p := range config.ExposedPorts {
s.builder.SetPort(string(p))
}
for _, envSpec := range config.Env {
spec := strings.SplitN(envSpec, "=", 2)
s.builder.SetEnv(spec[0], spec[1])
}
s.builder.SetCmd(config.Cmd)
s.builder.ClearVolumes()
for v := range config.Volumes {
s.builder.AddVolume(v)
}
s.builder.ClearOnBuild()
for _, onBuildSpec := range config.OnBuild {
s.builder.SetOnBuild(onBuildSpec)
}
s.builder.SetWorkDir(config.WorkingDir)
s.builder.SetEntrypoint(config.Entrypoint)
s.builder.SetShell(config.Shell)
s.builder.SetStopSignal(config.StopSignal)
if config.Healthcheck != nil {
s.builder.SetHealthcheck(&buildahdocker.HealthConfig{
Test: append([]string{}, config.Healthcheck.Test...),
Interval: config.Healthcheck.Interval,
Timeout: config.Healthcheck.Timeout,
StartPeriod: config.Healthcheck.StartPeriod,
Retries: config.Healthcheck.Retries,
})
} else {
s.builder.SetHealthcheck(nil)
}
s.builder.ClearLabels()
for k, v := range config.Labels {
s.builder.SetLabel(k, v)
}
for _, labelSpec := range s.executor.labels {
label := strings.SplitN(labelSpec, "=", 2)
if len(label) > 1 {
s.builder.SetLabel(label[0], label[1])
} else {
s.builder.SetLabel(label[0], "")
}
}
for _, annotationSpec := range s.executor.annotations {
annotation := strings.SplitN(annotationSpec, "=", 2)
if len(annotation) > 1 {
s.builder.SetAnnotation(annotation[0], annotation[1])
} else {
s.builder.SetAnnotation(annotation[0], "")
}
}
if imageRef != nil {
logName := transports.ImageName(imageRef)
logrus.Debugf("COMMIT %q", logName)
} else {
logrus.Debugf("COMMIT")
}
writer := s.executor.reportWriter
if s.executor.layers || !s.executor.useCache {
writer = nil
}
options := buildah.CommitOptions{
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
Squash: s.executor.squash,
EmptyLayer: emptyLayer,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
return "", nil, err
}
var ref reference.Canonical
if imageRef != nil {
if dref := imageRef.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
}
}
}
return imgID, ref, nil
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
targetPath, err := securejoin.SecureJoin(s.mountPoint, path)
if err != nil {
return errors.Wrapf(err, "error ensuring container path %q", path)
}
_, err = os.Stat(targetPath)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(targetPath, 0755)
if err != nil {
return errors.Wrapf(err, "error creating directory path %q", targetPath)
}
// get the uid and gid so that we can set the correct permissions on the
// working directory
uid, gid, _, err := chrootuser.GetUser(s.mountPoint, s.builder.User())
if err != nil {
return errors.Wrapf(err, "error getting uid and gid for user %q", s.builder.User())
}
if err = os.Chown(targetPath, int(uid), int(gid)); err != nil {
return errors.Wrapf(err, "error setting ownership on %q", targetPath)
}
}
if err != nil {
return errors.Wrapf(err, "error ensuring container path %q", path)
}
return nil
}
| [
"\"BUILDAH_NOPIVOT\""
]
| []
| [
"BUILDAH_NOPIVOT"
]
| [] | ["BUILDAH_NOPIVOT"] | go | 1 | 0 | |
example/appengine/app.go | // Copyright 2017 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package demo provides an app that shows how to use the github package on
// Google App Engine.
package demo
import (
"fmt"
"net/http"
"os"
"github.com/google/go-github/v18/github"
"golang.org/x/oauth2"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
)
func init() {
http.HandleFunc("/", handler)
}
func handler(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
ctx := appengine.NewContext(r)
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: os.Getenv("GITHUB_AUTH_TOKEN")},
)
tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
commits, _, err := client.Repositories.ListCommits(ctx, "google", "go-github", nil)
if err != nil {
log.Errorf(ctx, "ListCommits: %v", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
for _, commit := range commits {
fmt.Fprintln(w, commit.GetHTMLURL())
}
}
| [
"\"GITHUB_AUTH_TOKEN\""
]
| []
| [
"GITHUB_AUTH_TOKEN"
]
| [] | ["GITHUB_AUTH_TOKEN"] | go | 1 | 0 | |
dashboard/data.py | import datetime
import os
from trello.trelloclient import TrelloClient
TRELLO_API_KEY = os.getenv('TRELLO_API_KEY')
TRELLO_API_SECRET = os.getenv('TRELLO_API_SECRET')
TRELLO_TOKEN = os.getenv('TRELLO_TOKEN')
BOARD_ID = os.getenv('TRELLO_BOARD', '5f7f61eda018ce481185be8f')
HIGHLIGHTS_BOARD_ID = os.getenv('HIGHLIGHTS_BOARD', '60e4b0e00879a001f87ff95c')
COLOR_EPIC = 'purple'
COLOR_TASK = 'blue'
COLOR_PRODUCT = None
LIST_DONE = 'Done'
LIST_IN_PROGRESS = 'In Progress'
LIST_BACKLOG = 'Backlog'
LIST_BLOCKED = 'Blocked/Waiting'
LIST_EVENTS = 'Scheduled Events'
LABEL_CONFERENCE_TALK = 'Conference Talk'
LABEL_CONFERENCE_WORKSHOP = 'Conference Workshop'
LABEL_CONTENT = 'Content'
LABEL_CUSTOMER = 'Customer Engagement'
LABEL_LIVE_STREAM = 'Live Stream'
class DashboardData:
def __init__(self):
# Board Agnostic
self.label_names = None # [str]
self.epic_label_names = None # [str]
self.task_label_names = None # [str]
self.product_label_names = None # [str]
        self.members_by_id = {}  # {str: Member}
# Live Board
self.board = None
self.all_labels = None # [Label]
self.all_cards = None # [Card]
self.all_lists = None # [TrelloList]
self.all_members = None # [Member]
self.list_names = None # [str]
self.lists_by_id = None # {str: [List]}
self.lists_by_name = None # {str: [List]}
self.ongoing_list_ids = None # [str]
self.cards_by_list_id = {} # {str: [Card]}
self.cards_by_label = {} # {str: [Card]}
self.cards_by_member = {} # {str: [Card]}
# Archive Board
self.archives = None
self.archive_lists = None # [TrelloList]
self.archive_cards = None # [Card]
self.archive_lists_by_id = {} # {str: [List]}
        self.archive_cards_by_list_id = {}  # {str: [Card]}
        self.archive_cards_by_label = {}  # {str: [Card]}
        self.archive_cards_by_member = {}  # {str: [Card]}
self.highlights_list_ids = None # [str]
def load(self, client: TrelloClient) -> None:
"""
Loads all of the necessary data from the Trello client, organizing it as necessary
for future calls. No other calls should be made to objects of this class without having
first called this method.
:param client: authenticated trello client
"""
# Live network calls
self.board = client.get_board(BOARD_ID)
self.all_labels = self.board.get_labels()
self.all_cards = self.board.open_cards()
self.all_lists = self.board.open_lists()
self.all_members = self.board.all_members()
self.archives = client.get_board(HIGHLIGHTS_BOARD_ID)
self.archive_lists = self.archives.open_lists()
self.archive_cards = self.archives.open_cards()
# Organize labels
self.label_names = [label.name for label in self.all_labels]
self.epic_label_names = [label.name for label in self.all_labels if label.color == COLOR_EPIC]
self.task_label_names = [label.name for label in self.all_labels if label.color == COLOR_TASK]
self.product_label_names = [label.name for label in self.all_labels if label.color == COLOR_PRODUCT]
self.event_label_names = [LABEL_CUSTOMER, LABEL_CONFERENCE_WORKSHOP, LABEL_CONFERENCE_TALK]
# Organize members
self.members_by_id = {m.id: m for m in self.all_members}
# Organize lists
self.list_names = [tlist.name for tlist in self.all_lists]
self.lists_by_id = {tlist.id: tlist for tlist in self.all_lists}
self.lists_by_name = {tlist.name: tlist for tlist in self.all_lists}
self.archive_lists_by_id = {tlist.id: tlist for tlist in self.archive_lists}
self.ongoing_list_ids = (
self.lists_by_name[LIST_DONE].id,
self.lists_by_name[LIST_IN_PROGRESS].id
)
self.highlights_list_ids = [tlist.id for tlist in self.archive_lists if
tlist.name.startswith('Highlights')]
# Organize cards
def _process_card(card, member_cards, label_cards, list_cards):
# Rebuild date as a date object
if card.due:
card.real_due_date = datetime.datetime.strptime(card.due, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
card.real_due_date = None
# Add in member names instead of IDs
if card.member_ids:
card.member_names = [self.members_by_id[m_id].full_name for m_id in card.member_ids]
for member in card.member_names:
mapping = member_cards.setdefault(member, [])
mapping.append(card)
# Label breakdown
if card.labels:
# In most cases, any cards with multiple labels will only have one per type
# (i.e. epic, activity, product, etc). In case they do cover multiple, sort them
# alphabetically for consistency.
card.labels.sort(key=lambda x: x.name)
for label in card.labels:
mapping = label_cards.setdefault(label.name, [])
mapping.append(card)
# List cache
list_cards.setdefault(card.list_id, []).append(card)
for card in self.all_cards:
_process_card(card, self.cards_by_member, self.cards_by_label, self.cards_by_list_id)
for card in self.archive_cards:
_process_card(card, self.archive_cards_by_member, self.archive_cards_by_label,
self.archive_cards_by_list_id)
def in_progress_cards(self):
"""
Cards: All from 'In Progress' list
Sort: Due Date
Extra Fields: type
"""
in_progress_id = self.lists_by_name[LIST_IN_PROGRESS].id
in_progress_cards = self.cards_by_list_id[in_progress_id]
add_card_types(in_progress_cards, self.task_label_names)
sorted_cards = sorted(in_progress_cards, key=sort_cards_by_due)
return sorted_cards
def backlog_cards(self):
"""
Cards: All from 'Backlog' list
Sort: Due Date
Extra Fields: type
"""
backlog_id = self.lists_by_name[LIST_BACKLOG].id
backlog_cards = self.cards_by_list_id[backlog_id]
add_card_types(backlog_cards, self.task_label_names)
sorted_cards = sorted(backlog_cards, key=sort_cards_by_due)
return sorted_cards
def blocked_cards(self):
"""
Cards: All from the 'Blocked/Waiting' list
Sort: Due Date
Extra Fields: type
"""
blocked_id = self.lists_by_name[LIST_BLOCKED].id
blocked_cards = self.cards_by_list_id[blocked_id]
add_card_types(blocked_cards, self.task_label_names)
sorted_cards = sorted(blocked_cards, key=sort_cards_by_due)
return sorted_cards
def upcoming_events_cards(self):
"""
Cards: All from 'Scheduled Events' and 'In Progress' list
Sort: Due Date
Extra Fields: type
"""
# Everything in the scheduled events list
all_cards = self.cards_by_list_id[self.lists_by_name[LIST_EVENTS].id]
# Event-related cards from the in progress list
in_progress_cards = self.cards_by_list_id[self.lists_by_name[LIST_IN_PROGRESS].id]
for c in in_progress_cards:
if not c.labels:
continue
for label_name in c.labels:
if label_name.name in self.event_label_names:
all_cards.append(c)
break
add_card_types(all_cards, self.event_label_names)
sorted_cards = sorted(all_cards, key=sort_cards_by_due)
return sorted_cards
def done_cards(self):
"""
Cards: All from the 'Done' list
Sort: Due Date
Extra Fields: type
"""
done_id = self.lists_by_name[LIST_DONE].id
if done_id in self.cards_by_list_id:
done_cards = self.cards_by_list_id[done_id]
add_card_types(done_cards, self.task_label_names)
cards = sorted(done_cards, key=sort_cards_by_due)
else:
cards = []
return cards
def coming_soon_cards(self):
"""
Cards: From 'Backlog' and 'Scheduled Events' with due dates in the next 21 days
Sort: Due Date
Extra Fields: type
"""
backlog_id = self.lists_by_name[LIST_BACKLOG].id
backlog_cards = self.cards_by_list_id[backlog_id]
events_id = self.lists_by_name[LIST_EVENTS].id
events_cards = self.cards_by_list_id[events_id]
all_soon_cards = backlog_cards + events_cards
        # Filter out cards with no due date or those due more than 21 days out
upcoming_date = datetime.datetime.now() + datetime.timedelta(days=21)
upcoming_cards = [c for c in all_soon_cards if c.real_due_date and c.real_due_date < upcoming_date]
add_card_types(upcoming_cards, self.task_label_names)
sorted_cards = sorted(upcoming_cards, key=sort_cards_by_due)
return sorted_cards
def in_progress_products(self):
"""
Cards: [product labels, cards] for 'In Progress'
Sort: Due Date
Extra Fields: type
"""
return self._list_label_filter([self.lists_by_name[LIST_IN_PROGRESS].id], self.product_label_names)
def in_progress_activities(self):
"""
Cards: [task labels, cards] for 'In Progress'
Sort: Due Date
Extra Fields: type
"""
return self._list_label_filter([self.lists_by_name[LIST_IN_PROGRESS].id], self.task_label_names)
def in_progress_epics(self):
"""
Cards: [epic labels, cards] for 'In Progress'
Sort: Due Date
Extra Fields: type
"""
return self._list_label_filter([self.lists_by_name[LIST_IN_PROGRESS].id], self.epic_label_names)
def in_progress_team(self):
"""
Cards: [member name, cards] for 'In Progress'
Sort: Due Date
Extra Fields: type
"""
filtered = {}
for member_name, card_list in self.cards_by_member.items():
filtered[member_name] = []
for card in card_list:
if card.list_id in [self.lists_by_name[LIST_IN_PROGRESS].id]:
filtered[member_name].append(card)
add_card_types(filtered[member_name], self.task_label_names)
filtered[member_name].sort(key=sort_cards_by_due)
return filtered
def backlog_products(self):
"""
Cards: [product label, cards] for 'Backlog'
Sort: Due Date
Extra Fields: type
"""
return self._list_label_filter([self.lists_by_name[LIST_BACKLOG].id], self.product_label_names)
def backlog_activities(self):
"""
Cards: [task label, cards] for 'Backlog'
Sort: Due Date
Extra Fields: type
"""
return self._list_label_filter([self.lists_by_name[LIST_BACKLOG].id], self.task_label_names)
def backlog_epics(self):
"""
Cards: [epic label, cards] for 'Backlog'
Sort: Due Date
Extra Fields: type
"""
return self._list_label_filter([self.lists_by_name[LIST_BACKLOG].id], self.epic_label_names)
def backlog_team(self):
"""
Cards: [member name, cards] for 'Backlog'
Sort: Due Date
Extra Fields: type
"""
filtered = {}
for member_name, card_list in self.cards_by_member.items():
filtered[member_name] = []
for card in card_list:
if card.list_id in [self.lists_by_name[LIST_BACKLOG].id]:
filtered[member_name].append(card)
add_card_types(filtered[member_name], self.task_label_names)
filtered[member_name].sort(key=sort_cards_by_due)
return filtered
def month_list(self):
""" Returns a tuple of [name, id] for all monthly highlights lists """
monthly_list = []
for l in self.archive_lists:
if l.name.startswith('Highlights'):
name = l.name[len('Highlights - '):]
monthly_list.append([name, l.id])
return monthly_list
def month_highlights(self, list_id):
"""
Cards: all cards from the given list
Sort: Type
Extra Fields: type
"""
trello_list = self.archive_lists_by_id[list_id]
highlight_label_names = self.task_label_names + self.epic_label_names
cards_by_label = self._list_label_filter([list_id], highlight_label_names,
label_cards=self.archive_cards_by_label)
# Add extra data for each card
for card_list in cards_by_label.values():
add_card_types(card_list, highlight_label_names)
pull_up_custom_fields(card_list)
# Summarize monthly data
stats = {
'Event Attendance': 0,
'Customer Attendance': 0,
}
# Event Attendance
event_attendance_cards = \
cards_by_label.get(LABEL_CONFERENCE_TALK, []) +\
cards_by_label.get(LABEL_CONFERENCE_WORKSHOP, []) +\
cards_by_label.get(LABEL_LIVE_STREAM, [])
for card in event_attendance_cards:
if card.attendees:
stats['Event Attendance'] += card.attendees
# Customer Attendance
for card in cards_by_label.get(LABEL_CUSTOMER, []):
if card.attendees:
stats['Customer Attendance'] += card.attendees
return cards_by_label, trello_list.name, stats
def customer_attendees(self):
labels = (LABEL_CUSTOMER, )
return self._process_attendees_list(labels)
def all_attendees(self):
labels = (LABEL_CONFERENCE_TALK, LABEL_CONFERENCE_WORKSHOP, LABEL_CUSTOMER, LABEL_LIVE_STREAM)
return self._process_attendees_list(labels)
def _process_attendees_list(self, labels):
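        """
        Collect, for each monthly highlights list, the cards carrying any of the
        given labels and sum their 'Attendees' custom field per month.
        Returns (cards grouped by month name, aggregate attendee data by month name).
        """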
month_cards = {}
month_data = {}
for month_list_id in self.highlights_list_ids:
# Parse month name out of the list name
month_list_name = self.archive_lists_by_id[month_list_id].name
month_name = month_list_name.split(' ')[2]
# Initialize the month aggregate data
month_data[month_name] = {
'attendees': 0
}
# Get the relevant cards for the month
month_by_labels = self._list_label_filter([month_list_id], labels, label_cards=self.archive_cards_by_label)
all_cards_for_month = []
for cards in month_by_labels.values():
all_cards_for_month += cards
# For each card, pull up the type information for simplicity
add_card_types(all_cards_for_month, labels)
# For each card, pull the attendees up to the top level for simplicity
for c in all_cards_for_month:
# Figure out the event attendance
c.attendees = 0 # default in case we don't have these values
if len(c.custom_fields) > 0:
for field in c.custom_fields:
if field.name == 'Attendees':
c.attendees = int(field.value)
# Increment the monthly count
month_data[month_name]['attendees'] += c.attendees
# Store the results
month_cards[month_name] = all_cards_for_month
return month_cards, month_data
def _list_label_filter(self, id_list, label_list, label_cards=None):
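        """
        Return a {label name: [cards]} mapping restricted to labels in label_list
        and to cards whose list id is in id_list. Uses the live board's label cache
        unless label_cards is supplied; labels with no matching cards are dropped.
        """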
label_cards = label_cards or self.cards_by_label
filtered = {}
for label, card_list in label_cards.items():
if label not in label_list:
continue
filtered[label] = []
for card in card_list:
if card.list_id in id_list:
filtered[label].append(card)
# Remove the label if there were no matching cards found
if len(filtered[label]) == 0:
filtered.pop(label)
return filtered
def sort_cards_by_due(card):
""" Sorting key function for sorting a list of cards by their due date. """
if card.due:
return card.due
else:
return ''
def sort_cards_by_type(card):
""" Sorting key function for card types (as added by add_card_types) """
if len(card.types) > 0:
return card.types[0]
else:
return ''
def add_card_types(card_list, accepted_labels):
"""
Adds a new field named "type" to each card in the given list. The type will be a list
of all label names in that card that appear in the list of provided acceptable labels.
If the card has no labels or none match, the type field will be an empty list.
"""
for c in card_list:
card_types = []
if c.labels:
card_types = [l.name for l in c.labels if l.name in accepted_labels]
c.types = card_types
def pull_up_custom_fields(card_list):
"""
For each of the custom fields we use, pull them up to the card level for simplicity.
"""
for c in card_list:
# Establish defaults
c.attendees = None
c.content_url = None
if len(c.custom_fields) > 0:
for field in c.custom_fields:
if field.name == 'Attendees':
c.attendees = int(field.value)
elif field.name == 'URL':
c.content_url = field.value
| []
| []
| [
"TRELLO_API_KEY",
"TRELLO_BOARD",
"TRELLO_API_SECRET",
"TRELLO_TOKEN",
"HIGHLIGHTS_BOARD"
]
| [] | ["TRELLO_API_KEY", "TRELLO_BOARD", "TRELLO_API_SECRET", "TRELLO_TOKEN", "HIGHLIGHTS_BOARD"] | python | 5 | 0 | |
lib/ibmq_login.py | import os
from qiskit import IBMQ
def login():
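    """Enable the IBM Quantum account using the token from the IBMQ_TOKEN environment variable; returns True on success, False otherwise."""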
try:
token = os.environ['IBMQ_TOKEN']
    except KeyError:
print("IBMQ_TOKEN environment variable not found. You must set this variable to the account token string.")
return False
print("Connecting to IBMQ account...")
try:
IBMQ.enable_account(token)
    except Exception as exc:
        print("Unable to access IBMQ account:", exc)
return False
return True
| []
| []
| [
"IBMQ_TOKEN"
]
| [] | ["IBMQ_TOKEN"] | python | 1 | 0 | |
kube/kube.go | package kube
import (
"context"
"log"
"os"
"path/filepath"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
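// KubeCLient provides helper methods for interacting with the Kubernetes API.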
type KubeCLient struct{}
// create a Kubernetes clientset for cluster auth
func (k *KubeCLient) CreateClientSet() *kubernetes.Clientset {
// When running the binary inside of a pod in a cluster,
// the kubelet will automatically mount a service account into the container at:
// /var/run/secrets/kubernetes.io/serviceaccount.
// It replaces the kubeconfig file and is turned into a rest.Config via the rest.InClusterConfig() method
config, err := rest.InClusterConfig()
if err != nil {
		// fallback to a kubeconfig file; filepath.Join does not expand "~",
		// so resolve the user's home directory explicitly
		home, _ := os.UserHomeDir()
		kubeconfig := filepath.Join(home, ".kube", "config")
if envvar := os.Getenv("K8S_KUBECONFIG"); len(envvar) > 0 {
kubeconfig = envvar
}
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
log.Printf("The kubeconfig cannot be loaded: %v\n", err)
os.Exit(1)
}
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.Printf("Error: %v\n", err)
}
return clientset
}
// get specific kubernetes secret based on the secret name
func (k *KubeCLient) GetSecret(c *kubernetes.Clientset, namespace, secretname string) *v1.Secret {
sec, err := c.CoreV1().Secrets(namespace).Get(context.TODO(), secretname, metav1.GetOptions{})
if err != nil {
log.Printf("\"%v\" Secret not found in \"%v\" Namespace\n",
secretname, namespace)
log.Printf("%v\n", err)
}
return sec
}
// get kubernetes secret data field
func (k *KubeCLient) GetSecretData(c *kubernetes.Clientset, namespace, secretname, datafield string) []byte {
sec, err := c.CoreV1().Secrets(namespace).Get(context.TODO(), secretname, metav1.GetOptions{})
if err != nil {
log.Printf("Data-field from \"%v\" Secret not found in \"%v\" Namespace\n",
secretname, namespace)
log.Printf("%v\n", err)
}
return sec.Data[datafield]
}
// update an specific kubernetes secret
func (k *KubeCLient) UpdateSecret(c *kubernetes.Clientset, namespace string, secret *v1.Secret) *v1.Secret {
sec, err := c.CoreV1().Secrets(namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
if err != nil {
log.Printf("Error Updating \"%v\" Secret in \"%v\" Namespace \n",
secret.Name, namespace)
log.Printf("%v\n", err)
}
log.Printf("Updated \"%v\" Secret in \"%v\" Namespace\n",
secret.Name, namespace)
return sec
}
// create a new kubernetes secret
func (k *KubeCLient) CreateSecret(c *kubernetes.Clientset, namespace string, secret *v1.Secret) *v1.Secret {
sec, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
if err != nil {
log.Printf("Error Creating \"%v\" Secret in \"%v\" Namespace \n",
secret.Name, namespace)
log.Printf("%v\n", err)
}
log.Printf("Created \"%v\" Secret in \"%v\" Namespace\n",
secret.Name, namespace)
return sec
}
// delete kubernetes secret
func (k *KubeCLient) DeleteSecret(c *kubernetes.Clientset, namespace string, secret *v1.Secret) {
deletePolicy := metav1.DeletePropagationForeground
err := c.CoreV1().Secrets(namespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{PropagationPolicy: &deletePolicy})
if err != nil {
log.Printf("Error Deleting \"%v\" in \"%v\" Namespace \n", secret.Name, namespace)
log.Printf("%v\n", err)
}
log.Printf("Deleted \"%v\" Secret in \"%v\" Namespace\n",
secret.Name, namespace)
}
// get specific kubernetes configmap based on the configmap name
func (k *KubeCLient) GetConfigmap(c *kubernetes.Clientset, namespace, cmname string) *v1.ConfigMap {
cm, err := c.CoreV1().ConfigMaps(namespace).Get(context.TODO(), cmname, metav1.GetOptions{})
if err != nil {
log.Printf("\"%v\" Configmap not found in \"%v\" Namespace\n",
cmname, namespace)
log.Printf("%v\n", err)
}
return cm
}
// get all namespace names and return in a slice of type string
func (k *KubeCLient) GetAllNamespaceNames(c *kubernetes.Clientset) []string {
var namespaces []string
ns, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
log.Printf("Error Getting Namespaces: %v\n", err)
}
for _, n := range ns.Items {
namespaces = append(namespaces, n.Name)
}
return namespaces
}
// get all pod names and return in a slice of type string
func (k *KubeCLient) GetAllPodNames(c *kubernetes.Clientset, namespace string) []string {
var pods []string
ns, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
log.Printf("Error Getting Pods: %v\n", err)
}
for _, p := range ns.Items {
pods = append(pods, p.Name)
}
return pods
}
// delete a specific kubernetes pod based on the podname
func (k *KubeCLient) DeletePod(c *kubernetes.Clientset, namespace, podname string) {
err := c.CoreV1().Pods(namespace).Delete(context.TODO(), podname, metav1.DeleteOptions{})
	if err != nil {
		log.Printf("Error Deleting \"%v\" Pod: %v\n", podname, err)
	}
	log.Printf("Deleted \"%v\" Pod in \"%v\" Namespace\n",
		podname, namespace)
}
| [
"\"K8S_KUBECONFIG\""
]
| []
| [
"K8S_KUBECONFIG"
]
| [] | ["K8S_KUBECONFIG"] | go | 1 | 0 | |
tests/providers/google/cloud/operators/test_video_intelligence_system.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from airflow.providers.google.cloud.example_dags.example_video_intelligence import GCP_BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_VIDEO_SOURCE_URL = "https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4"
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudVideoIntelligenceExampleDagsTest(GoogleSystemTest):
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
self.execute_with_ctx(
cmd=["bash", "-c", f"curl {GCP_VIDEO_SOURCE_URL} | gsutil cp - gs://{GCP_BUCKET_NAME}/video.mp4"],
key=GCP_GCS_KEY,
)
super().setUp()
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
def test_example_dag(self):
self.run_dag('example_gcp_video_intelligence', CLOUD_DAG_FOLDER)
| []
| []
| [
"GCP_PROJECT_ID"
]
| [] | ["GCP_PROJECT_ID"] | python | 1 | 0 | |
test/compare/testdata/misc/struct_cmp.dir/main.go | package main
import "fmt"
import "struct_cmp.dir/b"
var A1 = struct{ F int }{}
var A2 = struct{ f int }{}
var A3 = struct {
F int `k:"v"`
}{}
var A4 = struct {
F int `k:"v2"`
}{}
func main() {
fmt.Println(A1)
fmt.Println(b.B1)
fmt.Println(A1 == b.B1)
// This comparison cannot be done (type checking error): types are
// different. This is the expected behavior.
//
// fmt.Println(A2 == b.B2)
i1 := interface{}(A2)
i2 := interface{}(b.B2)
fmt.Println(i1 == i2)
fmt.Println(A3)
fmt.Println(b.B3)
fmt.Println(A3 == b.B3)
// This comparison cannot be done (type checking error): types are
// different. This is the expected behavior.
//
// fmt.Println(A4 == b.B3)
i3 := interface{}(A1)
i4 := interface{}(b.B3)
fmt.Println(i3 == i4)
}
| []
| []
| []
| [] | [] | go | null | null | null |
compiler/tests/06_hierarchical_predecode2x4_test.py | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys, os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class hierarchical_predecode2x4_test(openram_test):
def runTest(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
debug.info(1, "Testing sample for hierarchy_predecode2x4")
a = factory.create(module_type="hierarchical_predecode2x4")
self.local_check(a)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| []
| []
| [
"OPENRAM_HOME"
]
| [] | ["OPENRAM_HOME"] | python | 1 | 0 | |
core/loadbalancer/loadbalancer_test.go | package loadbalancer_test
// Forked from github.com/micro/go-micro
// Some parts of this file have been modified to make it functional in this package
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/go-chassis/go-chassis/core/common"
"github.com/go-chassis/go-chassis/core/config"
"github.com/go-chassis/go-chassis/core/lager"
"github.com/go-chassis/go-chassis/core/loadbalancer"
"github.com/go-chassis/go-chassis/core/registry"
_ "github.com/go-chassis/go-chassis/core/registry/servicecenter"
"github.com/go-chassis/go-chassis/pkg/runtime"
"github.com/go-chassis/go-chassis/pkg/util/tags"
"github.com/stretchr/testify/assert"
)
func TestEnable(t *testing.T) {
p := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", filepath.Join(p, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "client"))
t.Log(os.Getenv("CHASSIS_HOME"))
lager.Initialize("", "INFO", "", "size", true, 1, 10, 7)
config.Init()
LBstr := make(map[string]string)
LBstr["name"] = "RoundRobin"
config.GetLoadBalancing().Strategy = LBstr
loadbalancer.Enable()
assert.Equal(t, "RoundRobin", config.GetLoadBalancing().Strategy["name"])
LBstr["name"] = ""
config.GetLoadBalancing().Strategy = LBstr
loadbalancer.Enable()
assert.Equal(t, "", config.GetLoadBalancing().Strategy["name"])
LBstr["name"] = "ABC"
config.GetLoadBalancing().Strategy = LBstr
loadbalancer.Enable()
assert.Equal(t, "ABC", config.GetLoadBalancing().Strategy["name"])
}
func TestBuildStrategy(t *testing.T) {
t.Log("testing default selector")
testData1 := []*registry.MicroService{
{
ServiceName: "test1",
AppID: "default",
Level: "FRONT",
Version: "1.0",
Status: "UP",
},
}
testData2 := []*registry.MicroServiceInstance{
{
InstanceID: "01",
HostName: "test1",
Status: "UP",
EndpointsMap: map[string]string{"rest": "127.0.0.1", "highway": "10.0.0.3:8080"},
},
{
InstanceID: "02",
HostName: "test2",
Status: "UP",
EndpointsMap: map[string]string{"highway": "10.0.0.3:8080"},
},
}
p := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", filepath.Join(p, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server"))
lager.Initialize("", "INFO", "", "size", true, 1, 10, 7)
config.Init()
registry.Enable()
registry.DoRegister()
t.Log("System init finished")
sid, _, err := registry.DefaultRegistrator.RegisterServiceAndInstance(testData1[0], testData2[0])
assert.NoError(t, err)
_, _, err = registry.DefaultRegistrator.RegisterServiceAndInstance(testData1[0], testData2[1])
assert.NoError(t, err)
loadbalancer.Enable()
registry.Enable()
registry.DoRegister()
runtime.ServiceID = sid
t.Log(runtime.ServiceID)
time.Sleep(1 * time.Second)
s, err := loadbalancer.BuildStrategy(sid, "test1", "", common.LatestVersion, nil, nil, utiltags.Tags{})
assert.NoError(t, err)
ins, err := s.Pick()
t.Log(ins.EndpointsMap)
assert.NoError(t, err)
ins, err = s.Pick()
assert.NoError(t, err)
t.Log(ins.EndpointsMap)
s, err = loadbalancer.BuildStrategy(sid, "fake", "", "0.1", nil, nil, utiltags.Tags{})
assert.Error(t, err)
t.Log(err)
switch err.(type) {
case loadbalancer.LBError:
default:
t.Log("Should return lb err")
t.Fail()
}
}
func BenchmarkDefaultSelector_Select(b *testing.B) {
p := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", filepath.Join(p, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "client"))
lager.Initialize("", "INFO", "", "size", true, 1, 10, 7)
config.Init()
registry.Enable()
registry.DoRegister()
loadbalancer.Enable()
testData1 := []*registry.MicroService{
{
ServiceName: "test2",
AppID: "default",
Level: "FRONT",
Version: "1.0",
Status: "UP",
},
}
testData2 := []*registry.MicroServiceInstance{
{
HostName: "test1",
Status: "UP",
EndpointsMap: map[string]string{"highway": "10.0.0.4:1234"},
},
{
HostName: "test2",
Status: "UP",
EndpointsMap: map[string]string{"highway": "10.0.0.3:1234"},
},
}
_, _, _ = registry.DefaultRegistrator.RegisterServiceAndInstance(testData1[0], testData2[0])
_, _, _ = registry.DefaultRegistrator.RegisterServiceAndInstance(testData1[0], testData2[1])
time.Sleep(1 * time.Second)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = loadbalancer.BuildStrategy(runtime.ServiceID, "test2", "", "1.0", nil, nil, utiltags.Tags{})
}
}
| [
"\"GOPATH\"",
"\"CHASSIS_HOME\"",
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"CHASSIS_HOME"
]
| [] | ["GOPATH", "CHASSIS_HOME"] | go | 2 | 0 | |
src/main/java/com/wesleyhome/stats/feed/request/api/credentials/EnvironmentCredentials.java | package com.wesleyhome.stats.feed.request.api.credentials;
import com.wesleyhome.stats.feed.request.api.ApiCredentials;
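/**
 * {@link ApiCredentials} implementation that reads the API token and password
 * from the MYSPORTSFEED_API_TOKEN and MYSPORTSFEED_API_PASSWORD environment variables.
 */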
public class EnvironmentCredentials implements ApiCredentials {
@Override
public String getApiToken() {
return System.getenv("MYSPORTSFEED_API_TOKEN");
}
@Override
public String getPassword() {
return System.getenv("MYSPORTSFEED_API_PASSWORD");
}
}
| [
"\"MYSPORTSFEED_API_TOKEN\"",
"\"MYSPORTSFEED_API_PASSWORD\""
]
| []
| [
"MYSPORTSFEED_API_TOKEN",
"MYSPORTSFEED_API_PASSWORD"
]
| [] | ["MYSPORTSFEED_API_TOKEN", "MYSPORTSFEED_API_PASSWORD"] | java | 2 | 0 | |
config/config.go | // Copyright (c) 2014-2019 Ludovic Fauvet
// Licensed under the MIT license
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/opensourceways/mirrorbits/core"
"github.com/op/go-logging"
"gopkg.in/yaml.v2"
)
var (
// TEMPLATES_PATH is set at compile time
TEMPLATES_PATH = ""
)
var (
log = logging.MustGetLogger("main")
config *Configuration
configMutex sync.RWMutex
subscribers []chan bool
subscribersLock sync.RWMutex
)
func defaultConfig() Configuration {
return Configuration{
Repository: "",
Templates: TEMPLATES_PATH,
LocalJSPath: "",
OutputMode: "auto",
ListenAddress: ":8080",
Gzip: false,
RedisAddress: "127.0.0.1:6379",
RedisPassword: "",
RedisDB: 0,
LogDir: "",
TraceFileLocation: "",
GeoipDatabasePath: "/usr/share/GeoIP/",
ConcurrentSync: 5,
ScanInterval: 30,
CheckInterval: 1,
RepositoryScanInterval: 5,
MaxLinkHeaders: 10,
FixTimezoneOffsets: false,
Hashes: hashing{
SHA1: false,
SHA256: true,
MD5: false,
},
DisallowRedirects: false,
WeightDistributionRange: 1.5,
DisableOnMissingFile: false,
RPCListenAddress: "localhost:3390",
RPCPassword: "",
SchemaStrictMatch: true,
}
}
// Configuration contains all the option available in the yaml file
type Configuration struct {
Repository string `yaml:"Repository"`
Templates string `yaml:"Templates"`
LocalJSPath string `yaml:"LocalJSPath"`
OutputMode string `yaml:"OutputMode"`
ListenAddress string `yaml:"ListenAddress"`
Gzip bool `yaml:"Gzip"`
RedisAddress string `yaml:"RedisAddress"`
RedisPassword string `yaml:"RedisPassword"`
RedisDB int `yaml:"RedisDB"`
LogDir string `yaml:"LogDir"`
TraceFileLocation string `yaml:"TraceFileLocation"`
GeoipDatabasePath string `yaml:"GeoipDatabasePath"`
ConcurrentSync int `yaml:"ConcurrentSync"`
ScanInterval int `yaml:"ScanInterval"`
CheckInterval int `yaml:"CheckInterval"`
RepositoryScanInterval int `yaml:"RepositoryScanInterval"`
MaxLinkHeaders int `yaml:"MaxLinkHeaders"`
FixTimezoneOffsets bool `yaml:"FixTimezoneOffsets"`
Hashes hashing `yaml:"Hashes"`
DisallowRedirects bool `yaml:"DisallowRedirects"`
WeightDistributionRange float32 `yaml:"WeightDistributionRange"`
DisableOnMissingFile bool `yaml:"DisableOnMissingFile"`
Fallbacks []fallback `yaml:"Fallbacks"`
SchemaStrictMatch bool `yaml:"SchemaStrictMatch"`
RedisSentinelMasterName string `yaml:"RedisSentinelMasterName"`
RedisSentinels []sentinels `yaml:"RedisSentinels"`
RPCListenAddress string `yaml:"RPCListenAddress"`
RPCPassword string `yaml:"RPCPassword"`
}
type fallback struct {
URL string `yaml:"URL"`
CountryCode string `yaml:"CountryCode"`
ContinentCode string `yaml:"ContinentCode"`
}
type sentinels struct {
Host string `yaml:"Host"`
}
type hashing struct {
SHA1 bool `yaml:"SHA1"`
SHA256 bool `yaml:"SHA256"`
MD5 bool `yaml:"MD5"`
}
// LoadConfig loads the configuration file if it has not yet been loaded
func LoadConfig() {
if config != nil {
return
}
err := ReloadConfig()
if err != nil {
log.Fatal(err)
}
}
// ReloadConfig reloads the configuration file and update it globally
func ReloadConfig() error {
if core.ConfigFile == "" {
if fileExists("/etc/mirrorbits.conf") {
core.ConfigFile = "/etc/mirrorbits.conf"
}
}
content, err := ioutil.ReadFile(core.ConfigFile)
if err != nil {
fmt.Println("Configuration could not be found.\n\tUse -config <path>")
os.Exit(1)
}
if os.Getenv("DEBUG") != "" {
fmt.Println("Reading configuration from", core.ConfigFile)
}
c := defaultConfig()
// Overload the default configuration with the user's one
err = yaml.Unmarshal(content, &c)
if err != nil {
return fmt.Errorf("%s in %s", err, core.ConfigFile)
}
// Sanitize
if c.WeightDistributionRange <= 0 {
return fmt.Errorf("WeightDistributionRange must be > 0")
}
if !isInSlice(c.OutputMode, []string{"auto", "json", "redirect"}) {
return fmt.Errorf("Config: outputMode can only be set to 'auto', 'json' or 'redirect'")
}
if c.Repository == "" {
return fmt.Errorf("Path to local repository not configured (see mirrorbits.conf)")
}
c.Repository, err = filepath.Abs(c.Repository)
if err != nil {
return fmt.Errorf("Invalid local repository path: %s", err)
}
if c.RepositoryScanInterval < 0 {
c.RepositoryScanInterval = 0
}
if config != nil &&
(c.RedisAddress != config.RedisAddress ||
c.RedisPassword != config.RedisPassword ||
!testSentinelsEq(c.RedisSentinels, config.RedisSentinels)) {
// TODO reload redis connections
// Currently established connections will be updated only in case of disconnection
}
//if config != nil &&
// (c.RedisAddress != GetRedisAddress() ||
// c.RedisPassword != GetRedisPwd() ||
// !testSentinelsEq(c.RedisSentinels, config.RedisSentinels)) {
// // TODO reload redis connections
// // Currently established connections will be updated only in case of disconnection
//}
// Lock the pointer during the swap
configMutex.Lock()
config = &c
configMutex.Unlock()
// Notify all subscribers that the configuration has been reloaded
notifySubscribers()
return nil
}
// GetConfig returns a pointer to a configuration object
// FIXME reading from the pointer could cause a race!
func GetConfig() *Configuration {
configMutex.RLock()
defer configMutex.RUnlock()
if config == nil {
panic("Configuration not loaded")
}
return config
}
// SetConfiguration is only used for testing purpose
func SetConfiguration(c *Configuration) {
config = c
}
// SubscribeConfig allows subscribers to get notified when
// the configuration is updated.
func SubscribeConfig(subscriber chan bool) {
subscribersLock.Lock()
defer subscribersLock.Unlock()
subscribers = append(subscribers, subscriber)
}
func notifySubscribers() {
subscribersLock.RLock()
defer subscribersLock.RUnlock()
for _, subscriber := range subscribers {
select {
case subscriber <- true:
default:
// Don't block if the subscriber is unavailable
// and discard the message.
}
}
}
func fileExists(filename string) bool {
_, err := os.Stat(filename)
return err == nil
}
func testSentinelsEq(a, b []sentinels) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i].Host != b[i].Host {
return false
}
}
return true
}
//DUPLICATE
func isInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
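// GetRedisAddress returns the Redis "host:port" address read from the REDIS_ADDRESS_PORT environment variable.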
func GetRedisAddress() string {
return os.Getenv("REDIS_ADDRESS_PORT")
}
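// GetRedisPwd returns the Redis password read from the REDIS_PWD environment variable.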
func GetRedisPwd() string {
return os.Getenv("REDIS_PWD")
} | [
"\"DEBUG\"",
"\"REDIS_ADDRESS_PORT\"",
"\"REDIS_PWD\""
]
| []
| [
"REDIS_PWD",
"REDIS_ADDRESS_PORT",
"DEBUG"
]
| [] | ["REDIS_PWD", "REDIS_ADDRESS_PORT", "DEBUG"] | go | 3 | 0 | |
sqlx_test.go | // The following environment variables, if set, will be used:
//
// * SQLX_SQLITE_DSN
// * SQLX_POSTGRES_DSN
// * SQLX_MYSQL_DSN
//
// Set any of these variables to 'skip' to skip them. Note that for MySQL,
// the string '?parseTime=true' will be appended to the DSN if it's not there
// already.
//
package sqlx
import (
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"log"
"os"
"reflect"
"strings"
"testing"
"time"
"github.com/Blank-Xu/sqlx/reflectx"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
/* compile time checks that Db, Tx, Stmt (qStmt) implement expected interfaces */
var _, _ Ext = &DB{}, &Tx{}
var _, _ ColScanner = &Row{}, &Rows{}
var _ Queryer = &qStmt{}
var _ Execer = &qStmt{}
var TestPostgres = true
var TestSqlite = true
var TestMysql = true
var sldb *DB
var pgdb *DB
var mysqldb *DB
func init() {
ConnectAll()
}
func ConnectAll() {
var err error
pgdsn := os.Getenv("SQLX_POSTGRES_DSN")
mydsn := os.Getenv("SQLX_MYSQL_DSN")
sqdsn := os.Getenv("SQLX_SQLITE_DSN")
TestPostgres = pgdsn != "skip"
TestMysql = mydsn != "skip"
TestSqlite = sqdsn != "skip"
if !strings.Contains(mydsn, "parseTime=true") {
mydsn += "?parseTime=true"
}
if TestPostgres {
pgdb, err = Connect("postgres", pgdsn)
if err != nil {
fmt.Printf("Disabling PG tests:\n %v\n", err)
TestPostgres = false
}
} else {
fmt.Println("Disabling Postgres tests.")
}
if TestMysql {
mysqldb, err = Connect("mysql", mydsn)
if err != nil {
fmt.Printf("Disabling MySQL tests:\n %v", err)
TestMysql = false
}
} else {
fmt.Println("Disabling MySQL tests.")
}
if TestSqlite {
sldb, err = Connect("sqlite3", sqdsn)
if err != nil {
fmt.Printf("Disabling SQLite:\n %v", err)
TestSqlite = false
}
} else {
fmt.Println("Disabling SQLite tests.")
}
}
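// Schema holds a matching pair of create/drop DDL scripts used to set up and
// tear down the test tables on each database backend.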
type Schema struct {
create string
drop string
}
func (s Schema) Postgres() (string, string) {
return s.create, s.drop
}
func (s Schema) MySQL() (string, string) {
return strings.Replace(s.create, `"`, "`", -1), s.drop
}
func (s Schema) Sqlite3() (string, string) {
return strings.Replace(s.create, `now()`, `CURRENT_TIMESTAMP`, -1), s.drop
}
var defaultSchema = Schema{
create: `
CREATE TABLE person (
first_name text,
last_name text,
email text,
added_at timestamp default now()
);
CREATE TABLE place (
country text,
city text NULL,
telcode integer
);
CREATE TABLE capplace (
"COUNTRY" text,
"CITY" text NULL,
"TELCODE" integer
);
CREATE TABLE nullperson (
first_name text NULL,
last_name text NULL,
email text NULL
);
CREATE TABLE employees (
name text,
id integer,
boss_id integer
);
`,
drop: `
drop table person;
drop table place;
drop table capplace;
drop table nullperson;
drop table employees;
`,
}
type Person struct {
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
Email string
AddedAt time.Time `db:"added_at"`
}
type Place struct {
Country string
City sql.NullString
TelCode int
}
type PlacePtr struct {
Country string
City *string
TelCode int
}
type PersonPlace struct {
Person
Place
}
type PersonPlacePtr struct {
*Person
*Place
}
type EmbedConflict struct {
FirstName string `db:"first_name"`
Person
}
type SliceMember struct {
Country string
City sql.NullString
TelCode int
People []Person `db:"-"`
Addresses []Place `db:"-"`
}
// Note that because of field map caching, we need a new type here
// if we've used Place already somewhere in sqlx
type CPlace Place
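// MultiExec splits query on ";\n" boundaries and executes each statement
// individually, printing any per-statement errors.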
func MultiExec(e Execer, query string) {
stmts := strings.Split(query, ";\n")
if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 {
stmts = stmts[:len(stmts)-1]
}
for _, s := range stmts {
_, err := e.Exec(s)
if err != nil {
fmt.Println(err, s)
}
}
}
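// RunWithSchema creates the schema on every enabled test database, runs the
// given test function against it, and drops the schema again afterwards.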
func RunWithSchema(schema Schema, t *testing.T, test func(db *DB, t *testing.T)) {
runner := func(db *DB, t *testing.T, create, drop string) {
defer func() {
MultiExec(db, drop)
}()
MultiExec(db, create)
test(db, t)
}
if TestPostgres {
create, drop := schema.Postgres()
runner(pgdb, t, create, drop)
}
if TestSqlite {
create, drop := schema.Sqlite3()
runner(sldb, t, create, drop)
}
if TestMysql {
create, drop := schema.MySQL()
runner(mysqldb, t, create, drop)
}
}
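// loadDefaultFixture populates the default schema with a small set of people,
// places and employees inside a single transaction.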
func loadDefaultFixture(db *DB, t *testing.T) {
tx := db.MustBegin()
tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "[email protected]")
tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "[email protected]")
tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
if db.DriverName() == "mysql" {
tx.MustExec(tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27")
} else {
tx.MustExec(tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27")
}
tx.MustExec(tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444")
tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444")
tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444")
_ = tx.Commit()
}
// Test a new backwards compatible feature, that missing scan destinations
// will silently scan into sql.RawBytes rather than failing/panicking
func TestMissingNames(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
type PersonPlus struct {
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
Email string
//AddedAt time.Time `db:"added_at"`
}
// test Select first
pps := []PersonPlus{}
// pps lacks added_at destination
err := db.Select(&pps, "SELECT * FROM person")
if err == nil {
t.Error("Expected missing name from Select to fail, but it did not.")
}
// test Get
pp := PersonPlus{}
err = db.Get(&pp, "SELECT * FROM person LIMIT 1")
if err == nil {
t.Error("Expected missing name Get to fail, but it did not.")
}
// test naked StructScan
pps = []PersonPlus{}
rows, err := db.Query("SELECT * FROM person LIMIT 1")
if err != nil {
t.Fatal(err)
}
rows.Next()
err = StructScan(rows, &pps)
if err == nil {
t.Error("Expected missing name in StructScan to fail, but it did not.")
}
rows.Close()
// now try various things with unsafe set.
db = db.Unsafe()
pps = []PersonPlus{}
err = db.Select(&pps, "SELECT * FROM person")
if err != nil {
t.Error(err)
}
// test Get
pp = PersonPlus{}
err = db.Get(&pp, "SELECT * FROM person LIMIT 1")
if err != nil {
t.Error(err)
}
// test naked StructScan
pps = []PersonPlus{}
rowsx, err := db.Queryx("SELECT * FROM person LIMIT 1")
if err != nil {
t.Fatal(err)
}
rowsx.Next()
err = StructScan(rowsx, &pps)
if err != nil {
t.Error(err)
}
rowsx.Close()
// test Named stmt
if !isUnsafe(db) {
t.Error("Expected db to be unsafe, but it isn't")
}
nstmt, err := db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`)
if err != nil {
t.Fatal(err)
}
// its internal stmt should be marked unsafe
if !nstmt.Stmt.unsafe {
t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety")
}
pps = []PersonPlus{}
err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"})
if err != nil {
t.Fatal(err)
}
if len(pps) != 1 {
t.Errorf("Expected 1 person back, got %d", len(pps))
}
// test it with a safe db
db.unsafe = false
if isUnsafe(db) {
t.Error("expected db to be safe but it isn't")
}
nstmt, err = db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`)
if err != nil {
t.Fatal(err)
}
// it should be safe
if isUnsafe(nstmt) {
t.Error("NamedStmt did not inherit safety")
}
nstmt.Unsafe()
if !isUnsafe(nstmt) {
t.Error("expected newly unsafed NamedStmt to be unsafe")
}
pps = []PersonPlus{}
err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"})
if err != nil {
t.Fatal(err)
}
if len(pps) != 1 {
t.Errorf("Expected 1 person back, got %d", len(pps))
}
})
}
func TestEmbeddedStructs(t *testing.T) {
type Loop1 struct{ Person }
type Loop2 struct{ Loop1 }
type Loop3 struct{ Loop2 }
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
peopleAndPlaces := []PersonPlace{}
err := db.Select(
&peopleAndPlaces,
`SELECT person.*, place.* FROM
person natural join place`)
if err != nil {
t.Fatal(err)
}
for _, pp := range peopleAndPlaces {
if len(pp.Person.FirstName) == 0 {
				t.Errorf("Expected non-zero-length first name.")
}
if len(pp.Place.Country) == 0 {
				t.Errorf("Expected non-zero-length country.")
}
}
// test embedded structs with StructScan
rows, err := db.Queryx(
`SELECT person.*, place.* FROM
person natural join place`)
if err != nil {
t.Error(err)
}
perp := PersonPlace{}
rows.Next()
err = rows.StructScan(&perp)
if err != nil {
t.Error(err)
}
if len(perp.Person.FirstName) == 0 {
			t.Errorf("Expected non-zero-length first name.")
}
if len(perp.Place.Country) == 0 {
			t.Errorf("Expected non-zero-length country.")
}
rows.Close()
// test the same for embedded pointer structs
peopleAndPlacesPtrs := []PersonPlacePtr{}
err = db.Select(
&peopleAndPlacesPtrs,
`SELECT person.*, place.* FROM
person natural join place`)
if err != nil {
t.Fatal(err)
}
for _, pp := range peopleAndPlacesPtrs {
if len(pp.Person.FirstName) == 0 {
				t.Errorf("Expected non-zero-length first name.")
}
if len(pp.Place.Country) == 0 {
				t.Errorf("Expected non-zero-length country.")
}
}
// test "deep nesting"
l3s := []Loop3{}
err = db.Select(&l3s, `select * from person`)
if err != nil {
t.Fatal(err)
}
for _, l3 := range l3s {
if len(l3.Loop2.Loop1.Person.FirstName) == 0 {
				t.Errorf("Expected non-zero-length first name.")
}
}
// test "embed conflicts"
ec := []EmbedConflict{}
err = db.Select(&ec, `select * from person`)
// I'm torn between erroring here or having some kind of working behavior
// in order to allow for more flexibility in destination structs
if err != nil {
t.Errorf("Was not expecting an error on embed conflicts.")
}
})
}
func TestJoinQuery(t *testing.T) {
type Employee struct {
Name string
ID int64
// BossID is an id into the employee table
BossID sql.NullInt64 `db:"boss_id"`
}
type Boss Employee
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
var employees []struct {
Employee
Boss `db:"boss"`
}
err := db.Select(
&employees,
`SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees
JOIN employees AS boss ON employees.boss_id = boss.id`)
if err != nil {
t.Fatal(err)
}
for _, em := range employees {
if len(em.Employee.Name) == 0 {
				t.Errorf("Expected non-zero-length name.")
}
if em.Employee.BossID.Int64 != em.Boss.ID {
t.Errorf("Expected boss ids to match")
}
}
})
}
func TestJoinQueryNamedPointerStructs(t *testing.T) {
type Employee struct {
Name string
ID int64
// BossID is an id into the employee table
BossID sql.NullInt64 `db:"boss_id"`
}
type Boss Employee
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
var employees []struct {
Emp1 *Employee `db:"emp1"`
Emp2 *Employee `db:"emp2"`
*Boss `db:"boss"`
}
err := db.Select(
&employees,
`SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id",
emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id",
boss.id "boss.id", boss.name "boss.name" FROM employees AS emp
JOIN employees AS boss ON emp.boss_id = boss.id
`)
if err != nil {
t.Fatal(err)
}
for _, em := range employees {
if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 {
				t.Errorf("Expected non-zero-length name.")
}
if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID {
t.Errorf("Expected boss ids to match")
}
}
})
}
func TestSelectSliceMapTime(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
rows, err := db.Queryx("SELECT * FROM person")
if err != nil {
t.Fatal(err)
}
for rows.Next() {
_, err := rows.SliceScan()
if err != nil {
t.Error(err)
}
}
rows, err = db.Queryx("SELECT * FROM person")
if err != nil {
t.Fatal(err)
}
for rows.Next() {
m := map[string]interface{}{}
err := rows.MapScan(m)
if err != nil {
t.Error(err)
}
}
})
}
func TestNilReceiver(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
var p *Person
err := db.Get(p, "SELECT * FROM person LIMIT 1")
if err == nil {
t.Error("Expected error when getting into nil struct ptr.")
}
var pp *[]Person
err = db.Select(pp, "SELECT * FROM person")
if err == nil {
t.Error("Expected an error when selecting into nil slice ptr.")
}
})
}
func TestNamedQuery(t *testing.T) {
var schema = Schema{
create: `
CREATE TABLE place (
id integer PRIMARY KEY,
name text NULL
);
CREATE TABLE person (
first_name text NULL,
last_name text NULL,
email text NULL
);
CREATE TABLE placeperson (
first_name text NULL,
last_name text NULL,
email text NULL,
place_id integer NULL
);
CREATE TABLE jsperson (
"FIRST" text NULL,
last_name text NULL,
"EMAIL" text NULL
);`,
drop: `
drop table person;
drop table jsperson;
drop table place;
drop table placeperson;
`,
}
RunWithSchema(schema, t, func(db *DB, t *testing.T) {
type Person struct {
FirstName sql.NullString `db:"first_name"`
LastName sql.NullString `db:"last_name"`
Email sql.NullString
}
p := Person{
FirstName: sql.NullString{String: "ben", Valid: true},
LastName: sql.NullString{String: "doe", Valid: true},
Email: sql.NullString{String: "[email protected]", Valid: true},
}
q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`
_, err := db.NamedExec(q1, p)
if err != nil {
log.Fatal(err)
}
p2 := &Person{}
rows, err := db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", p)
if err != nil {
log.Fatal(err)
}
for rows.Next() {
err = rows.StructScan(p2)
if err != nil {
t.Error(err)
}
if p2.FirstName.String != "ben" {
t.Error("Expected first name of `ben`, got " + p2.FirstName.String)
}
if p2.LastName.String != "doe" {
				t.Error("Expected last name of `doe`, got " + p2.LastName.String)
}
}
// these are tests for #73; they verify that named queries work if you've
// changed the db mapper. This code checks both NamedQuery "ad-hoc" style
// queries and NamedStmt queries, which use different code paths internally.
old := *db.Mapper
type JSONPerson struct {
FirstName sql.NullString `json:"FIRST"`
LastName sql.NullString `json:"last_name"`
Email sql.NullString
}
jp := JSONPerson{
FirstName: sql.NullString{String: "ben", Valid: true},
LastName: sql.NullString{String: "smith", Valid: true},
Email: sql.NullString{String: "[email protected]", Valid: true},
}
db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper)
// prepare queries for case sensitivity to test our ToUpper function.
// postgres and sqlite accept "", but mysql uses ``; since Go's multi-line
// strings are `` we use "" by default and swap out for MySQL
pdb := func(s string, db *DB) string {
if db.DriverName() == "mysql" {
return strings.Replace(s, `"`, "`", -1)
}
return s
}
q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)`
_, err = db.NamedExec(pdb(q1, db), jp)
if err != nil {
t.Fatal(err, db.DriverName())
}
// Checks that a person pulled out of the db matches the one we put in
check := func(t *testing.T, rows *Rows) {
jp = JSONPerson{}
for rows.Next() {
err = rows.StructScan(&jp)
if err != nil {
t.Error(err)
}
if jp.FirstName.String != "ben" {
t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName())
}
if jp.LastName.String != "smith" {
t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName())
}
if jp.Email.String != "[email protected]" {
					t.Errorf("Expected Email of `[email protected]`, got `%s` (%s)", jp.Email.String, db.DriverName())
}
}
}
ns, err := db.PrepareNamed(pdb(`
SELECT * FROM jsperson
WHERE
"FIRST"=:FIRST AND
last_name=:last_name AND
"EMAIL"=:EMAIL
`, db))
if err != nil {
t.Fatal(err)
}
rows, err = ns.Queryx(jp)
if err != nil {
t.Fatal(err)
}
check(t, rows)
// Check exactly the same thing, but with db.NamedQuery, which does not go
// through the PrepareNamed/NamedStmt path.
rows, err = db.NamedQuery(pdb(`
SELECT * FROM jsperson
WHERE
"FIRST"=:FIRST AND
last_name=:last_name AND
"EMAIL"=:EMAIL
`, db), jp)
if err != nil {
t.Fatal(err)
}
check(t, rows)
db.Mapper = &old
// Test nested structs
type Place struct {
ID int `db:"id"`
Name sql.NullString `db:"name"`
}
type PlacePerson struct {
FirstName sql.NullString `db:"first_name"`
LastName sql.NullString `db:"last_name"`
Email sql.NullString
Place Place `db:"place"`
}
pl := Place{
Name: sql.NullString{String: "myplace", Valid: true},
}
pp := PlacePerson{
FirstName: sql.NullString{String: "ben", Valid: true},
LastName: sql.NullString{String: "doe", Valid: true},
Email: sql.NullString{String: "[email protected]", Valid: true},
}
q2 := `INSERT INTO place (id, name) VALUES (1, :name)`
_, err = db.NamedExec(q2, pl)
if err != nil {
log.Fatal(err)
}
id := 1
pp.Place.ID = id
q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)`
_, err = db.NamedExec(q3, pp)
if err != nil {
log.Fatal(err)
}
pp2 := &PlacePerson{}
rows, err = db.NamedQuery(`
SELECT
first_name,
last_name,
email,
place.id AS "place.id",
place.name AS "place.name"
FROM placeperson
INNER JOIN place ON place.id = placeperson.place_id
WHERE
place.id=:place.id`, pp)
if err != nil {
log.Fatal(err)
}
for rows.Next() {
err = rows.StructScan(pp2)
if err != nil {
t.Error(err)
}
if pp2.FirstName.String != "ben" {
t.Error("Expected first name of `ben`, got " + pp2.FirstName.String)
}
if pp2.LastName.String != "doe" {
				t.Error("Expected last name of `doe`, got " + pp2.LastName.String)
}
if pp2.Place.Name.String != "myplace" {
t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String)
}
if pp2.Place.ID != pp.Place.ID {
				t.Errorf("Expected place ID of %v, got %v", pp.Place.ID, pp2.Place.ID)
}
}
})
}
func TestNilInserts(t *testing.T) {
var schema = Schema{
create: `
CREATE TABLE tt (
id integer,
value text NULL DEFAULT NULL
);`,
drop: "drop table tt;",
}
RunWithSchema(schema, t, func(db *DB, t *testing.T) {
type TT struct {
ID int
Value *string
}
var v, v2 TT
r := db.Rebind
db.MustExec(r(`INSERT INTO tt (id) VALUES (1)`))
_ = db.Get(&v, r(`SELECT * FROM tt`))
if v.ID != 1 {
t.Errorf("Expecting id of 1, got %v", v.ID)
}
if v.Value != nil {
t.Errorf("Expecting NULL to map to nil, got %s", *v.Value)
}
v.ID = 2
// NOTE: this incidentally uncovered a bug which was that named queries with
// pointer destinations would not work if the passed value here was not addressable,
// as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for
// writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly
// function. This next line is important as it provides the only coverage for this.
_, _ = db.NamedExec(`INSERT INTO tt (id, value) VALUES (:id, :value)`, v)
_ = db.Get(&v2, r(`SELECT * FROM tt WHERE id=2`))
if v.ID != v2.ID {
t.Errorf("%v != %v", v.ID, v2.ID)
}
if v2.Value != nil {
			t.Errorf("Expecting NULL to map to nil, got %s", *v2.Value)
}
})
}
func TestScanError(t *testing.T) {
var schema = Schema{
create: `
CREATE TABLE kv (
k text,
v integer
);`,
drop: `drop table kv;`,
}
RunWithSchema(schema, t, func(db *DB, t *testing.T) {
type WrongTypes struct {
K int
V string
}
_, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1)
if err != nil {
t.Error(err)
}
rows, err := db.Queryx("SELECT * FROM kv")
if err != nil {
t.Error(err)
}
for rows.Next() {
var wt WrongTypes
err := rows.StructScan(&wt)
if err == nil {
t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName())
}
}
})
}
// FIXME: this function is kinda big but it slows things down to be constantly
// loading and reloading the schema..
func TestUsage(t *testing.T) {
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
slicemembers := []SliceMember{}
err := db.Select(&slicemembers, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
t.Fatal(err)
}
people := []Person{}
err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
if err != nil {
t.Fatal(err)
}
jason, john := people[0], people[1]
if jason.FirstName != "Jason" {
t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName)
}
if jason.LastName != "Moiron" {
t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName)
}
if jason.Email != "[email protected]" {
t.Errorf("Expecting Email of [email protected], got %s", jason.Email)
}
if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "[email protected]" {
t.Errorf("John Doe's person record not what expected: Got %v\n", john)
}
jason = Person{}
err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason")
if err != nil {
t.Fatal(err)
}
if jason.FirstName != "Jason" {
t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName)
}
err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar")
if err == nil {
t.Errorf("Expecting an error, got nil\n")
}
if err != sql.ErrNoRows {
t.Errorf("Expected sql.ErrNoRows, got %v\n", err)
}
		// The following tests check statement reuse, which used to be a problem because
		// Stmts were copied when created; that copying has since been removed.
stmt1, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
if err != nil {
t.Fatal(err)
}
jason = Person{}
row := stmt1.QueryRowx("DoesNotExist")
_ = row.Scan(&jason)
row = stmt1.QueryRowx("DoesNotExist")
_ = row.Scan(&jason)
err = stmt1.Get(&jason, "DoesNotExist User")
if err == nil {
t.Error("Expected an error")
}
err = stmt1.Get(&jason, "DoesNotExist User 2")
if err == nil {
t.Fatal(err)
}
stmt2, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
if err != nil {
t.Fatal(err)
}
jason = Person{}
tx, err := db.Beginx()
if err != nil {
t.Fatal(err)
}
tstmt2 := tx.Stmtx(stmt2)
row2 := tstmt2.QueryRowx("Jason")
err = row2.StructScan(&jason)
if err != nil {
t.Error(err)
}
_ = tx.Commit()
places := []*Place{}
err = db.Select(&places, "SELECT telcode FROM place ORDER BY telcode ASC")
if err != nil {
t.Fatal(err)
}
usa, singsing, honkers := places[0], places[1], places[2]
if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
t.Errorf("Expected integer telcodes to work, got %#v", places)
}
placesptr := []PlacePtr{}
err = db.Select(&placesptr, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
t.Error(err)
}
//fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2])
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
// this test also verifies that you can use either a []Struct{} or a []*Struct{}
places2 := []Place{}
err = db.Select(&places2, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
t.Fatal(err)
}
usa, singsing, honkers = &places2[0], &places2[1], &places2[2]
// this should return a type error that &p is not a pointer to a struct slice
p := Place{}
err = db.Select(&p, "SELECT * FROM place ORDER BY telcode ASC")
if err == nil {
t.Errorf("Expected an error, argument to select should be a pointer to a struct slice")
}
// this should be an error
pl := []Place{}
err = db.Select(pl, "SELECT * FROM place ORDER BY telcode ASC")
if err == nil {
t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.")
}
if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
t.Errorf("Expected integer telcodes to work, got %#v", places)
}
stmt, err := db.Preparex(db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC"))
if err != nil {
t.Error(err)
}
places = []*Place{}
err = stmt.Select(&places, 10)
if len(places) != 2 {
			t.Errorf("Expected 2 places, got %d.", len(places))
}
if err != nil {
t.Fatal(err)
}
singsing, honkers = places[0], places[1]
if singsing.TelCode != 65 || honkers.TelCode != 852 {
t.Errorf("Expected the right telcodes, got %#v", places)
}
rows, err := db.Queryx("SELECT * FROM place")
if err != nil {
t.Fatal(err)
}
place := Place{}
for rows.Next() {
err = rows.StructScan(&place)
if err != nil {
t.Fatal(err)
}
}
rows, err = db.Queryx("SELECT * FROM place")
if err != nil {
t.Fatal(err)
}
m := map[string]interface{}{}
for rows.Next() {
err = rows.MapScan(m)
if err != nil {
t.Fatal(err)
}
_, ok := m["country"]
if !ok {
t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m)
}
}
rows, err = db.Queryx("SELECT * FROM place")
if err != nil {
t.Fatal(err)
}
for rows.Next() {
s, err := rows.SliceScan()
if err != nil {
t.Error(err)
}
if len(s) != 3 {
t.Errorf("Expected 3 columns in result, got %d\n", len(s))
}
}
// test advanced querying
// test that NamedExec works with a map as well as a struct
_, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{
"first": "Bin",
"last": "Smuth",
"email": "[email protected]",
})
if err != nil {
t.Fatal(err)
}
// ensure that if the named param happens right at the end it still works
// ensure that NamedQuery works with a map[string]interface{}
rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"})
if err != nil {
t.Fatal(err)
}
ben := &Person{}
for rows.Next() {
err = rows.StructScan(ben)
if err != nil {
t.Fatal(err)
}
if ben.FirstName != "Bin" {
t.Fatal("Expected first name of `Bin`, got " + ben.FirstName)
}
if ben.LastName != "Smuth" {
				t.Fatal("Expected last name of `Smuth`, got " + ben.LastName)
}
}
ben.FirstName = "Ben"
ben.LastName = "Smith"
ben.Email = "[email protected]"
// Insert via a named query using the struct
_, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben)
if err != nil {
t.Fatal(err)
}
rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", ben)
if err != nil {
t.Fatal(err)
}
for rows.Next() {
err = rows.StructScan(ben)
if err != nil {
t.Fatal(err)
}
if ben.FirstName != "Ben" {
t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
}
if ben.LastName != "Smith" {
				t.Fatal("Expected last name of `Smith`, got " + ben.LastName)
}
}
		// ensure that Get does not panic on empty result set
person := &Person{}
err = db.Get(person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist")
if err == nil {
t.Fatal("Should have got an error for Get on non-existent row.")
}
// lets test prepared statements some more
stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
if err != nil {
t.Fatal(err)
}
rows, err = stmt.Queryx("Ben")
if err != nil {
t.Fatal(err)
}
for rows.Next() {
err = rows.StructScan(ben)
if err != nil {
t.Fatal(err)
}
if ben.FirstName != "Ben" {
t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
}
if ben.LastName != "Smith" {
				t.Fatal("Expected last name of `Smith`, got " + ben.LastName)
}
}
john = Person{}
stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
if err != nil {
t.Error(err)
}
err = stmt.Get(&john, "John")
if err != nil {
t.Error(err)
}
// test name mapping
// THIS USED TO WORK BUT WILL NO LONGER WORK.
db.MapperFunc(strings.ToUpper)
rsa := CPlace{}
err = db.Get(&rsa, "SELECT * FROM capplace;")
if err != nil {
t.Error(err, "in db:", db.DriverName())
}
db.MapperFunc(strings.ToLower)
// create a copy and change the mapper, then verify the copy behaves
// differently from the original.
dbCopy := NewDb(db.DB, db.DriverName())
dbCopy.MapperFunc(strings.ToUpper)
err = dbCopy.Get(&rsa, "SELECT * FROM capplace;")
if err != nil {
fmt.Println(db.DriverName())
t.Error(err)
}
err = db.Get(&rsa, "SELECT * FROM cappplace;")
if err == nil {
			t.Error("Expected an error, got nil")
}
// test base type slices
var sdest []string
rows, err = db.Queryx("SELECT email FROM person ORDER BY email ASC;")
if err != nil {
t.Error(err)
}
err = scanAll(rows, &sdest, false)
if err != nil {
t.Error(err)
}
// test Get with base types
var count int
err = db.Get(&count, "SELECT count(*) FROM person;")
if err != nil {
t.Error(err)
}
if count != len(sdest) {
t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest))
}
// test Get and Select with time.Time, #84
var addedAt time.Time
err = db.Get(&addedAt, "SELECT added_at FROM person LIMIT 1;")
if err != nil {
t.Error(err)
}
var addedAts []time.Time
err = db.Select(&addedAts, "SELECT added_at FROM person;")
if err != nil {
t.Error(err)
}
// test it on a double pointer
var pcount *int
err = db.Get(&pcount, "SELECT count(*) FROM person;")
if err != nil {
t.Error(err)
}
if *pcount != count {
t.Errorf("expected %d = %d", *pcount, count)
}
// test Select...
sdest = []string{}
err = db.Select(&sdest, "SELECT first_name FROM person ORDER BY first_name ASC;")
if err != nil {
t.Error(err)
}
expected := []string{"Ben", "Bin", "Jason", "John"}
for i, got := range sdest {
if got != expected[i] {
t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got)
}
}
var nsdest []sql.NullString
err = db.Select(&nsdest, "SELECT city FROM place ORDER BY city ASC")
if err != nil {
t.Error(err)
}
for _, val := range nsdest {
if val.Valid && val.String != "New York" {
t.Errorf("expected single valid result to be `New York`, but got %s", val.String)
}
}
})
}
// tests that sqlx will not panic when the wrong driver is passed because
// of an automatic nil dereference in sqlx.Open(), which was fixed.
func TestDoNotPanicOnConnect(t *testing.T) {
db, err := Connect("bogus", "hehe")
if err == nil {
t.Errorf("Should return error when using bogus driverName")
}
if db != nil {
t.Errorf("Should not return the db on a connect failure")
}
}
func TestRebind(t *testing.T) {
q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
s1 := Rebind(DOLLAR, q1)
s2 := Rebind(DOLLAR, q2)
if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` {
t.Errorf("q1 failed")
}
if s2 != `INSERT INTO foo (a, b, c) VALUES ($1, $2, "foo"), ("Hi", $3, $4)` {
t.Errorf("q2 failed")
}
s1 = Rebind(AT, q1)
s2 = Rebind(AT, q2)
if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (@p1, @p2, @p3, @p4, @p5, @p6, @p7, @p8, @p9, @p10)` {
t.Errorf("q1 failed")
}
if s2 != `INSERT INTO foo (a, b, c) VALUES (@p1, @p2, "foo"), ("Hi", @p3, @p4)` {
t.Errorf("q2 failed")
}
s1 = Rebind(NAMED, q1)
s2 = Rebind(NAMED, q2)
ex1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ` +
`(:arg1, :arg2, :arg3, :arg4, :arg5, :arg6, :arg7, :arg8, :arg9, :arg10)`
if s1 != ex1 {
t.Error("q1 failed on Named params")
}
ex2 := `INSERT INTO foo (a, b, c) VALUES (:arg1, :arg2, "foo"), ("Hi", :arg3, :arg4)`
if s2 != ex2 {
t.Error("q2 failed on Named params")
}
}
func TestBindMap(t *testing.T) {
// Test that it works..
q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
am := map[string]interface{}{
"name": "Jason Moiron",
"age": 30,
"first": "Jason",
"last": "Moiron",
}
bq, args, _ := bindMap(QUESTION, q1, am)
expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)`
if bq != expect {
t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
}
if args[0].(string) != "Jason Moiron" {
t.Errorf("Expected `Jason Moiron`, got %v\n", args[0])
}
if args[1].(int) != 30 {
t.Errorf("Expected 30, got %v\n", args[1])
}
if args[2].(string) != "Jason" {
t.Errorf("Expected Jason, got %v\n", args[2])
}
if args[3].(string) != "Moiron" {
t.Errorf("Expected Moiron, got %v\n", args[3])
}
}
// Test for #117, embedded nil maps
type Message struct {
Text string `db:"string"`
Properties PropertyMap `db:"properties"` // Stored as JSON in the database
}
type PropertyMap map[string]string
// Implement driver.Valuer and sql.Scanner interfaces on PropertyMap
func (p PropertyMap) Value() (driver.Value, error) {
if len(p) == 0 {
return nil, nil
}
return json.Marshal(p)
}
func (p PropertyMap) Scan(src interface{}) error {
v := reflect.ValueOf(src)
if !v.IsValid() || v.CanAddr() && v.IsNil() {
return nil
}
switch ts := src.(type) {
case []byte:
return json.Unmarshal(ts, &p)
case string:
return json.Unmarshal([]byte(ts), &p)
default:
		return fmt.Errorf("Could not decode type %T -> %T", src, p)
}
}
func TestEmbeddedMaps(t *testing.T) {
var schema = Schema{
create: `
CREATE TABLE message (
string text,
properties text
);`,
drop: `drop table message;`,
}
RunWithSchema(schema, t, func(db *DB, t *testing.T) {
messages := []Message{
{"Hello, World", PropertyMap{"one": "1", "two": "2"}},
{"Thanks, Joy", PropertyMap{"pull": "request"}},
}
q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);`
for _, m := range messages {
_, err := db.NamedExec(q1, m)
if err != nil {
t.Fatal(err)
}
}
var count int
err := db.Get(&count, "SELECT count(*) FROM message")
if err != nil {
t.Fatal(err)
}
if count != len(messages) {
t.Fatalf("Expected %d messages in DB, found %d", len(messages), count)
}
var m Message
err = db.Get(&m, "SELECT * FROM message LIMIT 1;")
if err != nil {
t.Fatal(err)
}
if m.Properties == nil {
t.Fatal("Expected m.Properties to not be nil, but it was.")
}
})
}
func TestIssue197(t *testing.T) {
// this test actually tests for a bug in database/sql:
// https://github.com/golang/go/issues/13905
// this potentially makes _any_ named type that is an alias for []byte
// unsafe to use in a lot of different ways (basically, unsafe to hold
// onto after loading from the database).
t.Skip()
type mybyte []byte
type Var struct{ Raw json.RawMessage }
type Var2 struct{ Raw []byte }
type Var3 struct{ Raw mybyte }
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
var err error
var v, q Var
if err = db.Get(&v, `SELECT '{"a": "b"}' AS raw`); err != nil {
t.Fatal(err)
}
if err = db.Get(&q, `SELECT 'null' AS raw`); err != nil {
t.Fatal(err)
}
var v2, q2 Var2
if err = db.Get(&v2, `SELECT '{"a": "b"}' AS raw`); err != nil {
t.Fatal(err)
}
if err = db.Get(&q2, `SELECT 'null' AS raw`); err != nil {
t.Fatal(err)
}
var v3, q3 Var3
if err = db.QueryRow(`SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil {
t.Fatal(err)
}
if err = db.QueryRow(`SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil {
t.Fatal(err)
}
t.Fail()
})
}
func TestIn(t *testing.T) {
// some quite normal situations
type tr struct {
q string
args []interface{}
c int
}
tests := []tr{
{"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?",
[]interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"},
7},
{"SELECT * FROM foo WHERE x in (?)",
[]interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}},
8},
{"SELECT * FROM foo WHERE x = ? AND y in (?)",
[]interface{}{[]byte("foo"), []int{0, 5, 3}},
4},
{"SELECT * FROM foo WHERE x = ? AND y IN (?)",
[]interface{}{sql.NullString{Valid: false}, []string{"a", "b"}},
3},
}
for _, test := range tests {
q, a, err := In(test.q, test.args...)
if err != nil {
t.Error(err)
}
if len(a) != test.c {
t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a)
}
if strings.Count(q, "?") != test.c {
t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?"))
}
}
// too many bindVars, but no slices, so short circuits parsing
// i'm not sure if this is the right behavior; this query/arg combo
// might not work, but we shouldn't parse if we don't need to
{
orig := "SELECT * FROM foo WHERE x = ? AND y = ?"
q, a, err := In(orig, "foo", "bar", "baz")
if err != nil {
t.Error(err)
}
if len(a) != 3 {
t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a)
}
if q != orig {
t.Error("Expected unchanged query.")
}
}
tests = []tr{
// too many bindvars; slice present so should return error during parse
{"SELECT * FROM foo WHERE x = ? and y = ?",
[]interface{}{"foo", []int{1, 2, 3}, "bar"},
0},
// empty slice, should return error before parse
{"SELECT * FROM foo WHERE x = ?",
[]interface{}{[]int{}},
0},
// too *few* bindvars, should return an error
{"SELECT * FROM foo WHERE x = ? AND y in (?)",
[]interface{}{[]int{1, 2, 3}},
0},
}
for _, test := range tests {
_, _, err := In(test.q, test.args...)
if err == nil {
t.Error("Expected an error, but got nil.")
}
}
RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
loadDefaultFixture(db, t)
//tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
//tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
//tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
telcodes := []int{852, 65}
q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode"
query, args, err := In(q, telcodes)
if err != nil {
t.Error(err)
}
query = db.Rebind(query)
places := []Place{}
err = db.Select(&places, query, args...)
if err != nil {
t.Error(err)
}
if len(places) != 2 {
t.Fatalf("Expecting 2 results, got %d", len(places))
}
if places[0].TelCode != 65 {
t.Errorf("Expecting singapore first, but got %#v", places[0])
}
if places[1].TelCode != 852 {
t.Errorf("Expecting hong kong second, but got %#v", places[1])
}
})
}
func TestBindStruct(t *testing.T) {
var err error
q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
type tt struct {
Name string
Age int
First string
Last string
}
type tt2 struct {
Field1 string `db:"field_1"`
Field2 string `db:"field_2"`
}
type tt3 struct {
tt2
Name string
}
am := tt{"Jason Moiron", 30, "Jason", "Moiron"}
bq, args, _ := bindStruct(QUESTION, q1, am, mapper())
expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)`
if bq != expect {
t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
}
if args[0].(string) != "Jason Moiron" {
t.Errorf("Expected `Jason Moiron`, got %v\n", args[0])
}
if args[1].(int) != 30 {
t.Errorf("Expected 30, got %v\n", args[1])
}
if args[2].(string) != "Jason" {
t.Errorf("Expected Jason, got %v\n", args[2])
}
if args[3].(string) != "Moiron" {
t.Errorf("Expected Moiron, got %v\n", args[3])
}
am2 := tt2{"Hello", "World"}
bq, args, _ = bindStruct(QUESTION, "INSERT INTO foo (a, b) VALUES (:field_2, :field_1)", am2, mapper())
expect = `INSERT INTO foo (a, b) VALUES (?, ?)`
if bq != expect {
t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
}
if args[0].(string) != "World" {
t.Errorf("Expected 'World', got %s\n", args[0].(string))
}
if args[1].(string) != "Hello" {
t.Errorf("Expected 'Hello', got %s\n", args[1].(string))
}
am3 := tt3{Name: "Hello!"}
am3.Field1 = "Hello"
am3.Field2 = "World"
bq, args, err = bindStruct(QUESTION, "INSERT INTO foo (a, b, c) VALUES (:name, :field_1, :field_2)", am3, mapper())
if err != nil {
t.Fatal(err)
}
expect = `INSERT INTO foo (a, b, c) VALUES (?, ?, ?)`
if bq != expect {
t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
}
if args[0].(string) != "Hello!" {
t.Errorf("Expected 'Hello!', got %s\n", args[0].(string))
}
if args[1].(string) != "Hello" {
t.Errorf("Expected 'Hello', got %s\n", args[1].(string))
}
if args[2].(string) != "World" {
		t.Errorf("Expected 'World', got %s\n", args[2].(string))
}
}
func TestEmbeddedLiterals(t *testing.T) {
var schema = Schema{
create: `
CREATE TABLE x (
k text
);`,
drop: `drop table x;`,
}
RunWithSchema(schema, t, func(db *DB, t *testing.T) {
type t1 struct {
K *string
}
type t2 struct {
Inline struct {
F string
}
K *string
}
db.MustExec(db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three")
target := t1{}
err := db.Get(&target, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
if err != nil {
t.Error(err)
}
if *target.K != "one" {
t.Error("Expected target.K to be `one`, got ", target.K)
}
target2 := t2{}
err = db.Get(&target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
if err != nil {
t.Error(err)
}
if *target2.K != "one" {
t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K)
}
})
}
func BenchmarkBindStruct(b *testing.B) {
b.StopTimer()
q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
type t struct {
Name string
Age int
First string
Last string
}
am := t{"Jason Moiron", 30, "Jason", "Moiron"}
b.StartTimer()
for i := 0; i < b.N; i++ {
_, _, _ = bindStruct(DOLLAR, q1, am, mapper())
}
}
func BenchmarkBindMap(b *testing.B) {
b.StopTimer()
q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
am := map[string]interface{}{
"name": "Jason Moiron",
"age": 30,
"first": "Jason",
"last": "Moiron",
}
b.StartTimer()
for i := 0; i < b.N; i++ {
_, _, _ = bindMap(DOLLAR, q1, am)
}
}
func BenchmarkIn(b *testing.B) {
q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
for i := 0; i < b.N; i++ {
_, _, _ = In(q, []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}...)
}
}
func BenchmarkIn1k(b *testing.B) {
q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
var vals [1000]interface{}
for i := 0; i < b.N; i++ {
_, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...)
}
}
func BenchmarkIn1kInt(b *testing.B) {
q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
var vals [1000]int
for i := 0; i < b.N; i++ {
_, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...)
}
}
func BenchmarkIn1kString(b *testing.B) {
q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
var vals [1000]string
for i := 0; i < b.N; i++ {
_, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...)
}
}
func BenchmarkRebind(b *testing.B) {
b.StopTimer()
q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`
q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
b.StartTimer()
for i := 0; i < b.N; i++ {
Rebind(DOLLAR, q1)
Rebind(DOLLAR, q2)
}
}
func BenchmarkRebindBuffer(b *testing.B) {
b.StopTimer()
q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`
q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
b.StartTimer()
for i := 0; i < b.N; i++ {
rebindBuff(DOLLAR, q1)
rebindBuff(DOLLAR, q2)
}
}
| [
"\"SQLX_POSTGRES_DSN\"",
"\"SQLX_MYSQL_DSN\"",
"\"SQLX_SQLITE_DSN\""
]
| []
| [
"SQLX_MYSQL_DSN",
"SQLX_SQLITE_DSN",
"SQLX_POSTGRES_DSN"
]
| [] | ["SQLX_MYSQL_DSN", "SQLX_SQLITE_DSN", "SQLX_POSTGRES_DSN"] | go | 3 | 0 | |
eventstore_test.go | package postgres
import (
"context"
"os"
"testing"
"time"
eh "github.com/looplab/eventhorizon"
"github.com/looplab/eventhorizon/eventstore"
"github.com/looplab/eventhorizon/mocks"
"github.com/looplab/eventhorizon/uuid"
)
var host, port, name, user, password string
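// The connection settings are read from the environment so the integration
// tests can target different Postgres instances; each setting falls back to a
// local development default.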
func init() {
if host = os.Getenv("POSTGRES_HOST"); host == "" {
host = "localhost"
}
if port = os.Getenv("POSTGRES_PORT"); port == "" {
port = "5432"
}
if name = os.Getenv("POSTGRES_DATABASE"); name == "" {
name = "cgrates"
}
if user = os.Getenv("POSTGRES_USER"); user == "" {
user = "postgres"
}
if password = os.Getenv("POSTGRES_PASSWORD"); password == "" {
password = "mysecretpassword"
}
}
func TestCreateTablesIntegration(t *testing.T) {
store, err := NewEventStore(host, port, user, password, name)
if err != nil {
t.Fatal("there should be no error:", err)
}
if store == nil {
t.Fatal("there should be a store")
}
defer store.Close(context.Background())
err = store.createTables()
if err != nil {
t.Fatal("tables should have been created:", err)
}
}
func TestEventStoreIntegration(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
store, err := NewEventStore(host, port, user, password, name)
if err != nil {
t.Fatal("there should be no error:", err)
}
if store == nil {
t.Fatal("there should be a store")
}
defer store.Close(context.Background())
customNamespaceCtx := eh.NewContextWithNamespace(context.Background(), "ns")
// Run the actual test suite, both for default and custom namespace.
eventstore.AcceptanceTest(t, context.Background(), store)
eventstore.AcceptanceTest(t, customNamespaceCtx, store)
}
func TestWithEventHandlerIntegration(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
h := &mocks.EventBus{}
store, err := NewEventStore(host, port, user, password, name, WithEventHandler(h))
if err != nil {
t.Fatal("there should be no error:", err)
}
if store == nil {
t.Fatal("there should be a store")
}
defer store.Close(context.Background())
ctx := context.Background()
// The event handler should be called.
id1 := uuid.New()
timestamp := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
event1 := eh.NewEventForAggregate(mocks.EventType, &mocks.EventData{Content: "event1"},
timestamp, mocks.AggregateType, id1, 1)
err = store.Save(ctx, []eh.Event{event1}, 0)
if err != nil {
t.Error("there should be no error:", err)
}
expected := []eh.Event{event1}
// The saved events should be ok.
events, err := store.Load(ctx, id1)
if err != nil {
t.Error("there should be no error:", err)
}
// The stored events should be ok.
if len(events) != len(expected) {
t.Errorf("incorrect number of loaded events: %d", len(events))
}
for i, event := range events {
if err := eh.CompareEvents(event, expected[i], eh.IgnoreVersion()); err != nil {
t.Error("the stored event was incorrect:", err)
}
if event.Version() != i+1 {
t.Error("the event version should be correct:", event, event.Version())
}
}
// The handled events should be ok.
if len(h.Events) != len(expected) {
		t.Errorf("incorrect number of handled events: %d", len(h.Events))
}
for i, event := range h.Events {
if err := eh.CompareEvents(event, expected[i], eh.IgnoreVersion()); err != nil {
			t.Error("the handled event was incorrect:", err)
}
if event.Version() != i+1 {
t.Error("the event version should be correct:", event, event.Version())
}
}
}
func BenchmarkEventStore(b *testing.B) {
store, err := NewEventStore(host, port, user, password, name)
if err != nil {
b.Fatal("there should be no error:", err)
}
if store == nil {
b.Fatal("there should be a store")
}
defer store.Close(context.Background())
eventstore.Benchmark(b, store)
}
| [
"\"POSTGRES_HOST\"",
"\"POSTGRES_PORT\"",
"\"POSTGRES_DATABASE\"",
"\"POSTGRES_USER\"",
"\"POSTGRES_PASSWORD\""
]
| []
| [
"POSTGRES_DATABASE",
"POSTGRES_USER",
"POSTGRES_HOST",
"POSTGRES_PORT",
"POSTGRES_PASSWORD"
]
| [] | ["POSTGRES_DATABASE", "POSTGRES_USER", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_PASSWORD"] | go | 5 | 0 | |
Day26 Django Model/Django_ORM/Django_ORM/wsgi.py | """
WSGI config for Django_ORM project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_ORM.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
components/server/src/routes/report.py | """Report routes."""
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
"""Import a preconfigured report into the database."""
report = dict(bottle.request.json)
result = import_json_report(database, report)
result["new_report_uuid"] = report["report_uuid"]
return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
"""Add a new report."""
report_uuid = uuid()
user = sessions.user(database)
report = dict(
report_uuid=report_uuid, title="New report", subjects={},
delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result["new_report_uuid"] = report_uuid
return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
"""Copy a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy["delta"] = dict(
uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"],
description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result["new_report_uuid"] = report_copy["report_uuid"]
return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
"""Download the report as pdf."""
renderer_host = os.environ.get("RENDERER_HOST", "renderer")
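    # The renderer and proxy hosts/ports are all read from the environment, with fallback defaults.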
renderer_port = os.environ.get("RENDERER_PORT", "9000")
render_url = f"http://{renderer_host}:{renderer_port}/api/render"
proxy_host = os.environ.get("PROXY_HOST", "www")
proxy_port = os.environ.get("PROXY_PORT", "80")
query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
report_url = parse.quote(f"http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}")
margins = "&".join([f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right")])
    # Set the pdf scale to 70%, otherwise the dashboard falls off the page
options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
response = requests.get(f"{render_url}?url={report_url}&{options}")
response.raise_for_status()
bottle.response.content_type = "application/pdf"
return response.content
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
"""Delete a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report["deleted"] = "true"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
"""Set a report attribute."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = data.report.get(report_attribute) or ""
data.report[report_attribute] = value
value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
f"{value_change_description}.")
return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
"""Get a report with all metrics that have the specified tag."""
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(
title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
"""Return all subjects and metrics that have the tag."""
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
| []
| []
| [
"RENDERER_HOST",
"PROXY_PORT",
"PROXY_HOST",
"RENDERER_PORT"
]
| [] | ["RENDERER_HOST", "PROXY_PORT", "PROXY_HOST", "RENDERER_PORT"] | python | 4 | 0 | |
pkg/kubectl/cmd/util/factory_test.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"os/user"
"path"
"sort"
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/client/unversioned/fake"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
)
func TestNewFactoryDefaultFlagBindings(t *testing.T) {
factory := NewFactory(nil)
if !factory.flags.HasFlags() {
t.Errorf("Expected flags, but didn't get any")
}
}
func TestNewFactoryNoFlagBindings(t *testing.T) {
clientConfig := clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{})
factory := NewFactory(clientConfig)
if factory.flags.HasFlags() {
t.Errorf("Expected zero flags, but got %v", factory.flags)
}
}
func TestPodSelectorForObject(t *testing.T) {
f := NewFactory(nil)
svc := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test"},
Spec: api.ServiceSpec{
Selector: map[string]string{
"foo": "bar",
},
},
}
expected := "foo=bar"
got, err := f.PodSelectorForObject(svc)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if expected != got {
t.Fatalf("Selector mismatch! Expected %s, got %s", expected, got)
}
}
func TestPortsForObject(t *testing.T) {
f := NewFactory(nil)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Ports: []api.ContainerPort{
{
ContainerPort: 101,
},
},
},
},
},
}
expected := []string{"101"}
got, err := f.PortsForObject(pod)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(expected) != len(got) {
t.Fatalf("Ports size mismatch! Expected %d, got %d", len(expected), len(got))
}
sort.Strings(expected)
sort.Strings(got)
for i, port := range got {
if port != expected[i] {
t.Fatalf("Port mismatch! Expected %s, got %s", expected[i], port)
}
}
}
func TestLabelsForObject(t *testing.T) {
f := NewFactory(nil)
tests := []struct {
name string
object runtime.Object
expected string
err error
}{
{
name: "successful re-use of labels",
object: &api.Service{
ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", Labels: map[string]string{"svc": "test"}},
TypeMeta: unversioned.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "svc=test",
err: nil,
},
{
name: "empty labels",
object: &api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", Labels: map[string]string{}},
TypeMeta: unversioned.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "",
err: nil,
},
{
name: "nil labels",
object: &api.Service{
ObjectMeta: api.ObjectMeta{Name: "zen", Namespace: "test", Labels: nil},
TypeMeta: unversioned.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "",
err: nil,
},
}
for _, test := range tests {
gotLabels, err := f.LabelsForObject(test.object)
if err != test.err {
t.Fatalf("%s: Error mismatch: Expected %v, got %v", test.name, test.err, err)
}
got := kubectl.MakeLabels(gotLabels)
if test.expected != got {
t.Fatalf("%s: Labels mismatch! Expected %s, got %s", test.name, test.expected, got)
}
}
}
func TestCanBeExposed(t *testing.T) {
factory := NewFactory(nil)
tests := []struct {
kind unversioned.GroupKind
expectErr bool
}{
{
kind: api.Kind("ReplicationController"),
expectErr: false,
},
{
kind: api.Kind("Node"),
expectErr: true,
},
}
for _, test := range tests {
err := factory.CanBeExposed(test.kind)
if test.expectErr && err == nil {
t.Error("unexpected non-error")
}
if !test.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
}
}
func TestFlagUnderscoreRenaming(t *testing.T) {
factory := NewFactory(nil)
factory.flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
factory.flags.Bool("valid_flag", false, "bool value")
// In case of failure of this test check this PR: spf13/pflag#23
if factory.flags.Lookup("valid_flag").Name != "valid-flag" {
t.Fatalf("Expected flag name to be valid-flag, got %s", factory.flags.Lookup("valid_flag").Name)
}
}
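// loadSchemaForTest reads the checked-in swagger spec for the default test
// group version and parses it into a validation.Schema.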
func loadSchemaForTest() (validation.Schema, error) {
pathToSwaggerSpec := "../../../../api/swagger-spec/" + testapi.Default.GroupVersion().Version + ".json"
data, err := ioutil.ReadFile(pathToSwaggerSpec)
if err != nil {
return nil, err
}
return validation.NewSwaggerSchemaFromBytes(data)
}
func TestRefetchSchemaWhenValidationFails(t *testing.T) {
schema, err := loadSchemaForTest()
if err != nil {
t.Errorf("Error loading schema: %v", err)
t.FailNow()
}
output, err := json.Marshal(schema)
if err != nil {
t.Errorf("Error serializing schema: %v", err)
t.FailNow()
}
requests := map[string]int{}
c := &fake.RESTClient{
Codec: testapi.Default.Codec(),
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
switch p, m := req.URL.Path, req.Method; {
case strings.HasPrefix(p, "/swaggerapi") && m == "GET":
requests[p] = requests[p] + 1
return &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
dir := os.TempDir() + "/schemaCache"
os.RemoveAll(dir)
fullDir, err := substituteUserHome(dir)
if err != nil {
t.Errorf("Error getting fullDir: %v", err)
t.FailNow()
}
cacheFile := path.Join(fullDir, "foo", "bar", schemaFileName)
err = writeSchemaFile(output, fullDir, cacheFile, "foo", "bar")
if err != nil {
t.Errorf("Error building old cache schema: %v", err)
t.FailNow()
}
obj := &extensions.Deployment{}
data, err := runtime.Encode(testapi.Extensions.Codec(), obj)
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
// Re-get request, should use HTTP and write
if getSchemaAndValidate(c, data, "foo", "bar", dir); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if requests["/swaggerapi/foo/bar"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"])
}
}
func TestValidateCachesSchema(t *testing.T) {
schema, err := loadSchemaForTest()
if err != nil {
t.Errorf("Error loading schema: %v", err)
t.FailNow()
}
output, err := json.Marshal(schema)
if err != nil {
t.Errorf("Error serializing schema: %v", err)
t.FailNow()
}
requests := map[string]int{}
c := &fake.RESTClient{
Codec: testapi.Default.Codec(),
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
switch p, m := req.URL.Path, req.Method; {
case strings.HasPrefix(p, "/swaggerapi") && m == "GET":
requests[p] = requests[p] + 1
return &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
dir := os.TempDir() + "/schemaCache"
os.RemoveAll(dir)
obj := &api.Pod{}
data, err := runtime.Encode(testapi.Default.Codec(), obj)
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
// Initial request, should use HTTP and write
if getSchemaAndValidate(c, data, "foo", "bar", dir); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if _, err := os.Stat(path.Join(dir, "foo", "bar", schemaFileName)); err != nil {
t.Errorf("unexpected missing cache file: %v", err)
}
if requests["/swaggerapi/foo/bar"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"])
}
// Same version and group, should skip HTTP
if getSchemaAndValidate(c, data, "foo", "bar", dir); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if requests["/swaggerapi/foo/bar"] != 2 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"])
}
// Different API group, should go to HTTP and write
if getSchemaAndValidate(c, data, "foo", "baz", dir); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if _, err := os.Stat(path.Join(dir, "foo", "baz", schemaFileName)); err != nil {
t.Errorf("unexpected missing cache file: %v", err)
}
if requests["/swaggerapi/foo/baz"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/baz"])
}
// Different version, should go to HTTP and write
if getSchemaAndValidate(c, data, "foo2", "bar", dir); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if _, err := os.Stat(path.Join(dir, "foo2", "bar", schemaFileName)); err != nil {
t.Errorf("unexpected missing cache file: %v", err)
}
if requests["/swaggerapi/foo2/bar"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo2/bar"])
}
// No cache dir, should go straight to HTTP and not write
if getSchemaAndValidate(c, data, "foo", "blah", ""); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if requests["/swaggerapi/foo/blah"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/blah"])
}
if _, err := os.Stat(path.Join(dir, "foo", "blah", schemaFileName)); err == nil || !os.IsNotExist(err) {
t.Errorf("unexpected cache file error: %v", err)
}
}
func TestSubstitueUser(t *testing.T) {
usr, err := user.Current()
if err != nil {
t.Logf("SKIPPING TEST: unexpected error: %v", err)
return
}
tests := []struct {
input string
expected string
expectErr bool
}{
{input: "~/foo", expected: path.Join(os.Getenv("HOME"), "foo")},
{input: "~" + usr.Username + "/bar", expected: usr.HomeDir + "/bar"},
{input: "/foo/bar", expected: "/foo/bar"},
{input: "~doesntexit/bar", expectErr: true},
}
for _, test := range tests {
output, err := substituteUserHome(test.input)
if test.expectErr {
if err == nil {
t.Error("unexpected non-error")
}
continue
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if output != test.expected {
t.Errorf("expected: %s, saw: %s", test.expected, output)
}
}
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
src/net/http/h2_bundle.go | // Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
//go:generate bundle -o h2_bundle.go -prefix http2 -underscore golang.org/x/net/http2
// Package http2 implements the HTTP/2 protocol.
//
// This package is low-level and intended to be used directly by very
// few people. Most users will use it indirectly through the automatic
// use by the net/http package (from Go 1.6 and later).
// For use in earlier Go versions see ConfigureServer. (Transport support
// requires Go 1.6 or later)
//
// See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
//
package http
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"crypto/rand"
"crypto/tls"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math"
mathrand "math/rand"
"net"
"net/http/httptrace"
"net/textproto"
"net/url"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"internal/x/net/http/httpguts"
"internal/x/net/http2/hpack"
"internal/x/net/idna"
)
// A list of the possible cipher suite ids. Taken from
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
http2cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
http2cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
http2cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
http2cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
http2cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
http2cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
http2cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
// Reserved uint16 = 0x001C-1D
http2cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
http2cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
http2cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
http2cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
http2cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
http2cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
http2cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
http2cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
// Reserved uint16 = 0x0047-4F
// Reserved uint16 = 0x0050-58
// Reserved uint16 = 0x0059-5C
// Unassigned uint16 = 0x005D-5F
// Reserved uint16 = 0x0060-66
http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
// Unassigned uint16 = 0x006E-83
http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
http2cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
http2cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
http2cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
http2cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
http2cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
http2cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
http2cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
http2cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
http2cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
http2cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
// Unassigned uint16 = 0x00C6-FE
http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
// Unassigned uint16 = 0x01-55,*
http2cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
// Unassigned uint16 = 0x5601 - 0xC000
http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
http2cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
http2cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
http2cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
http2cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
http2cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
http2cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
http2cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
http2cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
http2cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
http2cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
http2cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
http2cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
http2cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
http2cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
http2cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
http2cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
http2cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
http2cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
http2cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
http2cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
http2cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
// Unassigned uint16 = 0xC0B0-FF
// Unassigned uint16 = 0xC1-CB,*
// Unassigned uint16 = 0xCC00-A7
http2cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
http2cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
http2cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
http2cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
http2cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
http2cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
http2cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func http2isBadCipher(cipher uint16) bool {
switch cipher {
case http2cipher_TLS_NULL_WITH_NULL_NULL,
http2cipher_TLS_RSA_WITH_NULL_MD5,
http2cipher_TLS_RSA_WITH_NULL_SHA,
http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
http2cipher_TLS_RSA_WITH_RC4_128_MD5,
http2cipher_TLS_RSA_WITH_RC4_128_SHA,
http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
http2cipher_TLS_RSA_WITH_DES_CBC_SHA,
http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
http2cipher_TLS_DH_anon_WITH_RC4_128_MD5,
http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_KRB5_WITH_DES_CBC_SHA,
http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_KRB5_WITH_RC4_128_SHA,
http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
http2cipher_TLS_KRB5_WITH_DES_CBC_MD5,
http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
http2cipher_TLS_KRB5_WITH_RC4_128_MD5,
http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
http2cipher_TLS_PSK_WITH_NULL_SHA,
http2cipher_TLS_DHE_PSK_WITH_NULL_SHA,
http2cipher_TLS_RSA_PSK_WITH_NULL_SHA,
http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
http2cipher_TLS_RSA_WITH_NULL_SHA256,
http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
http2cipher_TLS_PSK_WITH_RC4_128_SHA,
http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
http2cipher_TLS_RSA_WITH_SEED_CBC_SHA,
http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_PSK_WITH_NULL_SHA256,
http2cipher_TLS_PSK_WITH_NULL_SHA384,
http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_ECDH_anon_WITH_NULL_SHA,
http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
http2cipher_TLS_RSA_WITH_AES_128_CCM,
http2cipher_TLS_RSA_WITH_AES_256_CCM,
http2cipher_TLS_RSA_WITH_AES_128_CCM_8,
http2cipher_TLS_RSA_WITH_AES_256_CCM_8,
http2cipher_TLS_PSK_WITH_AES_128_CCM,
http2cipher_TLS_PSK_WITH_AES_256_CCM,
http2cipher_TLS_PSK_WITH_AES_128_CCM_8,
http2cipher_TLS_PSK_WITH_AES_256_CCM_8:
return true
default:
return false
}
}
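// http2exampleIsBadCipher is an illustrative helper added for exposition only;
// it is not part of the generated bundle. It sketches how the Appendix A
// blacklist plays out: a static-RSA suite is rejected while an ephemeral
// ECDHE AEAD suite is allowed.
func http2exampleIsBadCipher() {
	// TLS_RSA_WITH_AES_128_GCM_SHA256 (0x009C) is listed in Appendix A.
	_ = http2isBadCipher(http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256) // true
	// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (0xC02F) is not listed.
	_ = http2isBadCipher(http2cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) // false
}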
// ClientConnPool manages a pool of HTTP/2 client connections.
type http2ClientConnPool interface {
GetClientConn(req *Request, addr string) (*http2ClientConn, error)
MarkDead(*http2ClientConn)
}
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
// implementations which can close their idle connections.
type http2clientConnPoolIdleCloser interface {
http2ClientConnPool
closeIdleConnections()
}
var (
_ http2clientConnPoolIdleCloser = (*http2clientConnPool)(nil)
_ http2clientConnPoolIdleCloser = http2noDialClientConnPool{}
)
// TODO: use singleflight for dialing and addConnCalls?
type http2clientConnPool struct {
t *http2Transport
mu sync.Mutex // TODO: maybe switch to RWMutex
// TODO: add support for sharing conns based on cert names
// (e.g. share conn for googleapis.com and appspot.com)
conns map[string][]*http2ClientConn // key is host:port
dialing map[string]*http2dialCall // currently in-flight dials
keys map[*http2ClientConn][]string
	addConnCalls map[string]*http2addConnCall // in-flight addConnIfNeeded calls
}
func (p *http2clientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
return p.getClientConn(req, addr, http2dialOnMiss)
}
const (
http2dialOnMiss = true
http2noDialOnMiss = false
)
// shouldTraceGetConn reports whether getClientConn should call any
// ClientTrace.GetConn hook associated with the http.Request.
//
// This complexity is needed to avoid double calls of the GetConn hook
// during the back-and-forth between net/http and x/net/http2 (when the
// net/http.Transport is upgraded to also speak http2), as well as support
// the case where x/net/http2 is being used directly.
func (p *http2clientConnPool) shouldTraceGetConn(st http2clientConnIdleState) bool {
// If our Transport wasn't made via ConfigureTransport, always
// trace the GetConn hook if provided, because that means the
// http2 package is being used directly and it's the one
// dialing, as opposed to net/http.
if _, ok := p.t.ConnPool.(http2noDialClientConnPool); !ok {
return true
}
// Otherwise, only use the GetConn hook if this connection has
// been used previously for other requests. For fresh
// connections, the net/http package does the dialing.
return !st.freshConn
}
func (p *http2clientConnPool) getClientConn(req *Request, addr string, dialOnMiss bool) (*http2ClientConn, error) {
if http2isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
http2traceGetConn(req, addr)
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil {
return nil, err
}
return cc, nil
}
p.mu.Lock()
for _, cc := range p.conns[addr] {
if st := cc.idleState(); st.canTakeNewRequest {
if p.shouldTraceGetConn(st) {
http2traceGetConn(req, addr)
}
p.mu.Unlock()
return cc, nil
}
}
if !dialOnMiss {
p.mu.Unlock()
return nil, http2ErrNoCachedConn
}
http2traceGetConn(req, addr)
call := p.getStartDialLocked(addr)
p.mu.Unlock()
<-call.done
return call.res, call.err
}
// dialCall is an in-flight Transport dial call to a host.
type http2dialCall struct {
p *http2clientConnPool
done chan struct{} // closed when done
res *http2ClientConn // valid after done is closed
err error // valid after done is closed
}
// requires p.mu is held.
func (p *http2clientConnPool) getStartDialLocked(addr string) *http2dialCall {
if call, ok := p.dialing[addr]; ok {
// A dial is already in-flight. Don't start another.
return call
}
call := &http2dialCall{p: p, done: make(chan struct{})}
if p.dialing == nil {
p.dialing = make(map[string]*http2dialCall)
}
p.dialing[addr] = call
go call.dial(addr)
return call
}
// run in its own goroutine.
func (c *http2dialCall) dial(addr string) {
const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
close(c.done)
c.p.mu.Lock()
delete(c.p.dialing, addr)
if c.err == nil {
c.p.addConnLocked(addr, c.res)
}
c.p.mu.Unlock()
}
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value reports whether c was used.
// c is never closed.
func (p *http2clientConnPool) addConnIfNeeded(key string, t *http2Transport, c *tls.Conn) (used bool, err error) {
p.mu.Lock()
for _, cc := range p.conns[key] {
if cc.CanTakeNewRequest() {
p.mu.Unlock()
return false, nil
}
}
call, dup := p.addConnCalls[key]
if !dup {
if p.addConnCalls == nil {
p.addConnCalls = make(map[string]*http2addConnCall)
}
call = &http2addConnCall{
p: p,
done: make(chan struct{}),
}
p.addConnCalls[key] = call
go call.run(t, key, c)
}
p.mu.Unlock()
<-call.done
if call.err != nil {
return false, call.err
}
return !dup, nil
}
type http2addConnCall struct {
p *http2clientConnPool
done chan struct{} // closed when done
err error
}
func (c *http2addConnCall) run(t *http2Transport, key string, tc *tls.Conn) {
cc, err := t.NewClientConn(tc)
p := c.p
p.mu.Lock()
if err != nil {
c.err = err
} else {
p.addConnLocked(key, cc)
}
delete(p.addConnCalls, key)
p.mu.Unlock()
close(c.done)
}
func (p *http2clientConnPool) addConn(key string, cc *http2ClientConn) {
p.mu.Lock()
p.addConnLocked(key, cc)
p.mu.Unlock()
}
// p.mu must be held
func (p *http2clientConnPool) addConnLocked(key string, cc *http2ClientConn) {
for _, v := range p.conns[key] {
if v == cc {
return
}
}
if p.conns == nil {
p.conns = make(map[string][]*http2ClientConn)
}
if p.keys == nil {
p.keys = make(map[*http2ClientConn][]string)
}
p.conns[key] = append(p.conns[key], cc)
p.keys[cc] = append(p.keys[cc], key)
}
func (p *http2clientConnPool) MarkDead(cc *http2ClientConn) {
p.mu.Lock()
defer p.mu.Unlock()
for _, key := range p.keys[cc] {
vv, ok := p.conns[key]
if !ok {
continue
}
newList := http2filterOutClientConn(vv, cc)
if len(newList) > 0 {
p.conns[key] = newList
} else {
delete(p.conns, key)
}
}
delete(p.keys, cc)
}
func (p *http2clientConnPool) closeIdleConnections() {
p.mu.Lock()
defer p.mu.Unlock()
// TODO: don't close a cc if it was just added to the pool
// milliseconds ago and has never been used. There's currently
// a small race window with the HTTP/1 Transport's integration
// where it can add an idle conn just before using it, and
// somebody else can concurrently call CloseIdleConns and
// break some caller's RoundTrip.
for _, vv := range p.conns {
for _, cc := range vv {
cc.closeIfIdle()
}
}
}
func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) []*http2ClientConn {
out := in[:0]
for _, v := range in {
if v != exclude {
out = append(out, v)
}
}
// If we filtered it out, zero out the last item to prevent
// the GC from seeing it.
if len(in) != len(out) {
in[len(in)-1] = nil
}
return out
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type http2noDialClientConnPool struct{ *http2clientConnPool }
func (p http2noDialClientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
return p.getClientConn(req, addr, http2noDialOnMiss)
}
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
http2dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
http2dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func http2getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(http2dataChunkSizeClasses)-1; i++ {
if size <= int64(http2dataChunkSizeClasses[i]) {
break
}
}
return http2dataChunkPools[i].Get().([]byte)
}
func http2putDataBufferChunk(p []byte) {
for i, n := range http2dataChunkSizeClasses {
if len(p) == n {
http2dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
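// http2exampleChunkSizing is a hypothetical helper included only to illustrate
// the size classes above (it is not in the upstream bundle): the smallest
// class that fits the requested size is used, and requests larger than every
// class fall back to the 16KB pool.
func http2exampleChunkSizing() {
	small := http2getDataBufferChunk(3 << 10)  // 3KB fits the 4KB class: len(small) == 4096
	large := http2getDataBufferChunk(64 << 10) // larger than all classes: len(large) == 16384
	http2putDataBufferChunk(small)
	http2putDataBufferChunk(large)
}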
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type http2dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var http2errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *http2dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, http2errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
http2putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *http2dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *http2dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *http2dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
		// If the last chunk has no free space, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *http2dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := http2getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
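// http2exampleDataBufferUse is an illustrative sketch, not part of the
// generated bundle: Write sizes new chunks from the expected byte count, and
// Read drains data front to back, releasing chunks as they are consumed.
func http2exampleDataBufferUse() {
	b := &http2dataBuffer{expected: 10 << 10}
	b.Write([]byte("hello")) // want = max(len(p), expected) = 10KB, which lands in the 16KB class
	p := make([]byte, 5)
	n, _ := b.Read(p) // n == 5, b.Len() == 0 afterwards
	_ = n
}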
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type http2ErrCode uint32
const (
http2ErrCodeNo http2ErrCode = 0x0
http2ErrCodeProtocol http2ErrCode = 0x1
http2ErrCodeInternal http2ErrCode = 0x2
http2ErrCodeFlowControl http2ErrCode = 0x3
http2ErrCodeSettingsTimeout http2ErrCode = 0x4
http2ErrCodeStreamClosed http2ErrCode = 0x5
http2ErrCodeFrameSize http2ErrCode = 0x6
http2ErrCodeRefusedStream http2ErrCode = 0x7
http2ErrCodeCancel http2ErrCode = 0x8
http2ErrCodeCompression http2ErrCode = 0x9
http2ErrCodeConnect http2ErrCode = 0xa
http2ErrCodeEnhanceYourCalm http2ErrCode = 0xb
http2ErrCodeInadequateSecurity http2ErrCode = 0xc
http2ErrCodeHTTP11Required http2ErrCode = 0xd
)
var http2errCodeName = map[http2ErrCode]string{
http2ErrCodeNo: "NO_ERROR",
http2ErrCodeProtocol: "PROTOCOL_ERROR",
http2ErrCodeInternal: "INTERNAL_ERROR",
http2ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
http2ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
http2ErrCodeStreamClosed: "STREAM_CLOSED",
http2ErrCodeFrameSize: "FRAME_SIZE_ERROR",
http2ErrCodeRefusedStream: "REFUSED_STREAM",
http2ErrCodeCancel: "CANCEL",
http2ErrCodeCompression: "COMPRESSION_ERROR",
http2ErrCodeConnect: "CONNECT_ERROR",
http2ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
http2ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
http2ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
}
func (e http2ErrCode) String() string {
if s, ok := http2errCodeName[e]; ok {
return s
}
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
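// http2exampleErrCodeString is a small illustrative helper (not part of the
// generated bundle): registered codes stringify to their RFC 7540 names and
// unknown values fall back to a hex form.
func http2exampleErrCodeString() {
	_ = http2ErrCodeCancel.String() // "CANCEL"
	_ = http2ErrCode(0xff).String() // "unknown error code 0xff"
}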
// ConnectionError is an error that results in the termination of the
// entire connection.
type http2ConnectionError http2ErrCode
func (e http2ConnectionError) Error() string {
return fmt.Sprintf("connection error: %s", http2ErrCode(e))
}
// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type http2StreamError struct {
StreamID uint32
Code http2ErrCode
Cause error // optional additional detail
}
func http2streamError(id uint32, code http2ErrCode) http2StreamError {
return http2StreamError{StreamID: id, Code: code}
}
func (e http2StreamError) Error() string {
if e.Cause != nil {
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
}
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}
// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type http2goAwayFlowError struct{}
func (http2goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type http2connError struct {
Code http2ErrCode // the ConnectionError error code
Reason string // additional reason
}
func (e http2connError) Error() string {
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}
type http2pseudoHeaderError string
func (e http2pseudoHeaderError) Error() string {
return fmt.Sprintf("invalid pseudo-header %q", string(e))
}
type http2duplicatePseudoHeaderError string
func (e http2duplicatePseudoHeaderError) Error() string {
return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}
type http2headerFieldNameError string
func (e http2headerFieldNameError) Error() string {
return fmt.Sprintf("invalid header field name %q", string(e))
}
type http2headerFieldValueError string
func (e http2headerFieldValueError) Error() string {
return fmt.Sprintf("invalid header field value %q", string(e))
}
var (
http2errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
http2errPseudoAfterRegular = errors.New("pseudo header field after regular")
)
// flow is the flow control window's size.
type http2flow struct {
// n is the number of DATA bytes we're allowed to send.
	// A flow is kept both on a conn and per-stream.
n int32
// conn points to the shared connection-level flow that is
// shared by all streams on that conn. It is nil for the flow
// that's on the conn directly.
conn *http2flow
}
func (f *http2flow) setConnFlow(cf *http2flow) { f.conn = cf }
func (f *http2flow) available() int32 {
n := f.n
if f.conn != nil && f.conn.n < n {
n = f.conn.n
}
return n
}
func (f *http2flow) take(n int32) {
if n > f.available() {
panic("internal error: took too much")
}
f.n -= n
if f.conn != nil {
f.conn.n -= n
}
}
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *http2flow) add(n int32) bool {
sum := f.n + n
if (sum > n) == (f.n > 0) {
f.n = sum
return true
}
return false
}
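// http2exampleFlowAdd is an illustrative helper added for exposition and not
// part of the generated bundle; it shows the overflow guard in add: the
// window may grow to 2^31-1, but an increment past that is refused.
func http2exampleFlowAdd() {
	var f http2flow
	_ = f.add(math.MaxInt32) // true: window is now 2^31-1
	_ = f.add(1)             // false: the sum would exceed 2^31-1; f.n is unchanged
}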
const http2frameHeaderLen = 9
var http2padZeros = make([]byte, 255) // zeros for padding
// A FrameType is a registered frame type as defined in
// http://http2.github.io/http2-spec/#rfc.section.11.2
type http2FrameType uint8
const (
http2FrameData http2FrameType = 0x0
http2FrameHeaders http2FrameType = 0x1
http2FramePriority http2FrameType = 0x2
http2FrameRSTStream http2FrameType = 0x3
http2FrameSettings http2FrameType = 0x4
http2FramePushPromise http2FrameType = 0x5
http2FramePing http2FrameType = 0x6
http2FrameGoAway http2FrameType = 0x7
http2FrameWindowUpdate http2FrameType = 0x8
http2FrameContinuation http2FrameType = 0x9
)
var http2frameName = map[http2FrameType]string{
http2FrameData: "DATA",
http2FrameHeaders: "HEADERS",
http2FramePriority: "PRIORITY",
http2FrameRSTStream: "RST_STREAM",
http2FrameSettings: "SETTINGS",
http2FramePushPromise: "PUSH_PROMISE",
http2FramePing: "PING",
http2FrameGoAway: "GOAWAY",
http2FrameWindowUpdate: "WINDOW_UPDATE",
http2FrameContinuation: "CONTINUATION",
}
func (t http2FrameType) String() string {
if s, ok := http2frameName[t]; ok {
return s
}
return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
}
// Flags is a bitmask of HTTP/2 flags.
// The meaning of flags varies depending on the frame type.
type http2Flags uint8
// Has reports whether f contains all (0 or more) flags in v.
func (f http2Flags) Has(v http2Flags) bool {
return (f & v) == v
}
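// http2exampleFlagsHas is a hypothetical helper shown only for illustration
// (not in the upstream bundle); Has is a plain bitmask test, so a frame
// carrying several flags reports each one independently.
func http2exampleFlagsHas() {
	f := http2FlagHeadersEndStream | http2FlagHeadersEndHeaders // 0x1 | 0x4
	_ = f.Has(http2FlagHeadersEndHeaders) // true
	_ = f.Has(http2FlagHeadersPriority)   // false: the 0x20 bit is not set
}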
// Frame-specific FrameHeader flag bits.
const (
// Data Frame
http2FlagDataEndStream http2Flags = 0x1
http2FlagDataPadded http2Flags = 0x8
// Headers Frame
http2FlagHeadersEndStream http2Flags = 0x1
http2FlagHeadersEndHeaders http2Flags = 0x4
http2FlagHeadersPadded http2Flags = 0x8
http2FlagHeadersPriority http2Flags = 0x20
// Settings Frame
http2FlagSettingsAck http2Flags = 0x1
// Ping Frame
http2FlagPingAck http2Flags = 0x1
// Continuation Frame
http2FlagContinuationEndHeaders http2Flags = 0x4
http2FlagPushPromiseEndHeaders http2Flags = 0x4
http2FlagPushPromisePadded http2Flags = 0x8
)
var http2flagName = map[http2FrameType]map[http2Flags]string{
http2FrameData: {
http2FlagDataEndStream: "END_STREAM",
http2FlagDataPadded: "PADDED",
},
http2FrameHeaders: {
http2FlagHeadersEndStream: "END_STREAM",
http2FlagHeadersEndHeaders: "END_HEADERS",
http2FlagHeadersPadded: "PADDED",
http2FlagHeadersPriority: "PRIORITY",
},
http2FrameSettings: {
http2FlagSettingsAck: "ACK",
},
http2FramePing: {
http2FlagPingAck: "ACK",
},
http2FrameContinuation: {
http2FlagContinuationEndHeaders: "END_HEADERS",
},
http2FramePushPromise: {
http2FlagPushPromiseEndHeaders: "END_HEADERS",
http2FlagPushPromisePadded: "PADDED",
},
}
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
type http2frameParser func(fc *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error)
var http2frameParsers = map[http2FrameType]http2frameParser{
http2FrameData: http2parseDataFrame,
http2FrameHeaders: http2parseHeadersFrame,
http2FramePriority: http2parsePriorityFrame,
http2FrameRSTStream: http2parseRSTStreamFrame,
http2FrameSettings: http2parseSettingsFrame,
http2FramePushPromise: http2parsePushPromise,
http2FramePing: http2parsePingFrame,
http2FrameGoAway: http2parseGoAwayFrame,
http2FrameWindowUpdate: http2parseWindowUpdateFrame,
http2FrameContinuation: http2parseContinuationFrame,
}
func http2typeFrameParser(t http2FrameType) http2frameParser {
if f := http2frameParsers[t]; f != nil {
return f
}
return http2parseUnknownFrame
}
// A FrameHeader is the 9 byte header of all HTTP/2 frames.
//
// See http://http2.github.io/http2-spec/#FrameHeader
type http2FrameHeader struct {
valid bool // caller can access []byte fields in the Frame
// Type is the 1 byte frame type. There are ten standard frame
// types, but extension frame types may be written by WriteRawFrame
// and will be returned by ReadFrame (as UnknownFrame).
Type http2FrameType
// Flags are the 1 byte of 8 potential bit flags per frame.
// They are specific to the frame type.
Flags http2Flags
// Length is the length of the frame, not including the 9 byte header.
// The maximum size is one byte less than 16MB (uint24), but only
// frames up to 16KB are allowed without peer agreement.
Length uint32
// StreamID is which stream this frame is for. Certain frames
// are not stream-specific, in which case this field is 0.
StreamID uint32
}
// Header returns h. It exists so FrameHeaders can be embedded in other
// specific frame types and implement the Frame interface.
func (h http2FrameHeader) Header() http2FrameHeader { return h }
func (h http2FrameHeader) String() string {
var buf bytes.Buffer
buf.WriteString("[FrameHeader ")
h.writeDebug(&buf)
buf.WriteByte(']')
return buf.String()
}
func (h http2FrameHeader) writeDebug(buf *bytes.Buffer) {
buf.WriteString(h.Type.String())
if h.Flags != 0 {
buf.WriteString(" flags=")
set := 0
for i := uint8(0); i < 8; i++ {
if h.Flags&(1<<i) == 0 {
continue
}
set++
if set > 1 {
buf.WriteByte('|')
}
name := http2flagName[h.Type][http2Flags(1<<i)]
if name != "" {
buf.WriteString(name)
} else {
fmt.Fprintf(buf, "0x%x", 1<<i)
}
}
}
if h.StreamID != 0 {
fmt.Fprintf(buf, " stream=%d", h.StreamID)
}
fmt.Fprintf(buf, " len=%d", h.Length)
}
func (h *http2FrameHeader) checkValid() {
if !h.valid {
panic("Frame accessor called on non-owned Frame")
}
}
func (h *http2FrameHeader) invalidate() { h.valid = false }
// frame header bytes.
// Used only by ReadFrameHeader.
var http2fhBytes = sync.Pool{
New: func() interface{} {
buf := make([]byte, http2frameHeaderLen)
return &buf
},
}
// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
// Most users should use Framer.ReadFrame instead.
func http2ReadFrameHeader(r io.Reader) (http2FrameHeader, error) {
bufp := http2fhBytes.Get().(*[]byte)
defer http2fhBytes.Put(bufp)
return http2readFrameHeader(*bufp, r)
}
func http2readFrameHeader(buf []byte, r io.Reader) (http2FrameHeader, error) {
_, err := io.ReadFull(r, buf[:http2frameHeaderLen])
if err != nil {
return http2FrameHeader{}, err
}
return http2FrameHeader{
Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
Type: http2FrameType(buf[3]),
Flags: http2Flags(buf[4]),
StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
valid: true,
}, nil
}
// A Frame is the base interface implemented by all frame types.
// Callers will generally type-assert the specific frame type:
// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
//
// Frames are only valid until the next call to Framer.ReadFrame.
type http2Frame interface {
Header() http2FrameHeader
// invalidate is called by Framer.ReadFrame to mark this
// frame's buffers as invalid, since the subsequent
// frame will reuse them.
invalidate()
}
// A Framer reads and writes Frames.
type http2Framer struct {
r io.Reader
lastFrame http2Frame
errDetail error
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
maxReadSize uint32
headerBuf [http2frameHeaderLen]byte
// TODO: let getReadBuf be configurable, and use a less memory-pinning
// allocator in server.go to minimize memory pinned for many idle conns.
// Will probably also need to make frame invalidation have a hook too.
getReadBuf func(size uint32) []byte
readBuf []byte // cache for default getReadBuf
maxWriteSize uint32 // zero means unlimited; TODO: implement
w io.Writer
wbuf []byte
// AllowIllegalWrites permits the Framer's Write methods to
// write frames that do not conform to the HTTP/2 spec. This
// permits using the Framer to test other HTTP/2
// implementations' conformance to the spec.
// If false, the Write methods will prefer to return an error
// rather than comply.
AllowIllegalWrites bool
// AllowIllegalReads permits the Framer's ReadFrame method
// to return non-compliant frames or frame orders.
// This is for testing and permits using the Framer to test
// other HTTP/2 implementations' conformance to the spec.
// It is not compatible with ReadMetaHeaders.
AllowIllegalReads bool
// ReadMetaHeaders if non-nil causes ReadFrame to merge
// HEADERS and CONTINUATION frames together and return
// MetaHeadersFrame instead.
ReadMetaHeaders *hpack.Decoder
// MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
// It's used only if ReadMetaHeaders is set; 0 means a sane default
// (currently 16MB).
// If the limit is hit, MetaHeadersFrame.Truncated is set true.
MaxHeaderListSize uint32
// TODO: track which type of frame & with which flags was sent
// last. Then return an error (unless AllowIllegalWrites) if
// we're in the middle of a header block and a
// non-Continuation or Continuation on a different stream is
// attempted to be written.
logReads, logWrites bool
debugFramer *http2Framer // only use for logging written writes
debugFramerBuf *bytes.Buffer
debugReadLoggerf func(string, ...interface{})
debugWriteLoggerf func(string, ...interface{})
frameCache *http2frameCache // nil if frames aren't reused (default)
}
func (fr *http2Framer) maxHeaderListSize() uint32 {
if fr.MaxHeaderListSize == 0 {
return 16 << 20 // sane default, per docs
}
return fr.MaxHeaderListSize
}
func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
0, // 3 bytes of length, filled in in endWrite
0,
0,
byte(ftype),
byte(flags),
byte(streamID>>24),
byte(streamID>>16),
byte(streamID>>8),
byte(streamID))
}
func (f *http2Framer) endWrite() error {
// Now that we know the final size, fill in the FrameHeader in
// the space previously reserved for it. Abuse append.
length := len(f.wbuf) - http2frameHeaderLen
if length >= (1 << 24) {
return http2ErrFrameTooLarge
}
_ = append(f.wbuf[:0],
byte(length>>16),
byte(length>>8),
byte(length))
if f.logWrites {
f.logWrite()
}
n, err := f.w.Write(f.wbuf)
if err == nil && n != len(f.wbuf) {
err = io.ErrShortWrite
}
return err
}
func (f *http2Framer) logWrite() {
if f.debugFramer == nil {
f.debugFramerBuf = new(bytes.Buffer)
f.debugFramer = http2NewFramer(nil, f.debugFramerBuf)
f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
// Let us read anything, even if we accidentally wrote it
// in the wrong order:
f.debugFramer.AllowIllegalReads = true
}
f.debugFramerBuf.Write(f.wbuf)
fr, err := f.debugFramer.ReadFrame()
if err != nil {
f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
return
}
f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, http2summarizeFrame(fr))
}
func (f *http2Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
func (f *http2Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
func (f *http2Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
func (f *http2Framer) writeUint32(v uint32) {
f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
const (
http2minMaxFrameSize = 1 << 14
http2maxFrameSize = 1<<24 - 1
)
// SetReuseFrames allows the Framer to reuse Frames.
// If called on a Framer, Frames returned by calls to ReadFrame are only
// valid until the next call to ReadFrame.
func (fr *http2Framer) SetReuseFrames() {
if fr.frameCache != nil {
return
}
fr.frameCache = &http2frameCache{}
}
type http2frameCache struct {
dataFrame http2DataFrame
}
func (fc *http2frameCache) getDataFrame() *http2DataFrame {
if fc == nil {
return &http2DataFrame{}
}
return &fc.dataFrame
}
// NewFramer returns a Framer that writes frames to w and reads them from r.
func http2NewFramer(w io.Writer, r io.Reader) *http2Framer {
fr := &http2Framer{
w: w,
r: r,
logReads: http2logFrameReads,
logWrites: http2logFrameWrites,
debugReadLoggerf: log.Printf,
debugWriteLoggerf: log.Printf,
}
fr.getReadBuf = func(size uint32) []byte {
if cap(fr.readBuf) >= int(size) {
return fr.readBuf[:size]
}
fr.readBuf = make([]byte, size)
return fr.readBuf
}
fr.SetMaxReadFrameSize(http2maxFrameSize)
return fr
}
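// http2exampleFramerRoundTrip is an illustrative sketch (not used by the
// rest of this bundle): it wires a Framer to a single in-memory buffer as
// both writer and reader, writes a PING frame, and reads it back. Note that
// the returned Frame is only valid until the next call to ReadFrame.
func http2exampleFramerRoundTrip() (http2Frame, error) {
	var buf bytes.Buffer
	fr := http2NewFramer(&buf, &buf)
	if err := fr.WritePing(false, [8]byte{'e', 'x', 'a', 'm', 'p', 'l', 'e', '!'}); err != nil {
		return nil, err
	}
	return fr.ReadFrame() // yields the *PingFrame just written
}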
// SetMaxReadFrameSize sets the maximum size of a frame
// that will be read by a subsequent call to ReadFrame.
// It is the caller's responsibility to advertise this
// limit with a SETTINGS frame.
func (fr *http2Framer) SetMaxReadFrameSize(v uint32) {
if v > http2maxFrameSize {
v = http2maxFrameSize
}
fr.maxReadSize = v
}
// ErrorDetail returns a more detailed error of the last error
// returned by Framer.ReadFrame. For instance, if ReadFrame
// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
// will say exactly what was invalid. ErrorDetail is not guaranteed
// to return a non-nil value and like the rest of the http2 package,
// its return value is not protected by an API compatibility promise.
// ErrorDetail is reset after the next call to ReadFrame.
func (fr *http2Framer) ErrorDetail() error {
return fr.errDetail
}
// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
// sends a frame that is larger than declared with SetMaxReadFrameSize.
var http2ErrFrameTooLarge = errors.New("http2: frame too large")
// terminalReadFrameError reports whether err is an unrecoverable
// error from ReadFrame and no other frames should be read.
func http2terminalReadFrameError(err error) bool {
if _, ok := err.(http2StreamError); ok {
return false
}
return err != nil
}
// ReadFrame reads a single frame. The returned Frame is only valid
// until the next call to ReadFrame.
//
// If the frame is larger than previously set with SetMaxReadFrameSize, the
// returned error is ErrFrameTooLarge. Other errors may be of type
// ConnectionError, StreamError, or anything else from the underlying
// reader.
func (fr *http2Framer) ReadFrame() (http2Frame, error) {
fr.errDetail = nil
if fr.lastFrame != nil {
fr.lastFrame.invalidate()
}
fh, err := http2readFrameHeader(fr.headerBuf[:], fr.r)
if err != nil {
return nil, err
}
if fh.Length > fr.maxReadSize {
return nil, http2ErrFrameTooLarge
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
f, err := http2typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(http2connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
}
return nil, err
}
if err := fr.checkFrameOrder(f); err != nil {
return nil, err
}
if fr.logReads {
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, http2summarizeFrame(f))
}
if fh.Type == http2FrameHeaders && fr.ReadMetaHeaders != nil {
return fr.readMetaFrame(f.(*http2HeadersFrame))
}
return f, nil
}
// connError returns ConnectionError(code) but first
// stashes away a public reason so the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
// their implementations.
func (fr *http2Framer) connError(code http2ErrCode, reason string) error {
fr.errDetail = errors.New(reason)
return http2ConnectionError(code)
}
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
func (fr *http2Framer) checkFrameOrder(f http2Frame) error {
last := fr.lastFrame
fr.lastFrame = f
if fr.AllowIllegalReads {
return nil
}
fh := f.Header()
if fr.lastHeaderStream != 0 {
if fh.Type != http2FrameContinuation {
return fr.connError(http2ErrCodeProtocol,
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
fh.Type, fh.StreamID,
last.Header().Type, fr.lastHeaderStream))
}
if fh.StreamID != fr.lastHeaderStream {
return fr.connError(http2ErrCodeProtocol,
fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
fh.StreamID, fr.lastHeaderStream))
}
} else if fh.Type == http2FrameContinuation {
return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
}
switch fh.Type {
case http2FrameHeaders, http2FrameContinuation:
if fh.Flags.Has(http2FlagHeadersEndHeaders) {
fr.lastHeaderStream = 0
} else {
fr.lastHeaderStream = fh.StreamID
}
}
return nil
}
// A DataFrame conveys arbitrary, variable-length sequences of octets
// associated with a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.1
type http2DataFrame struct {
http2FrameHeader
data []byte
}
func (f *http2DataFrame) StreamEnded() bool {
return f.http2FrameHeader.Flags.Has(http2FlagDataEndStream)
}
// Data returns the frame's data octets, not including any padding
// size byte or padding suffix bytes.
// The caller must not retain the returned memory past the next
// call to ReadFrame.
func (f *http2DataFrame) Data() []byte {
f.checkValid()
return f.data
}
func http2parseDataFrame(fc *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
// field is 0x0, the recipient MUST respond with a
// connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
return nil, http2connError{http2ErrCodeProtocol, "DATA frame with stream ID 0"}
}
f := fc.getDataFrame()
f.http2FrameHeader = fh
var padSize byte
if fh.Flags.Has(http2FlagDataPadded) {
var err error
payload, padSize, err = http2readByte(payload)
if err != nil {
return nil, err
}
}
if int(padSize) > len(payload) {
// If the length of the padding is greater than the
// length of the frame payload, the recipient MUST
// treat this as a connection error.
// Filed: https://github.com/http2/http2-spec/issues/610
return nil, http2connError{http2ErrCodeProtocol, "pad size larger than data payload"}
}
f.data = payload[:len(payload)-int(padSize)]
return f, nil
}
var (
http2errStreamID = errors.New("invalid stream ID")
http2errDepStreamID = errors.New("invalid dependent stream ID")
http2errPadLength = errors.New("pad length too large")
http2errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
func http2validStreamIDOrZero(streamID uint32) bool {
return streamID&(1<<31) == 0
}
func http2validStreamID(streamID uint32) bool {
return streamID != 0 && streamID&(1<<31) == 0
}
// WriteData writes a DATA frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *http2Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
return f.WriteDataPadded(streamID, endStream, data, nil)
}
// WriteDataPadded writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
if len(pad) > 0 {
if len(pad) > 255 {
return http2errPadLength
}
if !f.AllowIllegalWrites {
for _, b := range pad {
if b != 0 {
// "Padding octets MUST be set to zero when sending."
return http2errPadBytes
}
}
}
}
var flags http2Flags
if endStream {
flags |= http2FlagDataEndStream
}
if pad != nil {
flags |= http2FlagDataPadded
}
f.startWrite(http2FrameData, flags, streamID)
if pad != nil {
f.wbuf = append(f.wbuf, byte(len(pad)))
}
f.wbuf = append(f.wbuf, data...)
f.wbuf = append(f.wbuf, pad...)
return f.endWrite()
}
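// http2exampleWritePaddedData is an illustrative sketch (not used by the
// rest of this bundle): it writes a DATA frame carrying "hello" plus four
// zero padding bytes, with END_STREAM set, on a hypothetical stream 1. The
// pad length byte and the PADDED flag are added by WriteDataPadded itself.
func http2exampleWritePaddedData(w io.Writer) error {
	fr := http2NewFramer(w, nil) // write-only use; no reader needed
	return fr.WriteDataPadded(1, true, []byte("hello"), make([]byte, 4))
}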
// A SettingsFrame conveys configuration parameters that affect how
// endpoints communicate, such as preferences and constraints on peer
// behavior.
//
// See http://http2.github.io/http2-spec/#SETTINGS
type http2SettingsFrame struct {
http2FrameHeader
p []byte
}
func http2parseSettingsFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if fh.Flags.Has(http2FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
// SETTINGS frame MUST be empty. Receipt of a
// SETTINGS frame with the ACK flag set and a length
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
// FRAME_SIZE_ERROR.
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
if fh.StreamID != 0 {
// SETTINGS frames always apply to a connection,
// never a single stream. The stream identifier for a
// SETTINGS frame MUST be zero (0x0). If an endpoint
// receives a SETTINGS frame whose stream identifier
// field is anything other than 0x0, the endpoint MUST
// respond with a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR.
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
if len(p)%6 != 0 {
// Expecting a whole number of 6-byte settings.
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
f := &http2SettingsFrame{http2FrameHeader: fh, p: p}
if v, ok := f.Value(http2SettingInitialWindowSize); ok && v > (1<<31)-1 {
// Values above the maximum flow control window size of 2^31 - 1 MUST
// be treated as a connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
return nil, http2ConnectionError(http2ErrCodeFlowControl)
}
return f, nil
}
func (f *http2SettingsFrame) IsAck() bool {
return f.http2FrameHeader.Flags.Has(http2FlagSettingsAck)
}
func (f *http2SettingsFrame) Value(id http2SettingID) (v uint32, ok bool) {
f.checkValid()
for i := 0; i < f.NumSettings(); i++ {
if s := f.Setting(i); s.ID == id {
return s.Val, true
}
}
return 0, false
}
// Setting returns the setting from the frame at the given 0-based index.
// The index must be >= 0 and less than f.NumSettings().
func (f *http2SettingsFrame) Setting(i int) http2Setting {
buf := f.p
return http2Setting{
ID: http2SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
}
}
func (f *http2SettingsFrame) NumSettings() int { return len(f.p) / 6 }
// HasDuplicates reports whether f contains any duplicate setting IDs.
func (f *http2SettingsFrame) HasDuplicates() bool {
num := f.NumSettings()
if num == 0 {
return false
}
// If it's small enough (the common case), just do the n^2
// thing and avoid a map allocation.
if num < 10 {
for i := 0; i < num; i++ {
idi := f.Setting(i).ID
for j := i + 1; j < num; j++ {
idj := f.Setting(j).ID
if idi == idj {
return true
}
}
}
return false
}
seen := map[http2SettingID]bool{}
for i := 0; i < num; i++ {
id := f.Setting(i).ID
if seen[id] {
return true
}
seen[id] = true
}
return false
}
// ForeachSetting runs fn for each setting.
// It stops and returns the first error.
func (f *http2SettingsFrame) ForeachSetting(fn func(http2Setting) error) error {
f.checkValid()
for i := 0; i < f.NumSettings(); i++ {
if err := fn(f.Setting(i)); err != nil {
return err
}
}
return nil
}
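// http2exampleSettingsToMap is an illustrative sketch (not used by the rest
// of this bundle): it drains a parsed SETTINGS frame into a plain map via
// ForeachSetting. The callback never returns an error here, so the return
// value of ForeachSetting is discarded.
func http2exampleSettingsToMap(f *http2SettingsFrame) map[http2SettingID]uint32 {
	m := make(map[http2SettingID]uint32, f.NumSettings())
	f.ForeachSetting(func(s http2Setting) error {
		m[s.ID] = s.Val
		return nil
	})
	return m
}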
// WriteSettings writes a SETTINGS frame with zero or more settings
// specified and the ACK bit not set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WriteSettings(settings ...http2Setting) error {
f.startWrite(http2FrameSettings, 0, 0)
for _, s := range settings {
f.writeUint16(uint16(s.ID))
f.writeUint32(s.Val)
}
return f.endWrite()
}
// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WriteSettingsAck() error {
f.startWrite(http2FrameSettings, http2FlagSettingsAck, 0)
return f.endWrite()
}
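// http2exampleSettingsExchange is an illustrative sketch (not used by the
// rest of this bundle) of the writer side of a SETTINGS exchange: advertise
// a couple of example values, and, once the peer's own (non-ACK) SETTINGS
// frame has been read and applied elsewhere, acknowledge it with an ACK.
func http2exampleSettingsExchange(fr *http2Framer) error {
	if err := fr.WriteSettings(
		http2Setting{ID: http2SettingMaxFrameSize, Val: 1 << 20},
		http2Setting{ID: http2SettingInitialWindowSize, Val: 1 << 20},
	); err != nil {
		return err
	}
	return fr.WriteSettingsAck()
}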
// A PingFrame is a mechanism for measuring a minimal round trip time
// from the sender, as well as determining whether an idle connection
// is still functional.
// See http://http2.github.io/http2-spec/#rfc.section.6.7
type http2PingFrame struct {
http2FrameHeader
Data [8]byte
}
func (f *http2PingFrame) IsAck() bool { return f.Flags.Has(http2FlagPingAck) }
func http2parsePingFrame(_ *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error) {
if len(payload) != 8 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
if fh.StreamID != 0 {
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
f := &http2PingFrame{http2FrameHeader: fh}
copy(f.Data[:], payload)
return f, nil
}
func (f *http2Framer) WritePing(ack bool, data [8]byte) error {
var flags http2Flags
if ack {
flags = http2FlagPingAck
}
f.startWrite(http2FramePing, flags, 0)
f.writeBytes(data[:])
return f.endWrite()
}
// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
// See http://http2.github.io/http2-spec/#rfc.section.6.8
type http2GoAwayFrame struct {
http2FrameHeader
LastStreamID uint32
ErrCode http2ErrCode
debugData []byte
}
// DebugData returns any debug data in the GOAWAY frame. Its contents
// are not defined.
// The caller must not retain the returned memory past the next
// call to ReadFrame.
func (f *http2GoAwayFrame) DebugData() []byte {
f.checkValid()
return f.debugData
}
func http2parseGoAwayFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if fh.StreamID != 0 {
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
if len(p) < 8 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
return &http2GoAwayFrame{
http2FrameHeader: fh,
LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
ErrCode: http2ErrCode(binary.BigEndian.Uint32(p[4:8])),
debugData: p[8:],
}, nil
}
func (f *http2Framer) WriteGoAway(maxStreamID uint32, code http2ErrCode, debugData []byte) error {
f.startWrite(http2FrameGoAway, 0, 0)
f.writeUint32(maxStreamID & (1<<31 - 1))
f.writeUint32(uint32(code))
f.writeBytes(debugData)
return f.endWrite()
}
// An UnknownFrame is the frame type returned when the frame type is unknown
// or no specific frame type parser exists.
type http2UnknownFrame struct {
http2FrameHeader
p []byte
}
// Payload returns the frame's payload (after the header). It is not
// valid to call this method after a subsequent call to
// Framer.ReadFrame, nor is it valid to retain the returned slice.
// The memory is owned by the Framer and is invalidated when the next
// frame is read.
func (f *http2UnknownFrame) Payload() []byte {
f.checkValid()
return f.p
}
func http2parseUnknownFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
return &http2UnknownFrame{fh, p}, nil
}
// A WindowUpdateFrame is used to implement flow control.
// See http://http2.github.io/http2-spec/#rfc.section.6.9
type http2WindowUpdateFrame struct {
http2FrameHeader
Increment uint32 // never read with high bit set
}
func http2parseWindowUpdateFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if len(p) != 4 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
if inc == 0 {
// A receiver MUST treat the receipt of a
// WINDOW_UPDATE frame with a flow control window
// increment of 0 as a stream error (Section 5.4.2) of
// type PROTOCOL_ERROR; errors on the connection flow
// control window MUST be treated as a connection
// error (Section 5.4.1).
if fh.StreamID == 0 {
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
}
return &http2WindowUpdateFrame{
http2FrameHeader: fh,
Increment: inc,
}, nil
}
// WriteWindowUpdate writes a WINDOW_UPDATE frame.
// The increment value must be between 1 and 2,147,483,647, inclusive.
// If the Stream ID is zero, the window update applies to the
// connection as a whole.
func (f *http2Framer) WriteWindowUpdate(streamID, incr uint32) error {
// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
return errors.New("illegal window increment value")
}
f.startWrite(http2FrameWindowUpdate, 0, streamID)
f.writeUint32(incr)
return f.endWrite()
}
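// http2exampleReplenishWindows is an illustrative sketch (not used by the
// rest of this bundle): after consuming n bytes of DATA, a receiver
// typically returns the flow-control credit on both the stream window and
// the connection window (stream ID 0). n must be between 1 and 2^31-1;
// WriteWindowUpdate rejects an increment of 0.
func http2exampleReplenishWindows(fr *http2Framer, streamID uint32, n int32) error {
	if err := fr.WriteWindowUpdate(streamID, http2mustUint31(n)); err != nil {
		return err
	}
	return fr.WriteWindowUpdate(0, http2mustUint31(n))
}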
// A HeadersFrame is used to open a stream and additionally carries a
// header block fragment.
type http2HeadersFrame struct {
http2FrameHeader
// Priority is set if FlagHeadersPriority is set in the FrameHeader.
Priority http2PriorityParam
headerFragBuf []byte // not owned
}
func (f *http2HeadersFrame) HeaderBlockFragment() []byte {
f.checkValid()
return f.headerFragBuf
}
func (f *http2HeadersFrame) HeadersEnded() bool {
return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndHeaders)
}
func (f *http2HeadersFrame) StreamEnded() bool {
return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndStream)
}
func (f *http2HeadersFrame) HasPriority() bool {
return f.http2FrameHeader.Flags.Has(http2FlagHeadersPriority)
}
func http2parseHeadersFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (_ http2Frame, err error) {
hf := &http2HeadersFrame{
http2FrameHeader: fh,
}
if fh.StreamID == 0 {
// HEADERS frames MUST be associated with a stream. If a HEADERS frame
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
return nil, http2connError{http2ErrCodeProtocol, "HEADERS frame with stream ID 0"}
}
var padLength uint8
if fh.Flags.Has(http2FlagHeadersPadded) {
if p, padLength, err = http2readByte(p); err != nil {
return
}
}
if fh.Flags.Has(http2FlagHeadersPriority) {
var v uint32
p, v, err = http2readUint32(p)
if err != nil {
return nil, err
}
hf.Priority.StreamDep = v & 0x7fffffff
hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
p, hf.Priority.Weight, err = http2readByte(p)
if err != nil {
return nil, err
}
}
if len(p)-int(padLength) <= 0 {
return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
}
hf.headerFragBuf = p[:len(p)-int(padLength)]
return hf, nil
}
// HeadersFrameParam are the parameters for writing a HEADERS frame.
type http2HeadersFrameParam struct {
// StreamID is the required Stream ID to initiate.
StreamID uint32
// BlockFragment is part (or all) of a Header Block.
BlockFragment []byte
// EndStream indicates that the header block is the last that
// the endpoint will send for the identified stream. Setting
// this flag causes the stream to enter one of "half closed"
// states.
EndStream bool
// EndHeaders indicates that this frame contains an entire
// header block and is not followed by any
// CONTINUATION frames.
EndHeaders bool
// PadLength is the optional number of bytes of zeros to add
// to this frame.
PadLength uint8
// Priority, if non-zero, includes stream priority information
// in the HEADER frame.
Priority http2PriorityParam
}
// WriteHeaders writes a single HEADERS frame.
//
// This is a low-level header writing method. Encoding headers and
// splitting them into any necessary CONTINUATION frames is handled
// elsewhere.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WriteHeaders(p http2HeadersFrameParam) error {
if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
var flags http2Flags
if p.PadLength != 0 {
flags |= http2FlagHeadersPadded
}
if p.EndStream {
flags |= http2FlagHeadersEndStream
}
if p.EndHeaders {
flags |= http2FlagHeadersEndHeaders
}
if !p.Priority.IsZero() {
flags |= http2FlagHeadersPriority
}
f.startWrite(http2FrameHeaders, flags, p.StreamID)
if p.PadLength != 0 {
f.writeByte(p.PadLength)
}
if !p.Priority.IsZero() {
v := p.Priority.StreamDep
if !http2validStreamIDOrZero(v) && !f.AllowIllegalWrites {
return http2errDepStreamID
}
if p.Priority.Exclusive {
v |= 1 << 31
}
f.writeUint32(v)
f.writeByte(p.Priority.Weight)
}
f.wbuf = append(f.wbuf, p.BlockFragment...)
f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
return f.endWrite()
}
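// http2exampleWriteRequestHeaders is an illustrative sketch (not used by the
// rest of this bundle): it hpack-encodes a minimal set of request
// pseudo-headers (placeholder values) and writes them as one HEADERS frame
// with END_HEADERS and END_STREAM set. Splitting an oversized block into
// CONTINUATION frames is deliberately not shown here.
func http2exampleWriteRequestHeaders(fr *http2Framer, streamID uint32) error {
	var hbuf bytes.Buffer
	henc := hpack.NewEncoder(&hbuf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
		{Name: ":path", Value: "/"},
	} {
		if err := henc.WriteField(hf); err != nil {
			return err
		}
	}
	return fr.WriteHeaders(http2HeadersFrameParam{
		StreamID:      streamID,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	})
}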
// A PriorityFrame specifies the sender-advised priority of a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.3
type http2PriorityFrame struct {
http2FrameHeader
http2PriorityParam
}
// PriorityParam are the stream prioritization parameters.
type http2PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
// stream that this stream depends on. Zero means no
// dependency.
StreamDep uint32
// Exclusive is whether the dependency is exclusive.
Exclusive bool
// Weight is the stream's zero-indexed weight. It should be
// set together with StreamDep, or neither should be set. Per
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
}
func (p http2PriorityParam) IsZero() bool {
return p == http2PriorityParam{}
}
func http2parsePriorityFrame(_ *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error) {
if fh.StreamID == 0 {
return nil, http2connError{http2ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
if len(payload) != 5 {
return nil, http2connError{http2ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
}
v := binary.BigEndian.Uint32(payload[:4])
streamID := v & 0x7fffffff // mask off high bit
return &http2PriorityFrame{
http2FrameHeader: fh,
http2PriorityParam: http2PriorityParam{
Weight: payload[4],
StreamDep: streamID,
Exclusive: streamID != v, // was high bit set?
},
}, nil
}
// WritePriority writes a PRIORITY frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WritePriority(streamID uint32, p http2PriorityParam) error {
if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
if !http2validStreamIDOrZero(p.StreamDep) {
return http2errDepStreamID
}
f.startWrite(http2FramePriority, 0, streamID)
v := p.StreamDep
if p.Exclusive {
v |= 1 << 31
}
f.writeUint32(v)
f.writeByte(p.Weight)
return f.endWrite()
}
// A RSTStreamFrame allows for abnormal termination of a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.4
type http2RSTStreamFrame struct {
http2FrameHeader
ErrCode http2ErrCode
}
func http2parseRSTStreamFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if len(p) != 4 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
if fh.StreamID == 0 {
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
return &http2RSTStreamFrame{fh, http2ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
}
// WriteRSTStream writes a RST_STREAM frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WriteRSTStream(streamID uint32, code http2ErrCode) error {
if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
f.startWrite(http2FrameRSTStream, 0, streamID)
f.writeUint32(uint32(code))
return f.endWrite()
}
// A ContinuationFrame is used to continue a sequence of header block fragments.
// See http://http2.github.io/http2-spec/#rfc.section.6.10
type http2ContinuationFrame struct {
http2FrameHeader
headerFragBuf []byte
}
func http2parseContinuationFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if fh.StreamID == 0 {
return nil, http2connError{http2ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
return &http2ContinuationFrame{fh, p}, nil
}
func (f *http2ContinuationFrame) HeaderBlockFragment() []byte {
f.checkValid()
return f.headerFragBuf
}
func (f *http2ContinuationFrame) HeadersEnded() bool {
return f.http2FrameHeader.Flags.Has(http2FlagContinuationEndHeaders)
}
// WriteContinuation writes a CONTINUATION frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
var flags http2Flags
if endHeaders {
flags |= http2FlagContinuationEndHeaders
}
f.startWrite(http2FrameContinuation, flags, streamID)
f.wbuf = append(f.wbuf, headerBlockFragment...)
return f.endWrite()
}
// A PushPromiseFrame is used to initiate a server stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.6
type http2PushPromiseFrame struct {
http2FrameHeader
PromiseID uint32
headerFragBuf []byte // not owned
}
func (f *http2PushPromiseFrame) HeaderBlockFragment() []byte {
f.checkValid()
return f.headerFragBuf
}
func (f *http2PushPromiseFrame) HeadersEnded() bool {
return f.http2FrameHeader.Flags.Has(http2FlagPushPromiseEndHeaders)
}
func http2parsePushPromise(_ *http2frameCache, fh http2FrameHeader, p []byte) (_ http2Frame, err error) {
pp := &http2PushPromiseFrame{
http2FrameHeader: fh,
}
if pp.StreamID == 0 {
// PUSH_PROMISE frames MUST be associated with an existing,
// peer-initiated stream. The stream identifier of a
// PUSH_PROMISE frame indicates the stream it is associated
// with. If the stream identifier field specifies the value
// 0x0, a recipient MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
// The PUSH_PROMISE frame includes optional padding.
// Padding fields and flags are identical to those defined for DATA frames.
var padLength uint8
if fh.Flags.Has(http2FlagPushPromisePadded) {
if p, padLength, err = http2readByte(p); err != nil {
return
}
}
p, pp.PromiseID, err = http2readUint32(p)
if err != nil {
return
}
pp.PromiseID = pp.PromiseID & (1<<31 - 1)
if int(padLength) > len(p) {
// like the DATA frame, error out if padding is longer than the body.
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
pp.headerFragBuf = p[:len(p)-int(padLength)]
return pp, nil
}
// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
type http2PushPromiseParam struct {
// StreamID is the required Stream ID to initiate.
StreamID uint32
// PromiseID is the required Stream ID of the promised
// (pushed) stream.
PromiseID uint32
// BlockFragment is part (or all) of a Header Block.
BlockFragment []byte
// EndHeaders indicates that this frame contains an entire
// header block and is not followed by any
// CONTINUATION frames.
EndHeaders bool
// PadLength is the optional number of bytes of zeros to add
// to this frame.
PadLength uint8
}
// WritePushPromise writes a single PushPromise Frame.
//
// As with HEADERS frames, this is the low-level call for writing
// individual frames. Continuation frames are handled elsewhere.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *http2Framer) WritePushPromise(p http2PushPromiseParam) error {
if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
var flags http2Flags
if p.PadLength != 0 {
flags |= http2FlagPushPromisePadded
}
if p.EndHeaders {
flags |= http2FlagPushPromiseEndHeaders
}
f.startWrite(http2FramePushPromise, flags, p.StreamID)
if p.PadLength != 0 {
f.writeByte(p.PadLength)
}
if !http2validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
return http2errStreamID
}
f.writeUint32(p.PromiseID)
f.wbuf = append(f.wbuf, p.BlockFragment...)
f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
return f.endWrite()
}
// WriteRawFrame writes a raw frame. This can be used to write
// extension frames unknown to this package.
func (f *http2Framer) WriteRawFrame(t http2FrameType, flags http2Flags, streamID uint32, payload []byte) error {
f.startWrite(t, flags, streamID)
f.writeBytes(payload)
return f.endWrite()
}
func http2readByte(p []byte) (remain []byte, b byte, err error) {
if len(p) == 0 {
return nil, 0, io.ErrUnexpectedEOF
}
return p[1:], p[0], nil
}
func http2readUint32(p []byte) (remain []byte, v uint32, err error) {
if len(p) < 4 {
return nil, 0, io.ErrUnexpectedEOF
}
return p[4:], binary.BigEndian.Uint32(p[:4]), nil
}
type http2streamEnder interface {
StreamEnded() bool
}
type http2headersEnder interface {
HeadersEnded() bool
}
type http2headersOrContinuation interface {
http2headersEnder
HeaderBlockFragment() []byte
}
// A MetaHeadersFrame is the representation of one HEADERS frame and
// zero or more contiguous CONTINUATION frames and the decoding of
// their HPACK-encoded contents.
//
// This type of frame does not appear on the wire and is only returned
// by the Framer when Framer.ReadMetaHeaders is set.
type http2MetaHeadersFrame struct {
*http2HeadersFrame
// Fields are the fields contained in the HEADERS and
// CONTINUATION frames. The underlying slice is owned by the
// Framer and must not be retained after the next call to
// ReadFrame.
//
// Fields are guaranteed to be in the correct http2 order and
// not have unknown pseudo header fields or invalid header
// field names or values. Required pseudo header fields may be
// missing, however. Use the MetaHeadersFrame.PseudoValue accessor
// method to access pseudo headers.
Fields []hpack.HeaderField
// Truncated is whether the max header list size limit was hit
// and Fields is incomplete. The hpack decoder state is still
// valid, however.
Truncated bool
}
// PseudoValue returns the given pseudo header field's value.
// The provided pseudo field should not contain the leading colon.
func (mh *http2MetaHeadersFrame) PseudoValue(pseudo string) string {
for _, hf := range mh.Fields {
if !hf.IsPseudo() {
return ""
}
if hf.Name[1:] == pseudo {
return hf.Value
}
}
return ""
}
// RegularFields returns the regular (non-pseudo) header fields of mh.
// The caller does not own the returned slice.
func (mh *http2MetaHeadersFrame) RegularFields() []hpack.HeaderField {
for i, hf := range mh.Fields {
if !hf.IsPseudo() {
return mh.Fields[i:]
}
}
return nil
}
// PseudoFields returns the pseudo header fields of mh.
// The caller does not own the returned slice.
func (mh *http2MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
for i, hf := range mh.Fields {
if !hf.IsPseudo() {
return mh.Fields[:i]
}
}
return mh.Fields
}
func (mh *http2MetaHeadersFrame) checkPseudos() error {
var isRequest, isResponse bool
pf := mh.PseudoFields()
for i, hf := range pf {
switch hf.Name {
case ":method", ":path", ":scheme", ":authority":
isRequest = true
case ":status":
isResponse = true
default:
return http2pseudoHeaderError(hf.Name)
}
// Check for duplicates.
// This would be a bad algorithm, but N is 4.
// And this doesn't allocate.
for _, hf2 := range pf[:i] {
if hf.Name == hf2.Name {
return http2duplicatePseudoHeaderError(hf.Name)
}
}
}
if isRequest && isResponse {
return http2errMixPseudoHeaderTypes
}
return nil
}
func (fr *http2Framer) maxHeaderStringLen() int {
v := fr.maxHeaderListSize()
if uint32(int(v)) == v {
return int(v)
}
// They had a crazy big number for MaxHeaderBytes anyway,
// so give them unlimited header lengths:
return 0
}
// readMetaFrame reads zero or more CONTINUATION frames from fr,
// merges them into the provided hf, and returns a MetaHeadersFrame
// with the decoded hpack values.
func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFrame, error) {
if fr.AllowIllegalReads {
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
}
mh := &http2MetaHeadersFrame{
http2HeadersFrame: hf,
}
var remainSize = fr.maxHeaderListSize()
var sawRegular bool
var invalid error // pseudo header field errors
hdec := fr.ReadMetaHeaders
hdec.SetEmitEnabled(true)
hdec.SetMaxStringLength(fr.maxHeaderStringLen())
hdec.SetEmitFunc(func(hf hpack.HeaderField) {
if http2VerboseLogs && fr.logReads {
fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
}
if !httpguts.ValidHeaderFieldValue(hf.Value) {
invalid = http2headerFieldValueError(hf.Value)
}
isPseudo := strings.HasPrefix(hf.Name, ":")
if isPseudo {
if sawRegular {
invalid = http2errPseudoAfterRegular
}
} else {
sawRegular = true
if !http2validWireHeaderFieldName(hf.Name) {
invalid = http2headerFieldNameError(hf.Name)
}
}
if invalid != nil {
hdec.SetEmitEnabled(false)
return
}
size := hf.Size()
if size > remainSize {
hdec.SetEmitEnabled(false)
mh.Truncated = true
return
}
remainSize -= size
mh.Fields = append(mh.Fields, hf)
})
// Lose reference to MetaHeadersFrame:
defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
var hc http2headersOrContinuation = hf
for {
frag := hc.HeaderBlockFragment()
if _, err := hdec.Write(frag); err != nil {
return nil, http2ConnectionError(http2ErrCodeCompression)
}
if hc.HeadersEnded() {
break
}
if f, err := fr.ReadFrame(); err != nil {
return nil, err
} else {
hc = f.(*http2ContinuationFrame) // guaranteed by checkFrameOrder
}
}
mh.http2HeadersFrame.headerFragBuf = nil
mh.http2HeadersFrame.invalidate()
if err := hdec.Close(); err != nil {
return nil, http2ConnectionError(http2ErrCodeCompression)
}
if invalid != nil {
fr.errDetail = invalid
if http2VerboseLogs {
log.Printf("http2: invalid header: %v", invalid)
}
return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, invalid}
}
if err := mh.checkPseudos(); err != nil {
fr.errDetail = err
if http2VerboseLogs {
log.Printf("http2: invalid pseudo headers: %v", err)
}
return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, err}
}
return mh, nil
}
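// http2exampleReadMetaHeaders is an illustrative sketch (not used by the
// rest of this bundle): with ReadMetaHeaders set to an hpack.Decoder,
// ReadFrame replaces each HEADERS+CONTINUATION sequence with a single
// *MetaHeadersFrame whose fields are already decoded and validated.
func http2exampleReadMetaHeaders(fr *http2Framer) error {
	fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		return err
	}
	if mh, ok := f.(*http2MetaHeadersFrame); ok {
		for _, hf := range mh.RegularFields() {
			log.Printf("http2 example: header %q = %q", hf.Name, hf.Value)
		}
	}
	return nil
}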
func http2summarizeFrame(f http2Frame) string {
var buf bytes.Buffer
f.Header().writeDebug(&buf)
switch f := f.(type) {
case *http2SettingsFrame:
n := 0
f.ForeachSetting(func(s http2Setting) error {
n++
if n == 1 {
buf.WriteString(", settings:")
}
fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
return nil
})
if n > 0 {
buf.Truncate(buf.Len() - 1) // remove trailing comma
}
case *http2DataFrame:
data := f.Data()
const max = 256
if len(data) > max {
data = data[:max]
}
fmt.Fprintf(&buf, " data=%q", data)
if len(f.Data()) > max {
fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
}
case *http2WindowUpdateFrame:
if f.StreamID == 0 {
buf.WriteString(" (conn)")
}
fmt.Fprintf(&buf, " incr=%v", f.Increment)
case *http2PingFrame:
fmt.Fprintf(&buf, " ping=%q", f.Data[:])
case *http2GoAwayFrame:
fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
f.LastStreamID, f.ErrCode, f.debugData)
case *http2RSTStreamFrame:
fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
}
return buf.String()
}
func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}
var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
type http2goroutineLock uint64
func http2newGoroutineLock() http2goroutineLock {
if !http2DebugGoroutines {
return 0
}
return http2goroutineLock(http2curGoroutineID())
}
func (g http2goroutineLock) check() {
if !http2DebugGoroutines {
return
}
if http2curGoroutineID() != uint64(g) {
panic("running on the wrong goroutine")
}
}
func (g http2goroutineLock) checkNotOn() {
if !http2DebugGoroutines {
return
}
if http2curGoroutineID() == uint64(g) {
panic("running on the wrong goroutine")
}
}
var http2goroutineSpace = []byte("goroutine ")
func http2curGoroutineID() uint64 {
bp := http2littleBuf.Get().(*[]byte)
defer http2littleBuf.Put(bp)
b := *bp
b = b[:runtime.Stack(b, false)]
// Parse the 4707 out of "goroutine 4707 ["
b = bytes.TrimPrefix(b, http2goroutineSpace)
i := bytes.IndexByte(b, ' ')
if i < 0 {
panic(fmt.Sprintf("No space found in %q", b))
}
b = b[:i]
n, err := http2parseUintBytes(b, 10, 64)
if err != nil {
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
}
return n
}
var http2littleBuf = sync.Pool{
New: func() interface{} {
buf := make([]byte, 64)
return &buf
},
}
// parseUintBytes is like strconv.ParseUint, but using a []byte.
func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
var cutoff, maxVal uint64
if bitSize == 0 {
bitSize = int(strconv.IntSize)
}
s0 := s
switch {
case len(s) < 1:
err = strconv.ErrSyntax
goto Error
case 2 <= base && base <= 36:
// valid base; nothing to do
case base == 0:
// Look for octal, hex prefix.
switch {
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
base = 16
s = s[2:]
if len(s) < 1 {
err = strconv.ErrSyntax
goto Error
}
case s[0] == '0':
base = 8
default:
base = 10
}
default:
err = errors.New("invalid base " + strconv.Itoa(base))
goto Error
}
n = 0
cutoff = http2cutoff64(base)
maxVal = 1<<uint(bitSize) - 1
for i := 0; i < len(s); i++ {
var v byte
d := s[i]
switch {
case '0' <= d && d <= '9':
v = d - '0'
case 'a' <= d && d <= 'z':
v = d - 'a' + 10
case 'A' <= d && d <= 'Z':
v = d - 'A' + 10
default:
n = 0
err = strconv.ErrSyntax
goto Error
}
if int(v) >= base {
n = 0
err = strconv.ErrSyntax
goto Error
}
if n >= cutoff {
// n*base overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
}
n *= uint64(base)
n1 := n + uint64(v)
if n1 < n || n1 > maxVal {
// n+v overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
}
n = n1
}
return n, nil
Error:
return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
// Return the first number n such that n*base >= 1<<64.
func http2cutoff64(base int) uint64 {
if base < 2 {
return 0
}
return (1<<64-1)/uint64(base) + 1
}
var (
http2commonBuildOnce sync.Once
http2commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
http2commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)
func http2buildCommonHeaderMapsOnce() {
http2commonBuildOnce.Do(http2buildCommonHeaderMaps)
}
func http2buildCommonHeaderMaps() {
common := []string{
"accept",
"accept-charset",
"accept-encoding",
"accept-language",
"accept-ranges",
"age",
"access-control-allow-origin",
"allow",
"authorization",
"cache-control",
"content-disposition",
"content-encoding",
"content-language",
"content-length",
"content-location",
"content-range",
"content-type",
"cookie",
"date",
"etag",
"expect",
"expires",
"from",
"host",
"if-match",
"if-modified-since",
"if-none-match",
"if-unmodified-since",
"last-modified",
"link",
"location",
"max-forwards",
"proxy-authenticate",
"proxy-authorization",
"range",
"referer",
"refresh",
"retry-after",
"server",
"set-cookie",
"strict-transport-security",
"trailer",
"transfer-encoding",
"user-agent",
"vary",
"via",
"www-authenticate",
}
http2commonLowerHeader = make(map[string]string, len(common))
http2commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
chk := CanonicalHeaderKey(v)
http2commonLowerHeader[chk] = v
http2commonCanonHeader[v] = chk
}
}
func http2lowerHeader(v string) string {
http2buildCommonHeaderMapsOnce()
if s, ok := http2commonLowerHeader[v]; ok {
return s
}
return strings.ToLower(v)
}
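// http2exampleLowerHeader is an illustrative sketch (not used by the rest of
// this bundle): common canonical keys hit the precomputed map built above,
// while anything else falls back to strings.ToLower. "X-Custom-Thing" is a
// made-up header name used only for illustration.
func http2exampleLowerHeader() []string {
	return []string{
		http2lowerHeader("Content-Type"),   // "content-type", via the map
		http2lowerHeader("X-Custom-Thing"), // "x-custom-thing", via ToLower
	}
}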
var (
http2VerboseLogs bool
http2logFrameWrites bool
http2logFrameReads bool
http2inTests bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") {
http2VerboseLogs = true
}
if strings.Contains(e, "http2debug=2") {
http2VerboseLogs = true
http2logFrameWrites = true
http2logFrameReads = true
}
}
const (
// ClientPreface is the string that must be sent by new
// connections from clients.
http2ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
// SETTINGS_MAX_FRAME_SIZE default
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
http2initialMaxFrameSize = 16384
// NextProtoTLS is the NPN/ALPN protocol negotiated during
// HTTP/2's TLS setup.
http2NextProtoTLS = "h2"
// http://http2.github.io/http2-spec/#SettingValues
http2initialHeaderTableSize = 4096
http2initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
http2defaultMaxReadFrameSize = 1 << 20
)
var (
http2clientPreface = []byte(http2ClientPreface)
)
type http2streamState int
// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
http2stateIdle http2streamState = iota
http2stateOpen
http2stateHalfClosedLocal
http2stateHalfClosedRemote
http2stateClosed
)
var http2stateName = [...]string{
http2stateIdle: "Idle",
http2stateOpen: "Open",
http2stateHalfClosedLocal: "HalfClosedLocal",
http2stateHalfClosedRemote: "HalfClosedRemote",
http2stateClosed: "Closed",
}
func (st http2streamState) String() string {
return http2stateName[st]
}
// Setting is a setting parameter: which setting it is, and its value.
type http2Setting struct {
// ID is which setting is being set.
// See http://http2.github.io/http2-spec/#SettingValues
ID http2SettingID
// Val is the value.
Val uint32
}
func (s http2Setting) String() string {
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
// Valid reports whether the setting is valid.
func (s http2Setting) Valid() error {
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
switch s.ID {
case http2SettingEnablePush:
if s.Val != 1 && s.Val != 0 {
return http2ConnectionError(http2ErrCodeProtocol)
}
case http2SettingInitialWindowSize:
if s.Val > 1<<31-1 {
return http2ConnectionError(http2ErrCodeFlowControl)
}
case http2SettingMaxFrameSize:
if s.Val < 16384 || s.Val > 1<<24-1 {
return http2ConnectionError(http2ErrCodeProtocol)
}
}
return nil
}
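// http2exampleValidateSetting is an illustrative sketch (not used by the
// rest of this bundle): an INITIAL_WINDOW_SIZE above 2^31-1 is rejected by
// Valid with a FLOW_CONTROL_ERROR connection error, matching the check in
// parseSettingsFrame above.
func http2exampleValidateSetting() error {
	s := http2Setting{ID: http2SettingInitialWindowSize, Val: 1 << 31}
	return s.Valid() // non-nil: ConnectionError(ErrCodeFlowControl)
}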
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type http2SettingID uint16
const (
http2SettingHeaderTableSize http2SettingID = 0x1
http2SettingEnablePush http2SettingID = 0x2
http2SettingMaxConcurrentStreams http2SettingID = 0x3
http2SettingInitialWindowSize http2SettingID = 0x4
http2SettingMaxFrameSize http2SettingID = 0x5
http2SettingMaxHeaderListSize http2SettingID = 0x6
)
var http2settingName = map[http2SettingID]string{
http2SettingHeaderTableSize: "HEADER_TABLE_SIZE",
http2SettingEnablePush: "ENABLE_PUSH",
http2SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
http2SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
http2SettingMaxFrameSize: "MAX_FRAME_SIZE",
http2SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
}
func (s http2SettingID) String() string {
if v, ok := http2settingName[s]; ok {
return v
}
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
var (
http2errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
http2errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
)
// validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httpguts.ValidHeaderName for the base rules.
//
// Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII
// characters that are compared in a case-insensitive
// fashion. However, header field names MUST be converted to
// lowercase prior to their encoding in HTTP/2. "
func http2validWireHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
}
for _, r := range v {
if !httpguts.IsTokenRune(r) {
return false
}
if 'A' <= r && r <= 'Z' {
return false
}
}
return true
}
func http2httpCodeString(code int) string {
switch code {
case 200:
return "200"
case 404:
return "404"
}
return strconv.Itoa(code)
}
// from pkg io
type http2stringWriter interface {
WriteString(s string) (n int, err error)
}
// A gate lets two goroutines coordinate their activities.
type http2gate chan struct{}
func (g http2gate) Done() { g <- struct{}{} }
func (g http2gate) Wait() { <-g }
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type http2closeWaiter chan struct{}
// Init makes a closeWaiter usable.
// It exists so that a closeWaiter value can be placed inside a
// larger struct and have the Mutex and Cond's memory in the same
// allocation.
func (cw *http2closeWaiter) Init() {
*cw = make(chan struct{})
}
// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw http2closeWaiter) Close() {
close(cw)
}
// Wait waits for the closeWaiter to become closed.
func (cw http2closeWaiter) Wait() {
<-cw
}
// bufferedWriter is a buffered writer that writes to w.
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type http2bufferedWriter struct {
w io.Writer // immutable
bw *bufio.Writer // non-nil when data is buffered
}
func http2newBufferedWriter(w io.Writer) *http2bufferedWriter {
return &http2bufferedWriter{w: w}
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const http2bufWriterPoolBufferSize = 4 << 10
var http2bufWriterPool = sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, http2bufWriterPoolBufferSize)
},
}
func (w *http2bufferedWriter) Available() int {
if w.bw == nil {
return http2bufWriterPoolBufferSize
}
return w.bw.Available()
}
func (w *http2bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := http2bufWriterPool.Get().(*bufio.Writer)
bw.Reset(w.w)
w.bw = bw
}
return w.bw.Write(p)
}
func (w *http2bufferedWriter) Flush() error {
bw := w.bw
if bw == nil {
return nil
}
err := bw.Flush()
bw.Reset(nil)
http2bufWriterPool.Put(bw)
w.bw = nil
return err
}
func http2mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
}
return uint32(v)
}
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 7230, section 3.3.
func http2bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == 204:
return false
case status == 304:
return false
}
return true
}
type http2httpError struct {
msg string
timeout bool
}
func (e *http2httpError) Error() string { return e.msg }
func (e *http2httpError) Timeout() bool { return e.timeout }
func (e *http2httpError) Temporary() bool { return true }
var http2errTimeout error = &http2httpError{msg: "http2: timeout awaiting response headers", timeout: true}
type http2connectionStater interface {
ConnectionState() tls.ConnectionState
}
var http2sorterPool = sync.Pool{New: func() interface{} { return new(http2sorter) }}
type http2sorter struct {
v []string // owned by sorter
}
func (s *http2sorter) Len() int { return len(s.v) }
func (s *http2sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
func (s *http2sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
// Keys returns the sorted keys of h.
//
// The returned slice is only valid until s is used again or returned to
// its pool.
func (s *http2sorter) Keys(h Header) []string {
keys := s.v[:0]
for k := range h {
keys = append(keys, k)
}
s.v = keys
sort.Sort(s)
return keys
}
func (s *http2sorter) SortStrings(ss []string) {
// Our sorter works on s.v, which sorter owns, so
// stash it away while we sort the user's buffer.
save := s.v
s.v = ss
sort.Sort(s)
s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func http2validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type http2pipe struct {
mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu
b http2pipeBuffer // nil when done reading
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
readFn func() // optional code to run in Read before error
}
type http2pipeBuffer interface {
Len() int
io.Writer
io.Reader
}
func (p *http2pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.b == nil {
return 0
}
return p.b.Len()
}
// Read waits until data is available and copies bytes
// from the buffer into p.
func (p *http2pipe) Read(d []byte) (n int, err error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.c.L == nil {
p.c.L = &p.mu
}
for {
if p.breakErr != nil {
return 0, p.breakErr
}
if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {
if p.readFn != nil {
p.readFn() // e.g. copy trailers
p.readFn = nil // not sticky like p.err
}
p.b = nil
return 0, p.err
}
p.c.Wait()
}
}
var http2errClosedPipeWrite = errors.New("write on closed buffer")
// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
func (p *http2pipe) Write(d []byte) (n int, err error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.c.L == nil {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
return 0, http2errClosedPipeWrite
}
if p.breakErr != nil {
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}
// CloseWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err after all data has been
// read.
//
// The error must be non-nil.
func (p *http2pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
// BreakWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err immediately, without
// waiting for unread data.
func (p *http2pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
// in the caller's goroutine before returning the error.
func (p *http2pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
func (p *http2pipe) closeWithError(dst *error, err error, fn func()) {
if err == nil {
panic("err must be non-nil")
}
p.mu.Lock()
defer p.mu.Unlock()
if p.c.L == nil {
p.c.L = &p.mu
}
defer p.c.Signal()
if *dst != nil {
// Already been done.
return
}
p.readFn = fn
if dst == &p.breakErr {
p.b = nil
}
*dst = err
p.closeDoneLocked()
}
// requires p.mu be held.
func (p *http2pipe) closeDoneLocked() {
if p.donec == nil {
return
}
// Close if unclosed. This isn't racy since we always
// hold p.mu while closing.
select {
case <-p.donec:
default:
close(p.donec)
}
}
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
func (p *http2pipe) Err() error {
p.mu.Lock()
defer p.mu.Unlock()
if p.breakErr != nil {
return p.breakErr
}
return p.err
}
// Done returns a channel which is closed if and when this pipe is closed
// with CloseWithError.
func (p *http2pipe) Done() <-chan struct{} {
p.mu.Lock()
defer p.mu.Unlock()
if p.donec == nil {
p.donec = make(chan struct{})
if p.err != nil || p.breakErr != nil {
// Already hit an error.
p.closeDoneLocked()
}
}
return p.donec
}
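// http2pipeUsageSketch is an illustrative sketch, not part of the original
// bundle: a writer goroutine fills a pipe backed by a bytes.Buffer and then
// closes it with io.EOF via CloseWithError, while the caller drains it with
// Read. The function name is hypothetical.
func http2pipeUsageSketch() ([]byte, error) {
p := &http2pipe{b: new(bytes.Buffer)}
go func() {
p.Write([]byte("hello, "))
p.Write([]byte("world"))
p.CloseWithError(io.EOF) // readers see io.EOF once the buffer is drained
}()
var out []byte
buf := make([]byte, 8)
for {
n, err := p.Read(buf)
out = append(out, buf[:n]...)
if err == io.EOF {
return out, nil
}
if err != nil {
return out, err
}
}
}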
const (
http2prefaceTimeout = 10 * time.Second
http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
http2handlerChunkWriteSize = 4 << 10
http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
)
var (
http2errClientDisconnected = errors.New("client disconnected")
http2errClosedBody = errors.New("body closed by handler")
http2errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
http2errStreamClosed = errors.New("http2: stream closed")
)
var http2responseWriterStatePool = sync.Pool{
New: func() interface{} {
rws := &http2responseWriterState{}
rws.bw = bufio.NewWriterSize(http2chunkWriter{rws}, http2handlerChunkWriteSize)
return rws
},
}
// Test hooks.
var (
http2testHookOnConn func()
http2testHookGetServerConn func(*http2serverConn)
http2testHookOnPanicMu *sync.Mutex // nil except in tests
http2testHookOnPanic func(sc *http2serverConn, panicVal interface{}) (rePanic bool)
)
// Server is an HTTP/2 server.
type http2Server struct {
// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
// which may run at a time over all connections.
// Negative or zero means no limit.
// TODO: implement
MaxHandlers int
// MaxConcurrentStreams optionally specifies the number of
// concurrent streams that each client may have open at a
// time. This is unrelated to the number of http.Handler goroutines
// which may be active globally, which is MaxHandlers.
// If zero, MaxConcurrentStreams defaults to at least 100, per
// the HTTP/2 spec's recommendations.
MaxConcurrentStreams uint32
// MaxReadFrameSize optionally specifies the largest frame
// this server is willing to read. A valid value is between
// 16k and 16M, inclusive. If zero or otherwise invalid, a
// default value is used.
MaxReadFrameSize uint32
// PermitProhibitedCipherSuites, if true, permits the use of
// cipher suites prohibited by the HTTP/2 spec.
PermitProhibitedCipherSuites bool
// IdleTimeout specifies how long until idle clients should be
// closed with a GOAWAY frame. PING frames are not considered
// activity for the purposes of IdleTimeout.
IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connection. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
// If the value is outside this range, a default value will be
// used instead.
MaxUploadBufferPerConnection int32
// MaxUploadBufferPerStream is the size of the initial flow control
// window for each stream. The HTTP/2 spec does not allow this to
// be larger than 2^32-1. If the value is zero or larger than the
// maximum, a default value will be used instead.
MaxUploadBufferPerStream int32
// NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen.
NewWriteScheduler func() http2WriteScheduler
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *http2serverInternalState
}
func (s *http2Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection > http2initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *http2Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
}
func (s *http2Server) maxReadFrameSize() uint32 {
if v := s.MaxReadFrameSize; v >= http2minMaxFrameSize && v <= http2maxFrameSize {
return v
}
return http2defaultMaxReadFrameSize
}
func (s *http2Server) maxConcurrentStreams() uint32 {
if v := s.MaxConcurrentStreams; v > 0 {
return v
}
return http2defaultMaxStreams
}
type http2serverInternalState struct {
mu sync.Mutex
activeConns map[*http2serverConn]struct{}
}
func (s *http2serverInternalState) registerConn(sc *http2serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
s.activeConns[sc] = struct{}{}
s.mu.Unlock()
}
func (s *http2serverInternalState) unregisterConn(sc *http2serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
delete(s.activeConns, sc)
s.mu.Unlock()
}
func (s *http2serverInternalState) startGracefulShutdown() {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
for sc := range s.activeConns {
sc.startGracefulShutdown()
}
s.mu.Unlock()
}
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
//
// ConfigureServer must be called before s begins serving.
func http2ConfigureServer(s *Server, conf *http2Server) error {
if s == nil {
panic("nil *http.Server")
}
if conf == nil {
conf = new(http2Server)
}
conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})}
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
} else {
h2.IdleTimeout = h1.ReadTimeout
}
}
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config)
} else if s.TLSConfig.CipherSuites != nil {
// If they already provided a CipherSuite list, return
// an error if it has a bad order or is missing
// ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
haveRequired := false
sawBad := false
for i, cs := range s.TLSConfig.CipherSuites {
switch cs {
case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
// Alternative MTI cipher to not discourage ECDSA-only servers.
// See http://golang.org/cl/30721 for further information.
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
haveRequired = true
}
if http2isBadCipher(cs) {
sawBad = true
} else if sawBad {
return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
}
}
if !haveRequired {
return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
}
}
// Note: not setting MinVersion to tls.VersionTLS12,
// as we don't want to interfere with HTTP/1.1 traffic
// on the user's server. We enforce TLS 1.2 later once
// we accept a connection. Ideally this should be done
// during next-proto selection, but using TLS <1.2 with
// HTTP/2 is still the client's bug.
s.TLSConfig.PreferServerCipherSuites = true
haveNPN := false
for _, p := range s.TLSConfig.NextProtos {
if p == http2NextProtoTLS {
haveNPN = true
break
}
}
if !haveNPN {
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, http2NextProtoTLS)
}
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){}
}
protoHandler := func(hs *Server, c *tls.Conn, h Handler) {
if http2testHookOnConn != nil {
http2testHookOnConn()
}
conf.ServeConn(c, &http2ServeConnOpts{
Handler: h,
BaseConfig: hs,
})
}
s.TLSNextProto[http2NextProtoTLS] = protoHandler
return nil
}
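// http2configureServerSketch is an illustrative sketch, not part of the
// original bundle: it wires HTTP/2 support onto a Server before the caller
// starts TLS serving. The function name, address and timeouts are
// hypothetical values chosen for the example.
func http2configureServerSketch(handler Handler) (*Server, error) {
srv := &Server{
Addr:    ":8443",
Handler: handler,
}
h2 := &http2Server{
MaxConcurrentStreams: 250,
IdleTimeout:          5 * time.Minute,
}
if err := http2ConfigureServer(srv, h2); err != nil {
return nil, err
}
// The caller would then run srv.ListenAndServeTLS(certFile, keyFile);
// the registered TLSNextProto["h2"] entry hands negotiated HTTP/2
// connections to h2.ServeConn.
return srv, nil
}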
// ServeConnOpts are options for the Server.ServeConn method.
type http2ServeConnOpts struct {
// BaseConfig optionally sets the base configuration
// for values. If nil, defaults are used.
BaseConfig *Server
// Handler specifies which handler to use for processing
// requests. If nil, BaseConfig.Handler is used. If BaseConfig
// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
Handler Handler
}
func (o *http2ServeConnOpts) baseConfig() *Server {
if o != nil && o.BaseConfig != nil {
return o.BaseConfig
}
return new(Server)
}
func (o *http2ServeConnOpts) handler() Handler {
if o != nil {
if o.Handler != nil {
return o.Handler
}
if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
return o.BaseConfig.Handler
}
}
return DefaultServeMux
}
// ServeConn serves HTTP/2 requests on the provided connection and
// blocks until the connection is no longer readable.
//
// ServeConn starts speaking HTTP/2 assuming that c has not had any
// reads or writes. It writes its initial settings frame and expects
// to be able to read the preface and settings frame from the
// client. If c has a ConnectionState method like a *tls.Conn, the
// ConnectionState is used to verify the TLS ciphersuite and to set
// the Request.TLS field in Handlers.
//
// ServeConn does not support h2c by itself. Any h2c support must be
// implemented in terms of providing a suitably-behaving net.Conn.
//
// The opts parameter is optional. If nil, default values are used.
func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
baseCtx, cancel := http2serverConnBaseContext(c, opts)
defer cancel()
sc := &http2serverConn{
srv: s,
hs: opts.baseConfig(),
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
bw: http2newBufferedWriter(c),
handler: opts.handler(),
streams: make(map[uint32]*http2stream),
readFrameCh: make(chan http2readFrameResult),
wantWriteFrameCh: make(chan http2FrameWriteRequest, 8),
serveMsgCh: make(chan interface{}, 8),
wroteFrameCh: make(chan http2frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan http2bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(),
initialStreamSendWindowSize: http2initialWindowSize,
maxFrameSize: http2initialMaxFrameSize,
headerTableSize: http2initialHeaderTableSize,
serveG: http2newGoroutineLock(),
pushEnabled: true,
}
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
// The net/http package sets the write deadline from the
// http.Server.WriteTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already set.
// Write deadlines are set per stream in serverConn.newStream.
// Disarm the net.Conn write deadline here.
if sc.hs.WriteTimeout != 0 {
sc.conn.SetWriteDeadline(time.Time{})
}
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
sc.writeSched = http2NewRandomWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(http2initialWindowSize)
sc.inflow.add(http2initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
fr := http2NewFramer(sc.bw, c)
fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
sc.framer = fr
if tc, ok := c.(http2connectionStater); ok {
sc.tlsState = new(tls.ConnectionState)
*sc.tlsState = tc.ConnectionState()
// 9.2 Use of TLS Features
// An implementation of HTTP/2 over TLS MUST use TLS
// 1.2 or higher with the restrictions on feature set
// and cipher suite described in this section. Due to
// implementation limitations, it might not be
// possible to fail TLS negotiation. An endpoint MUST
// immediately terminate an HTTP/2 connection that
// does not meet the TLS requirements described in
// this section with a connection error (Section
// 5.4.1) of type INADEQUATE_SECURITY.
if sc.tlsState.Version < tls.VersionTLS12 {
sc.rejectConn(http2ErrCodeInadequateSecurity, "TLS version too low")
return
}
if sc.tlsState.ServerName == "" {
// Client must use SNI, but we don't enforce that anymore,
// since it was causing problems when connecting to bare IP
// addresses during development.
//
// TODO: optionally enforce? Or enforce at the time we receive
// a new request, and verify the ServerName matches the :authority?
// But that precludes proxy situations, perhaps.
//
// So for now, do nothing here.
}
if !s.PermitProhibitedCipherSuites && http2isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
//
// We choose that. In my opinion, the spec is weak
// here. It also says both parties must support at least
// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
// excuses here. If we really must, we could allow an
// "AllowInsecureWeakCiphers" option on the server later.
// Let's see how it plays out first.
sc.rejectConn(http2ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
return
}
}
if hook := http2testHookGetServerConn; hook != nil {
hook(sc)
}
sc.serve()
}
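// http2serveConnSketch is an illustrative sketch, not part of the original
// bundle: it hands an already-accepted connection straight to ServeConn, the
// shape an h2c-style server would use after arranging a suitably-behaving
// net.Conn itself. The function name is hypothetical.
func http2serveConnSketch(s *http2Server, c net.Conn, h Handler) {
s.ServeConn(c, &http2ServeConnOpts{
Handler: h,
})
}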
func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx context.Context, cancel func()) {
ctx, cancel = context.WithCancel(context.Background())
ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr())
if hs := opts.baseConfig(); hs != nil {
ctx = context.WithValue(ctx, ServerContextKey, hs)
}
return
}
func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) {
sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
// ignoring errors. hanging up anyway.
sc.framer.WriteGoAway(0, err, []byte(debug))
sc.bw.Flush()
sc.conn.Close()
}
type http2serverConn struct {
// Immutable:
srv *http2Server
hs *Server
conn net.Conn
bw *http2bufferedWriter // writing to conn
handler Handler
baseCtx context.Context
framer *http2Framer
doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan http2readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan http2FrameWriteRequest // from handlers -> serve
wroteFrameCh chan http2frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan http2bodyReadMsg // from handlers -> serve
serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
flow http2flow // conn-wide (not stream-specific) outbound flow control
inflow http2flow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched http2WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
serveG http2goroutineLock // used to verify funcs are on serve()
pushEnabled bool
sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs?
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*http2stream
initialStreamSendWindowSize int32
maxFrameSize int32
headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
writingFrame bool // started writing a frame (on serve goroutine or separate)
writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode http2ErrCode
shutdownTimer *time.Timer // nil until used
idleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder
// Used by startGracefulShutdown.
shutdownOnce sync.Once
}
func (sc *http2serverConn) maxHeaderListSize() uint32 {
n := sc.hs.MaxHeaderBytes
if n <= 0 {
n = DefaultMaxHeaderBytes
}
// http2's count is in a slightly different unit and includes 32 bytes per pair.
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
const perFieldOverhead = 32 // per http2 spec
const typicalHeaders = 10 // conservative
return uint32(n + typicalHeaders*perFieldOverhead)
}
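// http2maxHeaderListSizeSketch is an illustrative sketch, not part of the
// original bundle: it spells out the padding arithmetic above for the default
// case, DefaultMaxHeaderBytes (1 << 20) plus 10 headers at 32 bytes of
// per-field overhead, i.e. 1048896. The function name is hypothetical.
func http2maxHeaderListSizeSketch() uint32 {
return uint32(DefaultMaxHeaderBytes + 10*32)
}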
func (sc *http2serverConn) curOpenStreams() uint32 {
sc.serveG.check()
return sc.curClientStreams + sc.curPushedStreams
}
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
// responseWriter's responseWriterState is recycled at the end of a
// handler, this struct intentionally has no pointer to the
// *responseWriter{,State} itself, as the Handler ending nils out the
// responseWriter's state field.
type http2stream struct {
// immutable:
sc *http2serverConn
id uint32
body *http2pipe // non-nil if expecting DATA frames
cw http2closeWaiter // closed once the stream transitions to the closed state
ctx context.Context
cancelCtx func()
// owned by serverConn's serve loop:
bodyBytes int64 // body bytes seen so far
declBodyBytes int64 // or -1 if undeclared
flow http2flow // limits writing from Handler to client
inflow http2flow // what the client is allowed to POST/etc to us
parent *http2stream // or nil
numTrailerValues int64
weight uint8
state http2streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100)
writeDeadline *time.Timer // nil if unused
trailer Header // accumulated trailers
reqTrailer Header // handler's Request.Trailer
}
func (sc *http2serverConn) Framer() *http2Framer { return sc.framer }
func (sc *http2serverConn) CloseConn() error { return sc.conn.Close() }
func (sc *http2serverConn) Flush() error { return sc.bw.Flush() }
func (sc *http2serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
return sc.hpackEncoder, &sc.headerWriteBuf
}
func (sc *http2serverConn) state(streamID uint32) (http2streamState, *http2stream) {
sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-5.1
if st, ok := sc.streams[streamID]; ok {
return st.state, st
}
// "The first use of a new stream identifier implicitly closes all
// streams in the "idle" state that might have been initiated by
// that peer with a lower-valued stream identifier. For example, if
// a client sends a HEADERS frame on stream 7 without ever sending a
// frame on stream 5, then stream 5 transitions to the "closed"
// state when the first frame for stream 7 is sent or received."
if streamID%2 == 1 {
if streamID <= sc.maxClientStreamID {
return http2stateClosed, nil
}
} else {
if streamID <= sc.maxPushPromiseID {
return http2stateClosed, nil
}
}
return http2stateIdle, nil
}
// setConnState calls the net/http ConnState hook for this connection, if configured.
// Note that the net/http package does StateNew and StateClosed for us.
// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
func (sc *http2serverConn) setConnState(state ConnState) {
if sc.hs.ConnState != nil {
sc.hs.ConnState(sc.conn, state)
}
}
func (sc *http2serverConn) vlogf(format string, args ...interface{}) {
if http2VerboseLogs {
sc.logf(format, args...)
}
}
func (sc *http2serverConn) logf(format string, args ...interface{}) {
if lg := sc.hs.ErrorLog; lg != nil {
lg.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// errno returns v's underlying uintptr, else 0.
//
// TODO: remove this helper function once http2 can use build
// tags. See comment in isClosedConnError.
func http2errno(v error) uintptr {
if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
return uintptr(rv.Uint())
}
return 0
}
// isClosedConnError reports whether err is an error from use of a closed
// network connection.
func http2isClosedConnError(err error) bool {
if err == nil {
return false
}
// TODO: remove this string search and be more like the Windows
// case below. That might involve modifying the standard library
// to return better error types.
str := err.Error()
if strings.Contains(str, "use of closed network connection") {
return true
}
// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
// build tags, so I can't make an http2_windows.go file with
// Windows-specific stuff. Fix that and move this, once we
// have a way to bundle this into std's net/http somehow.
if runtime.GOOS == "windows" {
if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
const WSAECONNABORTED = 10053
const WSAECONNRESET = 10054
if n := http2errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
return true
}
}
}
}
return false
}
func (sc *http2serverConn) condlogf(err error, format string, args ...interface{}) {
if err == nil {
return
}
if err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) || err == http2errPrefaceTimeout {
// Boring, expected errors.
sc.vlogf(format, args...)
} else {
sc.logf(format, args...)
}
}
func (sc *http2serverConn) canonicalHeader(v string) string {
sc.serveG.check()
http2buildCommonHeaderMapsOnce()
cv, ok := http2commonCanonHeader[v]
if ok {
return cv
}
cv, ok = sc.canonHeader[v]
if ok {
return cv
}
if sc.canonHeader == nil {
sc.canonHeader = make(map[string]string)
}
cv = CanonicalHeaderKey(v)
sc.canonHeader[v] = cv
return cv
}
type http2readFrameResult struct {
f http2Frame // valid until readMore is called
err error
// readMore should be called once the consumer no longer needs or
// retains f. After readMore, f is invalid and more frames can be
// read.
readMore func()
}
// readFrames is the loop that reads incoming frames.
// It takes care to only read one frame at a time, blocking until the
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *http2serverConn) readFrames() {
gate := make(http2gate)
gateDone := gate.Done
for {
f, err := sc.framer.ReadFrame()
select {
case sc.readFrameCh <- http2readFrameResult{f, err, gateDone}:
case <-sc.doneServing:
return
}
select {
case <-gate:
case <-sc.doneServing:
return
}
if http2terminalReadFrameError(err) {
return
}
}
}
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type http2frameWriteResult struct {
wr http2FrameWriteRequest // what was written (or attempted)
err error // result of the writeFrame call
}
// writeFrameAsync runs in its own goroutine and writes a single frame
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *http2serverConn) writeFrameAsync(wr http2FrameWriteRequest) {
err := wr.write.writeFrame(sc)
sc.wroteFrameCh <- http2frameWriteResult{wr, err}
}
func (sc *http2serverConn) closeAllStreamsOnConnClose() {
sc.serveG.check()
for _, st := range sc.streams {
sc.closeStream(st, http2errClientDisconnected)
}
}
func (sc *http2serverConn) stopShutdownTimer() {
sc.serveG.check()
if t := sc.shutdownTimer; t != nil {
t.Stop()
}
}
func (sc *http2serverConn) notePanic() {
// Note: this is for serverConn.serve panicking, not http.Handler code.
if http2testHookOnPanicMu != nil {
http2testHookOnPanicMu.Lock()
defer http2testHookOnPanicMu.Unlock()
}
if http2testHookOnPanic != nil {
if e := recover(); e != nil {
if http2testHookOnPanic(sc, e) {
panic(e)
}
}
}
}
func (sc *http2serverConn) serve() {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
defer sc.closeAllStreamsOnConnClose()
defer sc.stopShutdownTimer()
defer close(sc.doneServing) // unblocks handlers trying to send
if http2VerboseLogs {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
sc.writeFrame(http2FrameWriteRequest{
write: http2writeSettings{
{http2SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{http2SettingMaxConcurrentStreams, sc.advMaxStreams},
{http2SettingMaxHeaderListSize, sc.maxHeaderListSize()},
{http2SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - http2initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
// Now that we've got the preface, get us out of the
// "StateNew" state. We can't go directly to idle, though.
// Active means we read some data and anticipate a request. We'll
// do another Active when we get a HEADERS frame.
sc.setConnState(StateActive)
sc.setConnState(StateIdle)
if sc.srv.IdleTimeout != 0 {
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := time.AfterFunc(http2firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
loopNum := 0
for {
loopNum++
select {
case wr := <-sc.wantWriteFrameCh:
if se, ok := wr.write.(http2StreamError); ok {
sc.resetStream(se)
break
}
sc.writeFrame(wr)
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
if !sc.processFrameFromReader(res) {
return
}
res.readMore()
if settingsTimer != nil {
settingsTimer.Stop()
settingsTimer = nil
}
case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n)
case msg := <-sc.serveMsgCh:
switch v := msg.(type) {
case func(int):
v(loopNum) // for testing
case *http2serverMessage:
switch v {
case http2settingsTimerMsg:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return
case http2idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(http2ErrCodeNo)
case http2shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case http2gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
default:
panic("unknown timer")
}
case *http2startPushRequest:
sc.startPush(v)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
}
// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
// with no error code (graceful shutdown), don't start the timer until
// all open streams have been completed.
sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
gracefulShutdownComplete := sc.goAwayCode == http2ErrCodeNo && sc.curOpenStreams() == 0
if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != http2ErrCodeNo || gracefulShutdownComplete) {
sc.shutDownIn(http2goAwayTimeout)
}
}
}
func (sc *http2serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type http2serverMessage int
// Message values sent to serveMsgCh.
var (
http2settingsTimerMsg = new(http2serverMessage)
http2idleTimerMsg = new(http2serverMessage)
http2shutdownTimerMsg = new(http2serverMessage)
http2gracefulShutdownMsg = new(http2serverMessage)
)
func (sc *http2serverConn) onSettingsTimer() { sc.sendServeMsg(http2settingsTimerMsg) }
func (sc *http2serverConn) onIdleTimer() { sc.sendServeMsg(http2idleTimerMsg) }
func (sc *http2serverConn) onShutdownTimer() { sc.sendServeMsg(http2shutdownTimerMsg) }
func (sc *http2serverConn) sendServeMsg(msg interface{}) {
sc.serveG.checkNotOn() // NOT
select {
case sc.serveMsgCh <- msg:
case <-sc.doneServing:
}
}
var http2errPrefaceTimeout = errors.New("timeout waiting for client preface")
// readPreface reads the ClientPreface greeting from the peer or
// returns errPrefaceTimeout on timeout, or an error if the greeting
// is invalid.
func (sc *http2serverConn) readPreface() error {
errc := make(chan error, 1)
go func() {
// Read the client preface
buf := make([]byte, len(http2ClientPreface))
if _, err := io.ReadFull(sc.conn, buf); err != nil {
errc <- err
} else if !bytes.Equal(buf, http2clientPreface) {
errc <- fmt.Errorf("bogus greeting %q", buf)
} else {
errc <- nil
}
}()
timer := time.NewTimer(http2prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
case <-timer.C:
return http2errPrefaceTimeout
case err := <-errc:
if err == nil {
if http2VerboseLogs {
sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
}
}
return err
}
}
var http2errChanPool = sync.Pool{
New: func() interface{} { return make(chan error, 1) },
}
var http2writeDataPool = sync.Pool{
New: func() interface{} { return new(http2writeData) },
}
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error {
ch := http2errChanPool.Get().(chan error)
writeArg := http2writeDataPool.Get().(*http2writeData)
*writeArg = http2writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(http2FrameWriteRequest{
write: writeArg,
stream: stream,
done: ch,
})
if err != nil {
return err
}
var frameWriteDone bool // the frame write is done (successfully or not)
select {
case err = <-ch:
frameWriteDone = true
case <-sc.doneServing:
return http2errClientDisconnected
case <-stream.cw:
// If both ch and stream.cw were ready (as might
// happen on the final Write after an http.Handler
// ends), prefer the write result. Otherwise this
// might just be us successfully closing the stream.
// The writeFrameAsync and serve goroutines guarantee
// that the ch send will happen before the stream.cw
// close.
select {
case err = <-ch:
frameWriteDone = true
default:
return http2errStreamClosed
}
}
http2errChanPool.Put(ch)
if frameWriteDone {
http2writeDataPool.Put(writeArg)
}
return err
}
// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
func (sc *http2serverConn) writeFrameFromHandler(wr http2FrameWriteRequest) error {
sc.serveG.checkNotOn() // NOT
select {
case sc.wantWriteFrameCh <- wr:
return nil
case <-sc.doneServing:
// Serve loop is gone.
// Client has closed their connection to the server.
return http2errClientDisconnected
}
}
// writeFrame schedules a frame to write and sends it if there's nothing
// already being written.
//
// There is no pushback here (the serve goroutine never blocks). It's
// the http.Handlers that block, waiting for their previous frames to
// make it onto the wire.
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
func (sc *http2serverConn) writeFrame(wr http2FrameWriteRequest) {
sc.serveG.check()
// If true, wr will not be written and wr.done will not be signaled.
var ignoreWrite bool
// We are not allowed to write frames on closed streams. RFC 7540 Section
// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
// a closed stream." Our server never sends PRIORITY, so that exception
// does not apply.
//
// The serverConn might close an open stream while the stream's handler
// is still running. For example, the server might close a stream when it
// receives bad data from the client. If this happens, the handler might
// attempt to write a frame after the stream has been closed (since the
// handler hasn't yet been notified of the close). In this case, we simply
// ignore the frame. The handler will notice that the stream is closed when
// it waits for the frame to be written.
//
// As an exception to this rule, we allow sending RST_STREAM after close.
// This allows us to immediately reject new streams without tracking any
// state for those streams (except for the queued RST_STREAM frame). This
// may result in duplicate RST_STREAMs in some cases, but the client should
// ignore those.
if wr.StreamID() != 0 {
_, isReset := wr.write.(http2StreamError)
if state, _ := sc.state(wr.StreamID()); state == http2stateClosed && !isReset {
ignoreWrite = true
}
}
// Don't send a 100-continue response if we've already sent headers.
// See golang.org/issue/14030.
switch wr.write.(type) {
case *http2writeResHeaders:
wr.stream.wroteHeaders = true
case http2write100ContinueHeadersFrame:
if wr.stream.wroteHeaders {
// We do not need to notify wr.done because this frame is
// never written with wr.done != nil.
if wr.done != nil {
panic("wr.done != nil for write100ContinueHeadersFrame")
}
ignoreWrite = true
}
}
if !ignoreWrite {
sc.writeSched.Push(wr)
}
sc.scheduleFrameWrite()
}
// startFrameWrite starts writing wr (on a separate goroutine if the write
// might block on the network) and updates the serve goroutine's state about
// the world from the info in wr.
func (sc *http2serverConn) startFrameWrite(wr http2FrameWriteRequest) {
sc.serveG.check()
if sc.writingFrame {
panic("internal error: can only be writing one frame at a time")
}
st := wr.stream
if st != nil {
switch st.state {
case http2stateHalfClosedLocal:
switch wr.write.(type) {
case http2StreamError, http2handlerPanicRST, http2writeWindowUpdate:
// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
// in this state. (We never send PRIORITY from the server, so that is not checked.)
default:
panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
}
case http2stateClosed:
panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
}
}
if wpp, ok := wr.write.(*http2writePushPromise); ok {
var err error
wpp.promisedID, err = wpp.allocatePromisedID()
if err != nil {
sc.writingFrameAsync = false
wr.replyToWriter(err)
return
}
}
sc.writingFrame = true
sc.needsFrameFlush = true
if wr.write.staysWithinBuffer(sc.bw.Available()) {
sc.writingFrameAsync = false
err := wr.write.writeFrame(sc)
sc.wroteFrame(http2frameWriteResult{wr, err})
} else {
sc.writingFrameAsync = true
go sc.writeFrameAsync(wr)
}
}
// errHandlerPanicked is the error given to any callers blocked in a read from
// Request.Body when the main goroutine panics. Since most handlers read in the
// main ServeHTTP goroutine, this will show up rarely.
var http2errHandlerPanicked = errors.New("http2: handler panicked")
// wroteFrame is called on the serve goroutine with the result of
// whatever happened on writeFrameAsync.
func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) {
sc.serveG.check()
if !sc.writingFrame {
panic("internal error: expected to be already writing a frame")
}
sc.writingFrame = false
sc.writingFrameAsync = false
wr := res.wr
if http2writeEndsStream(wr.write) {
st := wr.stream
if st == nil {
panic("internal error: expecting non-nil stream")
}
switch st.state {
case http2stateOpen:
// Here we would go to stateHalfClosedLocal in
// theory, but since our handler is done and
// the net/http package provides no mechanism
// for closing a ResponseWriter while still
// reading data (see possible TODO at top of
// this file), we go into closed state here
// anyway, after telling the peer we're
// hanging up on them. We'll transition to
// stateClosed after the RST_STREAM frame is
// written.
st.state = http2stateHalfClosedLocal
// Section 8.1: a server MAY request that the client abort
// transmission of a request without error by sending a
// RST_STREAM with an error code of NO_ERROR after sending
// a complete response.
sc.resetStream(http2streamError(st.id, http2ErrCodeNo))
case http2stateHalfClosedRemote:
sc.closeStream(st, http2errHandlerComplete)
}
} else {
switch v := wr.write.(type) {
case http2StreamError:
// st may be unknown if the RST_STREAM was generated to reject bad input.
if st, ok := sc.streams[v.StreamID]; ok {
sc.closeStream(st, v)
}
case http2handlerPanicRST:
sc.closeStream(wr.stream, http2errHandlerPanicked)
}
}
// Reply (if requested) to unblock the ServeHTTP goroutine.
wr.replyToWriter(res.err)
sc.scheduleFrameWrite()
}
// scheduleFrameWrite tickles the frame writing scheduler.
//
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
// If a frame isn't being written and we need to send one, the best frame
// to send is selected, preferring first things that aren't
// stream-specific (e.g. ACKing settings), and then finding the
// highest priority stream.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
func (sc *http2serverConn) scheduleFrameWrite() {
sc.serveG.check()
if sc.writingFrame || sc.inFrameScheduleLoop {
return
}
sc.inFrameScheduleLoop = true
for !sc.writingFrameAsync {
if sc.needToSendGoAway {
sc.needToSendGoAway = false
sc.startFrameWrite(http2FrameWriteRequest{
write: &http2writeGoAway{
maxStreamID: sc.maxClientStreamID,
code: sc.goAwayCode,
},
})
continue
}
if sc.needToSendSettingsAck {
sc.needToSendSettingsAck = false
sc.startFrameWrite(http2FrameWriteRequest{write: http2writeSettingsAck{}})
continue
}
if !sc.inGoAway || sc.goAwayCode == http2ErrCodeNo {
if wr, ok := sc.writeSched.Pop(); ok {
sc.startFrameWrite(wr)
continue
}
}
if sc.needsFrameFlush {
sc.startFrameWrite(http2FrameWriteRequest{write: http2flushFrameWriter{}})
sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
continue
}
break
}
sc.inFrameScheduleLoop = false
}
// startGracefulShutdown gracefully shuts down a connection. This
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *http2serverConn) startGracefulShutdown() {
sc.serveG.checkNotOn() // NOT
sc.shutdownOnce.Do(func() { sc.sendServeMsg(http2gracefulShutdownMsg) })
}
// After sending GOAWAY, the connection will close after goAwayTimeout.
// If we close the connection immediately after sending GOAWAY, there may
// be unsent data in our kernel receive buffer, which will cause the kernel
// to send a TCP RST on close() instead of a FIN. This RST will abort the
// connection immediately, whether or not the client had received the GOAWAY.
//
// Ideally we should delay for at least 1 RTT + epsilon so the client has
// a chance to read the GOAWAY and stop sending messages. Measuring RTT
// is hard, so we approximate with 1 second. See golang.org/issue/18701.
//
// This is a var so it can be shorter in tests, where all requests uses the
// loopback interface making the expected RTT very small.
//
// TODO: configurable?
var http2goAwayTimeout = 1 * time.Second
func (sc *http2serverConn) startGracefulShutdownInternal() {
sc.goAway(http2ErrCodeNo)
}
func (sc *http2serverConn) goAway(code http2ErrCode) {
sc.serveG.check()
if sc.inGoAway {
return
}
sc.inGoAway = true
sc.needToSendGoAway = true
sc.goAwayCode = code
sc.scheduleFrameWrite()
}
func (sc *http2serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *http2serverConn) resetStream(se http2StreamError) {
sc.serveG.check()
sc.writeFrame(http2FrameWriteRequest{write: se})
if st, ok := sc.streams[se.StreamID]; ok {
st.resetQueued = true
}
}
// processFrameFromReader processes the serve loop's read from readFrameCh from the
// frame-reading goroutine.
// processFrameFromReader returns whether the connection should be kept open.
func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool {
sc.serveG.check()
err := res.err
if err != nil {
if err == http2ErrFrameTooLarge {
sc.goAway(http2ErrCodeFrameSize)
return true // goAway will close the loop
}
clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err)
if clientGone {
// TODO: could we also get into this state if
// the peer does a half close
// (e.g. CloseWrite) because they're done
// sending frames but they're still wanting
// our open replies? Investigate.
// TODO: add CloseWrite to crypto/tls.Conn first
// so we have a way to test this? I suppose
// just for testing we could have a non-TLS mode.
return false
}
} else {
f := res.f
if http2VerboseLogs {
sc.vlogf("http2: server read frame %v", http2summarizeFrame(f))
}
err = sc.processFrame(f)
if err == nil {
return true
}
}
switch ev := err.(type) {
case http2StreamError:
sc.resetStream(ev)
return true
case http2goAwayFlowError:
sc.goAway(http2ErrCodeFlowControl)
return true
case http2ConnectionError:
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
sc.goAway(http2ErrCode(ev))
return true // goAway will handle shutdown
default:
if res.err != nil {
sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
} else {
sc.logf("http2: server closing client connection: %v", err)
}
return false
}
}
func (sc *http2serverConn) processFrame(f http2Frame) error {
sc.serveG.check()
// First frame received must be SETTINGS.
if !sc.sawFirstSettings {
if _, ok := f.(*http2SettingsFrame); !ok {
return http2ConnectionError(http2ErrCodeProtocol)
}
sc.sawFirstSettings = true
}
switch f := f.(type) {
case *http2SettingsFrame:
return sc.processSettings(f)
case *http2MetaHeadersFrame:
return sc.processHeaders(f)
case *http2WindowUpdateFrame:
return sc.processWindowUpdate(f)
case *http2PingFrame:
return sc.processPing(f)
case *http2DataFrame:
return sc.processData(f)
case *http2RSTStreamFrame:
return sc.processResetStream(f)
case *http2PriorityFrame:
return sc.processPriority(f)
case *http2GoAwayFrame:
return sc.processGoAway(f)
case *http2PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
return http2ConnectionError(http2ErrCodeProtocol)
default:
sc.vlogf("http2: server ignoring frame: %v", f.Header())
return nil
}
}
func (sc *http2serverConn) processPing(f *http2PingFrame) error {
sc.serveG.check()
if f.IsAck() {
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
}
if f.StreamID != 0 {
// "PING frames are not associated with any individual
// stream. If a PING frame is received with a stream
// identifier field value other than 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if sc.inGoAway && sc.goAwayCode != http2ErrCodeNo {
return nil
}
sc.writeFrame(http2FrameWriteRequest{write: http2writePingAck{f}})
return nil
}
func (sc *http2serverConn) processWindowUpdate(f *http2WindowUpdateFrame) error {
sc.serveG.check()
switch {
case f.StreamID != 0: // stream-level flow control
state, st := sc.state(f.StreamID)
if state == http2stateIdle {
// Section 5.1: "Receiving any frame other than HEADERS
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
// frame bearing the END_STREAM flag. This means that a
// receiver could receive a WINDOW_UPDATE frame on a "half
// closed (remote)" or "closed" stream. A receiver MUST
// NOT treat this as an error, see Section 5.1."
return nil
}
if !st.flow.add(int32(f.Increment)) {
return http2streamError(f.StreamID, http2ErrCodeFlowControl)
}
default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
return http2goAwayFlowError{}
}
}
sc.scheduleFrameWrite()
return nil
}
func (sc *http2serverConn) processResetStream(f *http2RSTStreamFrame) error {
sc.serveG.check()
state, st := sc.state(f.StreamID)
if state == http2stateIdle {
// 6.4 "RST_STREAM frames MUST NOT be sent for a
// stream in the "idle" state. If a RST_STREAM frame
// identifying an idle stream is received, the
// recipient MUST treat this as a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if st != nil {
st.cancelCtx()
sc.closeStream(st, http2streamError(f.StreamID, f.ErrCode))
}
return nil
}
func (sc *http2serverConn) closeStream(st *http2stream, err error) {
sc.serveG.check()
if st.state == http2stateIdle || st.state == http2stateClosed {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
}
st.state = http2stateClosed
if st.writeDeadline != nil {
st.writeDeadline.Stop()
}
if st.isPushed() {
sc.curPushedStreams--
} else {
sc.curClientStreams--
}
delete(sc.streams, st.id)
if len(sc.streams) == 0 {
sc.setConnState(StateIdle)
if sc.srv.IdleTimeout != 0 {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if http2h1ServerKeepAlivesDisabled(sc.hs) {
sc.startGracefulShutdownInternal()
}
}
if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
sc.sendWindowUpdate(nil, p.Len())
p.CloseWithError(err)
}
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
sc.writeSched.CloseStream(st.id)
}
func (sc *http2serverConn) processSettings(f *http2SettingsFrame) error {
sc.serveG.check()
if f.IsAck() {
sc.unackedSettings--
if sc.unackedSettings < 0 {
// Why is the peer ACKing settings we never sent?
// The spec doesn't mention this case, but
// hang up on them anyway.
return http2ConnectionError(http2ErrCodeProtocol)
}
return nil
}
if f.NumSettings() > 100 || f.HasDuplicates() {
// This isn't actually in the spec, but hang up on
// suspiciously large settings frames or those with
// duplicate entries.
return http2ConnectionError(http2ErrCodeProtocol)
}
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
}
sc.needToSendSettingsAck = true
sc.scheduleFrameWrite()
return nil
}
func (sc *http2serverConn) processSetting(s http2Setting) error {
sc.serveG.check()
if err := s.Valid(); err != nil {
return err
}
if http2VerboseLogs {
sc.vlogf("http2: server processing setting %v", s)
}
switch s.ID {
case http2SettingHeaderTableSize:
sc.headerTableSize = s.Val
sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
case http2SettingEnablePush:
sc.pushEnabled = s.Val != 0
case http2SettingMaxConcurrentStreams:
sc.clientMaxStreams = s.Val
case http2SettingInitialWindowSize:
return sc.processSettingInitialWindowSize(s.Val)
case http2SettingMaxFrameSize:
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case http2SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
default:
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
// ignore that setting."
if http2VerboseLogs {
sc.vlogf("http2: server ignoring unknown setting %v", s)
}
}
return nil
}
func (sc *http2serverConn) processSettingInitialWindowSize(val uint32) error {
sc.serveG.check()
// Note: val already validated to be within range by
// processSetting's Valid call.
// "A SETTINGS frame can alter the initial flow control window
// size for all current streams. When the value of
// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
// adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the
// old value."
old := sc.initialStreamSendWindowSize
sc.initialStreamSendWindowSize = int32(val)
growth := int32(val) - old // may be negative
for _, st := range sc.streams {
if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size
// "An endpoint MUST treat a change to
// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
// control window to exceed the maximum size as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR."
return http2ConnectionError(http2ErrCodeFlowControl)
}
}
return nil
}
func (sc *http2serverConn) processData(f *http2DataFrame) error {
sc.serveG.check()
if sc.inGoAway && sc.goAwayCode != http2ErrCodeNo {
return nil
}
data := f.Data()
// "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
id := f.Header().StreamID
state, st := sc.state(id)
if id == 0 || state == http2stateIdle {
// Section 5.1: "Receiving any frame other than HEADERS
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if st == nil || state != http2stateOpen || st.gotTrailerHeader || st.resetQueued {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
// the http.Handler returned, so it's done reading &
// done writing). Try to stop the client from sending
// more DATA.
// But still enforce their connection-level flow control,
// and return any flow control bytes since we're not going
// to consume them.
if sc.inflow.available() < int32(f.Length) {
return http2streamError(id, http2ErrCodeFlowControl)
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
// sendWindowUpdate, which also schedules sending the
// frames.
sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
if st != nil && st.resetQueued {
// Already have a stream error in flight. Don't send another.
return nil
}
return http2streamError(id, http2ErrCodeStreamClosed)
}
if st.body == nil {
panic("internal error: should have a body in this state")
}
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
// DATA frame payload lengths that form the body.
return http2streamError(id, http2ErrCodeProtocol)
}
if f.Length > 0 {
// Check whether the client has flow control quota.
if st.inflow.available() < int32(f.Length) {
return http2streamError(id, http2ErrCodeFlowControl)
}
st.inflow.take(int32(f.Length))
if len(data) > 0 {
wrote, err := st.body.Write(data)
if err != nil {
return http2streamError(id, http2ErrCodeStreamClosed)
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
sc.sendWindowUpdate32(nil, pad)
sc.sendWindowUpdate32(st, pad)
}
}
if f.StreamEnded() {
st.endStream()
}
return nil
}
func (sc *http2serverConn) processGoAway(f *http2GoAwayFrame) error {
sc.serveG.check()
if f.ErrCode != http2ErrCodeNo {
sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
} else {
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
}
sc.startGracefulShutdownInternal()
// http://tools.ietf.org/html/rfc7540#section-6.8
// We should not create any new streams, which means we should disable push.
sc.pushEnabled = false
return nil
}
// isPushed reports whether the stream is server-initiated.
func (st *http2stream) isPushed() bool {
return st.id%2 == 0
}
// endStream closes a Request.Body's pipe. It is called when a DATA
// frame says a request body is over (or after trailers).
func (st *http2stream) endStream() {
sc := st.sc
sc.serveG.check()
if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
st.declBodyBytes, st.bodyBytes))
} else {
st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
st.body.CloseWithError(io.EOF)
}
st.state = http2stateHalfClosedRemote
}
// copyTrailersToHandlerRequest is run in the Handler's goroutine in
// its Request.Body.Read just before it gets io.EOF.
func (st *http2stream) copyTrailersToHandlerRequest() {
for k, vv := range st.trailer {
if _, ok := st.reqTrailer[k]; ok {
// Only copy it over if it was pre-declared.
st.reqTrailer[k] = vv
}
}
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's WriteTimeout has fired.
func (st *http2stream) onWriteTimeout() {
st.sc.writeFrameFromHandler(http2FrameWriteRequest{write: http2streamError(st.id, http2ErrCodeInternal)})
}
func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
sc.serveG.check()
id := f.StreamID
if sc.inGoAway {
// Ignore.
return nil
}
// http://tools.ietf.org/html/rfc7540#section-5.1.1
// Streams initiated by a client MUST use odd-numbered stream
// identifiers. [...] An endpoint that receives an unexpected
// stream identifier MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
if id%2 != 1 {
return http2ConnectionError(http2ErrCodeProtocol)
}
// A HEADERS frame can be used to create a new stream or
// send a trailer for an open one. If we already have a stream
// open, let it process its own HEADERS frame (trailers at this
// point, if it's valid).
if st := sc.streams[f.StreamID]; st != nil {
if st.resetQueued {
// We're sending RST_STREAM to close the stream, so don't bother
// processing this frame.
return nil
}
// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
// this state, it MUST respond with a stream error (Section 5.4.2) of
// type STREAM_CLOSED.
if st.state == http2stateHalfClosedRemote {
return http2streamError(id, http2ErrCodeStreamClosed)
}
return st.processTrailerHeaders(f)
}
// [...] The identifier of a newly established stream MUST be
// numerically greater than all streams that the initiating
// endpoint has opened or reserved. [...] An endpoint that
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
if id <= sc.maxClientStreamID {
return http2ConnectionError(http2ErrCodeProtocol)
}
sc.maxClientStreamID = id
if sc.idleTimer != nil {
sc.idleTimer.Stop()
}
// http://tools.ietf.org/html/rfc7540#section-5.1.2
// [...] Endpoints MUST NOT exceed the limit set by their peer. An
// endpoint that receives a HEADERS frame that causes their
// advertised concurrent stream limit to be exceeded MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
// or REFUSED_STREAM.
if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
return http2streamError(id, http2ErrCodeProtocol)
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
return http2streamError(id, http2ErrCodeRefusedStream)
}
initialState := http2stateOpen
if f.StreamEnded() {
initialState = http2stateHalfClosedRemote
}
st := sc.newStream(id, 0, initialState)
if f.HasPriority() {
if err := http2checkPriority(f.StreamID, f.Priority); err != nil {
return err
}
sc.writeSched.AdjustStream(st.id, f.Priority)
}
rw, req, err := sc.newWriterAndRequest(st, f)
if err != nil {
return err
}
st.reqTrailer = req.Trailer
if st.reqTrailer != nil {
st.trailer = make(Header)
}
st.body = req.Body.(*http2requestBody).pipe // may be nil
st.declBodyBytes = req.ContentLength
handler := sc.handler.ServeHTTP
if f.Truncated {
// Their header list was too long. Send a 431 error.
handler = http2handleHeaderListTooLong
} else if err := http2checkValidHTTP2RequestHeaders(req.Header); err != nil {
handler = http2new400Handler(err)
}
// The net/http package sets the read deadline from the
// http.Server.ReadTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already
// set. Disarm it here after the request headers are read,
// similar to how the http1 server works. Here it's
// technically more like the http1 Server's ReadHeaderTimeout
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
}
go sc.runHandler(rw, req, handler)
return nil
}
func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error {
sc := st.sc
sc.serveG.check()
if st.gotTrailerHeader {
return http2ConnectionError(http2ErrCodeProtocol)
}
st.gotTrailerHeader = true
if !f.StreamEnded() {
return http2streamError(st.id, http2ErrCodeProtocol)
}
if len(f.PseudoFields()) > 0 {
return http2streamError(st.id, http2ErrCodeProtocol)
}
if st.trailer != nil {
for _, hf := range f.RegularFields() {
key := sc.canonicalHeader(hf.Name)
if !httpguts.ValidTrailerHeader(key) {
// TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with
// HTTP folk.
return http2streamError(st.id, http2ErrCodeProtocol)
}
st.trailer[key] = append(st.trailer[key], hf.Value)
}
}
st.endStream()
return nil
}
func http2checkPriority(streamID uint32, p http2PriorityParam) error {
if streamID == p.StreamDep {
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
// Section 5.3.3 says that a stream can depend on one of its dependencies,
// so it's only self-dependencies that are forbidden.
return http2streamError(streamID, http2ErrCodeProtocol)
}
return nil
}
func (sc *http2serverConn) processPriority(f *http2PriorityFrame) error {
if sc.inGoAway {
return nil
}
if err := http2checkPriority(f.StreamID, f.http2PriorityParam); err != nil {
return err
}
sc.writeSched.AdjustStream(f.StreamID, f.http2PriorityParam)
return nil
}
func (sc *http2serverConn) newStream(id, pusherID uint32, state http2streamState) *http2stream {
sc.serveG.check()
if id == 0 {
panic("internal error: cannot create stream with id 0")
}
ctx, cancelCtx := context.WithCancel(sc.baseCtx)
st := &http2stream{
sc: sc,
id: id,
state: state,
ctx: ctx,
cancelCtx: cancelCtx,
}
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.conn = &sc.inflow // link to conn-level counter
st.inflow.add(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
sc.writeSched.OpenStream(st.id, http2OpenStreamOptions{PusherID: pusherID})
if st.isPushed() {
sc.curPushedStreams++
} else {
sc.curClientStreams++
}
if sc.curOpenStreams() == 1 {
sc.setConnState(StateActive)
}
return st
}
func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHeadersFrame) (*http2responseWriter, *Request, error) {
sc.serveG.check()
rp := http2requestParam{
method: f.PseudoValue("method"),
scheme: f.PseudoValue("scheme"),
authority: f.PseudoValue("authority"),
path: f.PseudoValue("path"),
}
isConnect := rp.method == "CONNECT"
if isConnect {
if rp.path != "" || rp.scheme != "" || rp.authority == "" {
return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol)
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// "Malformed requests or responses that are detected
// MUST be treated as a stream error (Section 5.4.2)
// of type PROTOCOL_ERROR."
//
// 8.1.2.3 Request Pseudo-Header Fields
// "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path
// pseudo-header fields"
return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol)
}
bodyOpen := !f.StreamEnded()
if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol)
}
rp.header = make(Header)
for _, hf := range f.RegularFields() {
rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
}
if rp.authority == "" {
rp.authority = rp.header.Get("Host")
}
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
if err != nil {
return nil, nil, err
}
if bodyOpen {
if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else {
req.ContentLength = -1
}
req.Body.(*http2requestBody).pipe = &http2pipe{
b: &http2dataBuffer{expected: req.ContentLength},
}
}
return rw, req, nil
}
type http2requestParam struct {
method string
scheme, authority, path string
header Header
}
func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2requestParam) (*http2responseWriter, *Request, error) {
sc.serveG.check()
var tlsState *tls.ConnectionState // nil if not scheme https
if rp.scheme == "https" {
tlsState = sc.tlsState
}
needsContinue := rp.header.Get("Expect") == "100-continue"
if needsContinue {
rp.header.Del("Expect")
}
// Merge Cookie headers into one "; "-delimited value.
if cookies := rp.header["Cookie"]; len(cookies) > 1 {
rp.header.Set("Cookie", strings.Join(cookies, "; "))
}
// Setup Trailers
var trailer Header
for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
key = CanonicalHeaderKey(strings.TrimSpace(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
// Bogus. (copy of http1 rules)
// Ignore.
default:
if trailer == nil {
trailer = make(Header)
}
trailer[key] = nil
}
}
}
delete(rp.header, "Trailer")
var url_ *url.URL
var requestURI string
if rp.method == "CONNECT" {
url_ = &url.URL{Host: rp.authority}
requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
var err error
url_, err = url.ParseRequestURI(rp.path)
if err != nil {
return nil, nil, http2streamError(st.id, http2ErrCodeProtocol)
}
requestURI = rp.path
}
body := &http2requestBody{
conn: sc,
stream: st,
needsContinue: needsContinue,
}
req := &Request{
Method: rp.method,
URL: url_,
RemoteAddr: sc.remoteAddrStr,
Header: rp.header,
RequestURI: requestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
Host: rp.authority,
Body: body,
Trailer: trailer,
}
req = req.WithContext(st.ctx)
rws := http2responseWriterStatePool.Get().(*http2responseWriterState)
bwSave := rws.bw
*rws = http2responseWriterState{} // zero all the fields
rws.conn = sc
rws.bw = bwSave
rws.bw.Reset(http2chunkWriter{rws})
rws.stream = st
rws.req = req
rws.body = body
rw := &http2responseWriter{rws: rws}
return rw, req, nil
}
// Run on its own goroutine.
func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) {
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
if didPanic {
e := recover()
sc.writeFrameFromHandler(http2FrameWriteRequest{
write: http2handlerPanicRST{rw.rws.stream.id},
stream: rw.rws.stream,
})
// Same as net/http:
if e != nil && e != ErrAbortHandler {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
}
return
}
rw.handlerDone()
}()
handler(rw, req)
didPanic = false
}
func http2handleHeaderListTooLong(w ResponseWriter, r *Request) {
// 10.5.1 Limits on Header Block Size:
// .. "A server that receives a larger header block than it is
// willing to handle can send an HTTP 431 (Request Header Fields Too
// Large) status code"
const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
w.WriteHeader(statusRequestHeaderFieldsTooLarge)
io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
}
// called from handler goroutines.
// h may be nil.
func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeResHeaders) error {
sc.serveG.checkNotOn() // NOT on
var errc chan error
if headerData.h != nil {
// There's a header map (which we don't own), so we have to block on
// this frame being written so that an http.Flush mid-handler
// writes out the correct value of keys before a handler later potentially
// mutates it.
errc = http2errChanPool.Get().(chan error)
}
if err := sc.writeFrameFromHandler(http2FrameWriteRequest{
write: headerData,
stream: st,
done: errc,
}); err != nil {
return err
}
if errc != nil {
select {
case err := <-errc:
http2errChanPool.Put(errc)
return err
case <-sc.doneServing:
return http2errClientDisconnected
case <-st.cw:
return http2errStreamClosed
}
}
return nil
}
// called from handler goroutines.
func (sc *http2serverConn) write100ContinueHeaders(st *http2stream) {
sc.writeFrameFromHandler(http2FrameWriteRequest{
write: http2write100ContinueHeadersFrame{st.id},
stream: st,
})
}
// A bodyReadMsg tells the server loop that the http.Handler read n
// bytes of the DATA from the client on the given stream.
type http2bodyReadMsg struct {
st *http2stream
n int
}
// called from handler goroutines.
// Notes that the handler for the given stream ID read n bytes of its body
// and schedules flow control tokens to be sent.
func (sc *http2serverConn) noteBodyReadFromHandler(st *http2stream, n int, err error) {
sc.serveG.checkNotOn() // NOT on
if n > 0 {
select {
case sc.bodyReadCh <- http2bodyReadMsg{st, n}:
case <-sc.doneServing:
}
}
}
func (sc *http2serverConn) noteBodyRead(st *http2stream, n int) {
sc.serveG.check()
sc.sendWindowUpdate(nil, n) // conn-level
if st.state != http2stateHalfClosedRemote && st.state != http2stateClosed {
// Don't send this WINDOW_UPDATE if the stream is closed
// remotely.
sc.sendWindowUpdate(st, n)
}
}
// st may be nil for conn-level
func (sc *http2serverConn) sendWindowUpdate(st *http2stream, n int) {
sc.serveG.check()
// "The legal range for the increment to the flow control
// window is 1 to 2^31-1 (2,147,483,647) octets."
// A Go Read call on 64-bit machines could in theory read
// a larger Read than this. Very unlikely, but we handle it here
// rather than elsewhere for now.
const maxUint31 = 1<<31 - 1
for n >= maxUint31 {
sc.sendWindowUpdate32(st, maxUint31)
n -= maxUint31
}
sc.sendWindowUpdate32(st, int32(n))
}
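// For illustration (the numbers are hypothetical), a single 5 GiB read
// would be acknowledged as two maxUint31 increments followed by the
// roughly 1 GiB remainder:
//
//	n := 5 << 30 // 5,368,709,120 bytes read by the handler
//	// the loop sends 2,147,483,647 twice (n -= maxUint31 each time),
//	// then sendWindowUpdate32 sends the remaining 1,073,741,826.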
// st may be nil for conn-level
func (sc *http2serverConn) sendWindowUpdate32(st *http2stream, n int32) {
sc.serveG.check()
if n == 0 {
return
}
if n < 0 {
panic("negative update")
}
var streamID uint32
if st != nil {
streamID = st.id
}
sc.writeFrame(http2FrameWriteRequest{
write: http2writeWindowUpdate{streamID: streamID, n: uint32(n)},
stream: st,
})
var ok bool
if st == nil {
ok = sc.inflow.add(n)
} else {
ok = st.inflow.add(n)
}
if !ok {
panic("internal error; sent too many window updates without decrements?")
}
}
// requestBody is the Handler's Request.Body type.
// Read and Close may be called concurrently.
type http2requestBody struct {
stream *http2stream
conn *http2serverConn
closed bool // for use by Close only
sawEOF bool // for use by Read only
pipe *http2pipe // non-nil if we have an HTTP entity message body
needsContinue bool // need to send a 100-continue
}
func (b *http2requestBody) Close() error {
if b.pipe != nil && !b.closed {
b.pipe.BreakWithError(http2errClosedBody)
}
b.closed = true
return nil
}
func (b *http2requestBody) Read(p []byte) (n int, err error) {
if b.needsContinue {
b.needsContinue = false
b.conn.write100ContinueHeaders(b.stream)
}
if b.pipe == nil || b.sawEOF {
return 0, io.EOF
}
n, err = b.pipe.Read(p)
if err == io.EOF {
b.sawEOF = true
}
if b.conn == nil && http2inTests {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
return
}
// responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
// and buffers are reused between multiple requests.
type http2responseWriter struct {
rws *http2responseWriterState
}
// Optional http.ResponseWriter interfaces implemented.
var (
_ CloseNotifier = (*http2responseWriter)(nil)
_ Flusher = (*http2responseWriter)(nil)
_ http2stringWriter = (*http2responseWriter)(nil)
)
type http2responseWriterState struct {
// immutable within a request:
stream *http2stream
req *Request
body *http2requestBody // to close at end of request, if DATA frames didn't
conn *http2serverConn
// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
// mutated by http.Handler goroutine:
handlerHeader Header // nil until called
snapHeader Header // snapshot of handlerHeader at WriteHeader time
trailers []string // set in writeChunk
status int // status code passed to WriteHeader
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
closeNotifierMu sync.Mutex // guards closeNotifierCh
closeNotifierCh chan bool // nil until first used
}
type http2chunkWriter struct{ rws *http2responseWriterState }
func (cw http2chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
func (rws *http2responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (rws *http2responseWriterState) declareTrailer(k string) {
k = CanonicalHeaderKey(k)
if !httpguts.ValidTrailerHeader(k) {
// Forbidden by RFC 7230, section 4.1.2.
rws.conn.logf("ignoring invalid trailer %q", k)
return
}
if !http2strSliceContains(rws.trailers, k) {
rws.trailers = append(rws.trailers, k)
}
}
// writeChunk writes chunks from the bufio.Writer. But because
// bufio.Writer may bypass its chunking, sometimes p may be
// arbitrarily large.
//
// writeChunk is also responsible (on the first chunk) for sending the
// HEADER response.
func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
if !rws.wroteHeader {
rws.writeHeader(200)
}
isHeadResp := rws.req.Method == "HEAD"
if !rws.sentHeader {
rws.sentHeader = true
var ctype, clen string
if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
rws.snapHeader.Del("Content-Length")
clen64, err := strconv.ParseInt(clen, 10, 64)
if err == nil && clen64 >= 0 {
rws.sentContentLen = clen64
} else {
clen = ""
}
}
if clen == "" && rws.handlerDone && http2bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
if !hasContentType && http2bodyAllowedForStatus(rws.status) && len(p) > 0 {
ctype = DetectContentType(p)
}
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
date = time.Now().UTC().Format(TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
http2foreachHeaderElement(v, rws.declareTrailer)
}
// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
// down the TCP connection when idle, like we do for HTTP/1.
// TODO: remove more Connection-specific header fields here, in addition
// to "Connection".
if _, ok := rws.snapHeader["Connection"]; ok {
v := rws.snapHeader.Get("Connection")
delete(rws.snapHeader, "Connection")
if v == "close" {
rws.conn.startGracefulShutdown()
}
}
endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
streamID: rws.stream.id,
httpResCode: rws.status,
h: rws.snapHeader,
endStream: endStream,
contentType: ctype,
contentLength: clen,
date: date,
})
if err != nil {
rws.dirty = true
return 0, err
}
if endStream {
return 0, nil
}
}
if isHeadResp {
return len(p), nil
}
if len(p) == 0 && !rws.handlerDone {
return 0, nil
}
if rws.handlerDone {
rws.promoteUndeclaredTrailers()
}
endStream := rws.handlerDone && !rws.hasTrailers()
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err
}
}
if rws.handlerDone && rws.hasTrailers() {
err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
streamID: rws.stream.id,
h: rws.handlerHeader,
trailers: rws.trailers,
endStream: true,
})
if err != nil {
rws.dirty = true
}
return len(p), err
}
return len(p), nil
}
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const http2TrailerPrefix = "Trailer:"
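// A minimal handler sketch using the prefix (the trailer name is chosen
// purely for illustration):
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		w.WriteHeader(http.StatusOK)
//		io.WriteString(w, "body bytes")
//		// Set after the body; promoted to a real trailer by
//		// promoteUndeclaredTrailers once the handler returns.
//		w.Header().Set("Trailer:Grpc-Status", "0")
//	}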
// promoteUndeclaredTrailers permits http.Handlers to set trailers
// after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter
// interface, and because nobody used trailers, and because RFC 7230
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
// and gRPC libraries permit setting trailers mid-stream without
// predeclaring them. So: change of plans. We still permit the old
// way, but we also permit this hack: if a Header() key begins with
// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
// invalid token byte anyway, there is no ambiguity. (And it's already
// filtered out.) It's mildly hacky, but not terrible.
//
// This method runs after the Handler is done and promotes any Header
// fields to be trailers.
func (rws *http2responseWriterState) promoteUndeclaredTrailers() {
for k, vv := range rws.handlerHeader {
if !strings.HasPrefix(k, http2TrailerPrefix) {
continue
}
trailerKey := strings.TrimPrefix(k, http2TrailerPrefix)
rws.declareTrailer(trailerKey)
rws.handlerHeader[CanonicalHeaderKey(trailerKey)] = vv
}
if len(rws.trailers) > 1 {
sorter := http2sorterPool.Get().(*http2sorter)
sorter.SortStrings(rws.trailers)
http2sorterPool.Put(sorter)
}
}
func (w *http2responseWriter) Flush() {
rws := w.rws
if rws == nil {
panic("Header called after Handler finished")
}
if rws.bw.Buffered() > 0 {
if err := rws.bw.Flush(); err != nil {
// Ignore the error. The frame writer already knows.
return
}
} else {
// The bufio.Writer won't call chunkWriter.Write
// (writeChunk) with zero bytes, so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
rws.writeChunk(nil)
}
}
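// A hedged handler-side sketch of the Flush path above: the first Flush
// forces the HEADERS frame (and any buffered DATA) onto the wire.
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "partial response\n")
//		if f, ok := w.(http.Flusher); ok {
//			f.Flush()
//		}
//		io.WriteString(w, "rest of the response\n")
//	}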
func (w *http2responseWriter) CloseNotify() <-chan bool {
rws := w.rws
if rws == nil {
panic("CloseNotify called after Handler finished")
}
rws.closeNotifierMu.Lock()
ch := rws.closeNotifierCh
if ch == nil {
ch = make(chan bool, 1)
rws.closeNotifierCh = ch
cw := rws.stream.cw
go func() {
cw.Wait() // wait for close
ch <- true
}()
}
rws.closeNotifierMu.Unlock()
return ch
}
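// A hedged usage sketch of CloseNotify (newer handlers generally prefer
// r.Context().Done()):
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		select {
//		case <-w.(http.CloseNotifier).CloseNotify():
//			return // peer closed the stream or connection
//		case <-time.After(5 * time.Second): // stand-in for slow work
//			io.WriteString(w, "done")
//		}
//	}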
func (w *http2responseWriter) Header() Header {
rws := w.rws
if rws == nil {
panic("Header called after Handler finished")
}
if rws.handlerHeader == nil {
rws.handlerHeader = make(Header)
}
return rws.handlerHeader
}
// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
func http2checkWriteHeaderCode(code int) {
// Issue 22880: require valid WriteHeader status codes.
// For now we only enforce that it's three digits.
// In the future we might block things over 599 (600 and above aren't defined
// at http://httpwg.org/specs/rfc7231.html#status.codes)
// and we might block under 200 (once we have more mature 1xx support).
// But for now any three digits.
//
// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
// no equivalent bogus thing we can realistically send in HTTP/2,
// so we'll consistently panic instead and help people find their bugs
// early. (We can't return an error from WriteHeader even if we wanted to.)
if code < 100 || code > 999 {
panic(fmt.Sprintf("invalid WriteHeader code %v", code))
}
}
func (w *http2responseWriter) WriteHeader(code int) {
rws := w.rws
if rws == nil {
panic("WriteHeader called after Handler finished")
}
rws.writeHeader(code)
}
func (rws *http2responseWriterState) writeHeader(code int) {
if !rws.wroteHeader {
http2checkWriteHeaderCode(code)
rws.wroteHeader = true
rws.status = code
if len(rws.handlerHeader) > 0 {
rws.snapHeader = http2cloneHeader(rws.handlerHeader)
}
}
}
func http2cloneHeader(h Header) Header {
h2 := make(Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
// The Life Of A Write is like this:
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte) (most of the magic; see comment there)
func (w *http2responseWriter) Write(p []byte) (n int, err error) {
return w.write(len(p), p, "")
}
func (w *http2responseWriter) WriteString(s string) (n int, err error) {
return w.write(len(s), nil, s)
}
// At most one of dataB and dataS is set.
func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
rws := w.rws
if rws == nil {
panic("Write called after Handler finished")
}
if !rws.wroteHeader {
w.WriteHeader(200)
}
if !http2bodyAllowedForStatus(rws.status) {
return 0, ErrBodyNotAllowed
}
rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
// TODO: send a RST_STREAM
return 0, errors.New("http2: handler wrote more than declared Content-Length")
}
if dataB != nil {
return rws.bw.Write(dataB)
} else {
return rws.bw.WriteString(dataS)
}
}
func (w *http2responseWriter) handlerDone() {
rws := w.rws
dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
http2responseWriterStatePool.Put(rws)
}
}
// Push errors.
var (
http2ErrRecursivePush = errors.New("http2: recursive push not allowed")
http2ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
)
var _ Pusher = (*http2responseWriter)(nil)
func (w *http2responseWriter) Push(target string, opts *PushOptions) error {
st := w.rws.stream
sc := st.sc
sc.serveG.checkNotOn()
// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
// http://tools.ietf.org/html/rfc7540#section-6.6
if st.isPushed() {
return http2ErrRecursivePush
}
if opts == nil {
opts = new(PushOptions)
}
// Default options.
if opts.Method == "" {
opts.Method = "GET"
}
if opts.Header == nil {
opts.Header = Header{}
}
wantScheme := "http"
if w.rws.req.TLS != nil {
wantScheme = "https"
}
// Validate the request.
u, err := url.Parse(target)
if err != nil {
return err
}
if u.Scheme == "" {
if !strings.HasPrefix(target, "/") {
return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
}
u.Scheme = wantScheme
u.Host = w.rws.req.Host
} else {
if u.Scheme != wantScheme {
return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
}
if u.Host == "" {
return errors.New("URL must have a host")
}
}
for k := range opts.Header {
if strings.HasPrefix(k, ":") {
return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
}
// These headers are meaningful only if the request has a body,
// but PUSH_PROMISE requests cannot have a body.
// http://tools.ietf.org/html/rfc7540#section-8.2
// Also disallow Host, since the promised URL must be absolute.
switch strings.ToLower(k) {
case "content-length", "content-encoding", "trailer", "te", "expect", "host":
return fmt.Errorf("promised request headers cannot include %q", k)
}
}
if err := http2checkValidHTTP2RequestHeaders(opts.Header); err != nil {
return err
}
// The RFC effectively limits promised requests to GET and HEAD:
// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
// http://tools.ietf.org/html/rfc7540#section-8.2
if opts.Method != "GET" && opts.Method != "HEAD" {
return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
}
msg := &http2startPushRequest{
parent: st,
method: opts.Method,
url: u,
header: http2cloneHeader(opts.Header),
done: http2errChanPool.Get().(chan error),
}
select {
case <-sc.doneServing:
return http2errClientDisconnected
case <-st.cw:
return http2errStreamClosed
case sc.serveMsgCh <- msg:
}
select {
case <-sc.doneServing:
return http2errClientDisconnected
case <-st.cw:
return http2errStreamClosed
case err := <-msg.done:
http2errChanPool.Put(msg.done)
return err
}
}
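// A hedged handler-side sketch of Push; the pushed path is illustrative.
// Only GET/HEAD targets with an absolute path (or an absolute URL on the
// same scheme) and no body- or Host-related headers are accepted.
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		if p, ok := w.(http.Pusher); ok {
//			if err := p.Push("/static/app.css", nil); err != nil {
//				// e.g. http.ErrNotSupported when the client disabled push
//			}
//		}
//		io.WriteString(w, "<html>...</html>")
//	}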
type http2startPushRequest struct {
parent *http2stream
method string
url *url.URL
header Header
done chan error
}
func (sc *http2serverConn) startPush(msg *http2startPushRequest) {
sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-6.6.
// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
// is in either the "open" or "half-closed (remote)" state.
if msg.parent.state != http2stateOpen && msg.parent.state != http2stateHalfClosedRemote {
// responseWriter.Push checks that the stream is peer-initiated.
msg.done <- http2errStreamClosed
return
}
// http://tools.ietf.org/html/rfc7540#section-6.6.
if !sc.pushEnabled {
msg.done <- ErrNotSupported
return
}
// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
// is written. Once the ID is allocated, we start the request handler.
allocatePromisedID := func() (uint32, error) {
sc.serveG.check()
// Check this again, just in case. Technically, we might have received
// an updated SETTINGS by the time we got around to writing this frame.
if !sc.pushEnabled {
return 0, ErrNotSupported
}
// http://tools.ietf.org/html/rfc7540#section-6.5.2.
if sc.curPushedStreams+1 > sc.clientMaxStreams {
return 0, http2ErrPushLimitReached
}
// http://tools.ietf.org/html/rfc7540#section-5.1.1.
// Streams initiated by the server MUST use even-numbered identifiers.
// A server that is unable to establish a new stream identifier can send a GOAWAY
// frame so that the client is forced to open a new connection for new streams.
if sc.maxPushPromiseID+2 >= 1<<31 {
sc.startGracefulShutdownInternal()
return 0, http2ErrPushLimitReached
}
sc.maxPushPromiseID += 2
promisedID := sc.maxPushPromiseID
// http://tools.ietf.org/html/rfc7540#section-8.2.
// Strictly speaking, the new stream should start in "reserved (local)", then
// transition to "half closed (remote)" after sending the initial HEADERS, but
// we start in "half closed (remote)" for simplicity.
// See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, http2stateHalfClosedRemote)
rw, req, err := sc.newWriterAndRequestNoBody(promised, http2requestParam{
method: msg.method,
scheme: msg.url.Scheme,
authority: msg.url.Host,
path: msg.url.RequestURI(),
header: http2cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
return promisedID, nil
}
sc.writeFrame(http2FrameWriteRequest{
write: &http2writePushPromise{
streamID: msg.parent.id,
method: msg.method,
url: msg.url,
h: msg.header,
allocatePromisedID: allocatePromisedID,
},
stream: msg.parent,
done: msg.done,
})
}
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func http2foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
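// For example (a hedged sketch), the call below invokes fn with "gzip",
// "chunked" and "trailers"; empty elements and surrounding whitespace are
// dropped.
//
//	http2foreachHeaderElement("gzip, chunked,, trailers ", fn)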
// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
var http2connHeaders = []string{
"Connection",
"Keep-Alive",
"Proxy-Connection",
"Transfer-Encoding",
"Upgrade",
}
// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
// per RFC 7540 Section 8.1.2.2.
// The returned error is reported to users.
func http2checkValidHTTP2RequestHeaders(h Header) error {
for _, k := range http2connHeaders {
if _, ok := h[k]; ok {
return fmt.Errorf("request header %q is not valid in HTTP/2", k)
}
}
te := h["Te"]
if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
}
return nil
}
func http2new400Handler(err error) HandlerFunc {
return func(w ResponseWriter, r *Request) {
Error(w, err.Error(), StatusBadRequest)
}
}
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way.
func http2h1ServerKeepAlivesDisabled(hs *Server) bool {
var x interface{} = hs
type I interface {
doKeepAlives() bool
}
if hs, ok := x.(I); ok {
return !hs.doKeepAlives()
}
return false
}
const (
// transportDefaultConnFlow is how many connection-level flow control
// tokens we give the server at start-up, past the default 64k.
http2transportDefaultConnFlow = 1 << 30
// transportDefaultStreamFlow is how many stream-level flow
// control tokens we announce to the peer, and how many bytes
// we buffer per stream.
http2transportDefaultStreamFlow = 4 << 20
// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
// a stream-level WINDOW_UPDATE for at a time.
http2transportDefaultStreamMinRefresh = 4 << 10
http2defaultUserAgent = "Go-http-client/2.0"
)
// Transport is an HTTP/2 Transport.
//
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type http2Transport struct {
// DialTLS specifies an optional dial function for creating
// TLS connections for requests.
//
// If DialTLS is nil, tls.Dial is used.
//
// If the returned net.Conn has a ConnectionState method like tls.Conn,
// it will be used to set http.Response.TLS.
DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
// TLSClientConfig specifies the TLS configuration to use with
// tls.Client. If nil, the default configuration is used.
TLSClientConfig *tls.Config
// ConnPool optionally specifies an alternate connection pool to use.
// If nil, the default is used.
ConnPool http2ClientConnPool
// DisableCompression, if true, prevents the Transport from
// requesting compression with an "Accept-Encoding: gzip"
// request header when the Request contains no existing
// Accept-Encoding value. If the Transport requests gzip on
// its own and gets a gzipped response, it's transparently
// decoded in the Response.Body. However, if the user
// explicitly requested gzip it is not automatically
// uncompressed.
DisableCompression bool
// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
// plain-text "http" scheme. Note that this does not enable h2c support.
AllowHTTP bool
// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
// send in the initial settings frame. It is how many bytes
// of response headers are allowed. Unlike the http2 spec, zero here
// means to use a default limit (currently 10MB). If you actually
// want to advertise an unlimited value to the peer, Transport
// interprets the highest possible value here (0xffffffff or 1<<32-1)
// to mean no limit.
MaxHeaderListSize uint32
// StrictMaxConcurrentStreams controls whether the server's
// SETTINGS_MAX_CONCURRENT_STREAMS should be respected
// globally. If false, new TCP connections are created to the
// server as needed to keep each under the per-connection
// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
// a global limit and callers of RoundTrip block when needed,
// waiting for their turn.
StrictMaxConcurrentStreams bool
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
t1 *Transport
connPoolOnce sync.Once
connPoolOrDef http2ClientConnPool // non-nil version of ConnPool
}
func (t *http2Transport) maxHeaderListSize() uint32 {
if t.MaxHeaderListSize == 0 {
return 10 << 20
}
if t.MaxHeaderListSize == 0xffffffff {
return 0
}
return t.MaxHeaderListSize
}
func (t *http2Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
func http2ConfigureTransport(t1 *Transport) error {
_, err := http2configureTransport(t1)
return err
}
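// A hedged caller-side sketch (using the exported name in
// golang.org/x/net/http2, where this function lives as
// http2.ConfigureTransport; tlsCfg is illustrative):
//
//	tr := &http.Transport{TLSClientConfig: tlsCfg}
//	if err := http2.ConfigureTransport(tr); err != nil {
//		log.Fatal(err)
//	}
//	client := &http.Client{Transport: tr}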
func http2configureTransport(t1 *Transport) (*http2Transport, error) {
connPool := new(http2clientConnPool)
t2 := &http2Transport{
ConnPool: http2noDialClientConnPool{connPool},
t1: t1,
}
connPool.t = t2
if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil {
return nil, err
}
if t1.TLSClientConfig == nil {
t1.TLSClientConfig = new(tls.Config)
}
if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
}
if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
}
upgradeFn := func(authority string, c *tls.Conn) RoundTripper {
addr := http2authorityAddr("https", authority)
if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
go c.Close()
return http2erringRoundTripper{err}
} else if !used {
// Turns out we don't need this c.
// For example, two goroutines made requests to the same host
// at the same time, both kicking off TCP dials (since the
// protocol was unknown).
go c.Close()
}
return t2
}
if m := t1.TLSNextProto; len(m) == 0 {
t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{
"h2": upgradeFn,
}
} else {
m["h2"] = upgradeFn
}
return t2, nil
}
func (t *http2Transport) connPool() http2ClientConnPool {
t.connPoolOnce.Do(t.initConnPool)
return t.connPoolOrDef
}
func (t *http2Transport) initConnPool() {
if t.ConnPool != nil {
t.connPoolOrDef = t.ConnPool
} else {
t.connPoolOrDef = &http2clientConnPool{t: t}
}
}
// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type http2ClientConn struct {
t *http2Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
singleUse bool // whether being used for a single http.Request
// readLoop goroutine fields:
readerDone chan struct{} // closed on error
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
idleTimer *time.Timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow http2flow // our conn-level flow control quota (cs.flow is per stream)
inflow http2flow // peer's conn-level flow control
closing bool
closed bool
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *http2GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*http2clientStream // client-initiated
nextStreamID uint32
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer
br *bufio.Reader
fr *http2Framer
lastActive time.Time
// Settings from peer: (also guarded by mu)
maxFrameSize uint32
maxConcurrentStreams uint32
peerMaxHeaderListSize uint64
initialWindowSize uint32
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
freeBuf [][]byte
wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
werr error // first write error that has occurred
}
// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type http2clientStream struct {
cc *http2ClientConn
req *Request
trace *httptrace.ClientTrace // or nil
ID uint32
resc chan http2resAndError
bufPipe http2pipe // buffered pipe with the flow-controlled response payload
startedWrite bool // started request body write; guarded by cc.mu
requestedGzip bool
on100 func() // optional code to run if we get a 100 continue response
flow http2flow // guarded by cc.mu
inflow http2flow // guarded by cc.mu
bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
readErr error // sticky read error; owned by transportResponseBody.Read
stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
peerReset chan struct{} // closed on peer reset
resetErr error // populated before peerReset is closed
done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
// owned by clientConnReadLoop:
firstByte bool // got the first response byte
pastHeaders bool // got first MetaHeadersFrame (actual headers)
pastTrailers bool // got optional second MetaHeadersFrame (trailers)
num1xx uint8 // number of 1xx responses seen
trailer Header // accumulated trailers
resTrailer *Header // client's Response.Trailer
}
// awaitRequestCancel waits for the user to cancel a request or for the done
// channel to be signaled. A non-nil error is returned only if the request was
// canceled.
func http2awaitRequestCancel(req *Request, done <-chan struct{}) error {
ctx := req.Context()
if req.Cancel == nil && ctx.Done() == nil {
return nil
}
select {
case <-req.Cancel:
return http2errRequestCanceled
case <-ctx.Done():
return ctx.Err()
case <-done:
return nil
}
}
var http2got1xxFuncForTests func(int, textproto.MIMEHeader) error
// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
// if any. It returns nil if not set or if the Go version is too old.
func (cs *http2clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
if fn := http2got1xxFuncForTests; fn != nil {
return fn
}
return http2traceGot1xxResponseFunc(cs.trace)
}
// awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage,
// etc). If the request is canceled, then cs will be canceled and closed.
func (cs *http2clientStream) awaitRequestCancel(req *Request) {
if err := http2awaitRequestCancel(req, cs.done); err != nil {
cs.cancelStream()
cs.bufPipe.CloseWithError(err)
}
}
func (cs *http2clientStream) cancelStream() {
cc := cs.cc
cc.mu.Lock()
didReset := cs.didReset
cs.didReset = true
cc.mu.Unlock()
if !didReset {
cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil)
cc.forgetStreamID(cs.ID)
}
}
// checkResetOrDone reports any error sent in a RST_STREAM frame by the
// server, or errStreamClosed if the stream is complete.
func (cs *http2clientStream) checkResetOrDone() error {
select {
case <-cs.peerReset:
return cs.resetErr
case <-cs.done:
return http2errStreamClosed
default:
return nil
}
}
func (cs *http2clientStream) getStartedWrite() bool {
cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
return cs.startedWrite
}
func (cs *http2clientStream) abortRequestBodyWrite(err error) {
if err == nil {
panic("nil error")
}
cc := cs.cc
cc.mu.Lock()
cs.stopReqBody = err
cc.cond.Broadcast()
cc.mu.Unlock()
}
type http2stickyErrWriter struct {
w io.Writer
err *error
}
func (sew http2stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
n, err = sew.w.Write(p)
*sew.err = err
return
}
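// A hedged sketch of the sticky-error pattern (conn, p1 and p2 are
// illustrative): once a Write fails, every later Write reports the same
// error without touching the underlying connection again.
//
//	var werr error
//	w := http2stickyErrWriter{w: conn, err: &werr}
//	w.Write(p1) // on failure, werr records the error
//	w.Write(p2) // returns werr immediately; conn is not written to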
// noCachedConnError is the concrete type of ErrNoCachedConn, which
// needs to be detected by net/http regardless of whether it's its
// bundled version (in h2_bundle.go with a rewritten type name) or
// from a user's x/net/http2. As such, it has a unique method name
// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
// isNoCachedConnError.
type http2noCachedConnError struct{}
func (http2noCachedConnError) IsHTTP2NoCachedConnError() {}
func (http2noCachedConnError) Error() string { return "http2: no cached connection was available" }
// isNoCachedConnError reports whether err is of type noCachedConnError
// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
// may coexist in the same running program.
func http2isNoCachedConnError(err error) bool {
_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
return ok
}
var http2ErrNoCachedConn error = http2noCachedConnError{}
// RoundTripOpt specifies options for the Transport.RoundTripOpt method.
type http2RoundTripOpt struct {
// OnlyCachedConn controls whether RoundTripOpt may
// create a new TCP connection. If set true and
// no cached connection is available, RoundTripOpt
// will return ErrNoCachedConn.
OnlyCachedConn bool
}
func (t *http2Transport) RoundTrip(req *Request) (*Response, error) {
return t.RoundTripOpt(req, http2RoundTripOpt{})
}
// authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
// to a host:port. The port 443 is added if needed.
func http2authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
port = "443"
if scheme == "http" {
port = "80"
}
host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
}
// IPv6 address literal, without a port:
if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
return host + ":" + port
}
return net.JoinHostPort(host, port)
}
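// Some illustrative mappings:
//
//	http2authorityAddr("https", "example.com")      // "example.com:443"
//	http2authorityAddr("http", "example.com")       // "example.com:80"
//	http2authorityAddr("https", "example.com:8443") // "example.com:8443"
//	http2authorityAddr("https", "[::1]")            // "[::1]:443"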
// RoundTripOpt is like RoundTrip, but takes options.
func (t *http2Transport) RoundTripOpt(req *Request, opt http2RoundTripOpt) (*Response, error) {
if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
return nil, errors.New("http2: unsupported scheme")
}
addr := http2authorityAddr(req.URL.Scheme, req.URL.Host)
for retry := 0; ; retry++ {
cc, err := t.connPool().GetClientConn(req, addr)
if err != nil {
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
http2traceGotConn(req, cc)
res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
if err != nil && retry <= 6 {
if req, err = http2shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
select {
case <-time.After(time.Second * time.Duration(backoff)):
continue
case <-req.Context().Done():
return nil, req.Context().Err()
}
}
}
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
return nil, err
}
return res, nil
}
}
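// For illustration of the retry schedule above: after the first failure the
// request is retried immediately; subsequent failures back off roughly
// 1s, 2s, 4s, 8s, 16s and 32s (each inflated by up to 10% random jitter)
// before the request is finally abandoned.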
// CloseIdleConnections closes any connections that were opened for
// previous requests but are now sitting idle.
// It does not interrupt any connections currently in use.
func (t *http2Transport) CloseIdleConnections() {
if cp, ok := t.connPool().(http2clientConnPoolIdleCloser); ok {
cp.closeIdleConnections()
}
}
var (
http2errClientConnClosed = errors.New("http2: client conn is closed")
http2errClientConnUnusable = errors.New("http2: client conn not usable")
http2errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
func http2shouldRetryRequest(req *Request, err error, afterBodyWrite bool) (*Request, error) {
if !http2canRetryError(err) {
return nil, err
}
// If the Body is nil (or http.NoBody), it's safe to reuse
// this request and its Body.
if req.Body == nil || req.Body == NoBody {
return req, nil
}
// If the request body can be reset back to its original
// state via the optional req.GetBody, do that.
if req.GetBody != nil {
// TODO: consider a req.Body.Close here? or audit that all caller paths do?
body, err := req.GetBody()
if err != nil {
return nil, err
}
newReq := *req
newReq.Body = body
return &newReq, nil
}
// The Request.Body can't reset back to the beginning, but we
// don't seem to have started to read from it yet, so reuse
// the request directly. The "afterBodyWrite" means the
// bodyWrite process has started, which becomes true before
// the first Read.
if !afterBodyWrite {
return req, nil
}
return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
}
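// A hedged caller-side sketch of making a body replayable via GetBody
// (http.NewRequest already sets GetBody for *bytes.Reader, *bytes.Buffer
// and *strings.Reader bodies; it is shown explicitly for illustration,
// and the URL and payload are made up):
//
//	payload := []byte(`{"x":1}`)
//	req, _ := http.NewRequest("POST", "https://example.com/api", bytes.NewReader(payload))
//	req.GetBody = func() (io.ReadCloser, error) {
//		return ioutil.NopCloser(bytes.NewReader(payload)), nil
//	}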
func http2canRetryError(err error) bool {
if err == http2errClientConnUnusable || err == http2errClientConnGotGoAway {
return true
}
if se, ok := err.(http2StreamError); ok {
return se.Code == http2ErrCodeRefusedStream
}
return false
}
func (t *http2Transport) dialClientConn(addr string, singleUse bool) (*http2ClientConn, error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
if err != nil {
return nil, err
}
return t.newClientConn(tconn, singleUse)
}
func (t *http2Transport) newTLSConfig(host string) *tls.Config {
cfg := new(tls.Config)
if t.TLSClientConfig != nil {
*cfg = *t.TLSClientConfig.Clone()
}
if !http2strSliceContains(cfg.NextProtos, http2NextProtoTLS) {
cfg.NextProtos = append([]string{http2NextProtoTLS}, cfg.NextProtos...)
}
if cfg.ServerName == "" {
cfg.ServerName = host
}
return cfg
}
func (t *http2Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
if t.DialTLS != nil {
return t.DialTLS
}
return t.dialTLSDefault
}
func (t *http2Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
cn, err := tls.Dial(network, addr, cfg)
if err != nil {
return nil, err
}
if err := cn.Handshake(); err != nil {
return nil, err
}
if !cfg.InsecureSkipVerify {
if err := cn.VerifyHostname(cfg.ServerName); err != nil {
return nil, err
}
}
state := cn.ConnectionState()
if p := state.NegotiatedProtocol; p != http2NextProtoTLS {
return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, http2NextProtoTLS)
}
if !state.NegotiatedProtocolIsMutual {
return nil, errors.New("http2: could not negotiate protocol mutually")
}
return cn, nil
}
// disableKeepAlives reports whether connections should be closed as
// soon as possible after handling the first request.
func (t *http2Transport) disableKeepAlives() bool {
return t.t1 != nil && t.t1.DisableKeepAlives
}
func (t *http2Transport) expectContinueTimeout() time.Duration {
if t.t1 == nil {
return 0
}
return t.t1.ExpectContinueTimeout
}
func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) {
return t.newClientConn(c, false)
}
func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2ClientConn, error) {
cc := &http2ClientConn{
t: t,
tconn: c,
readerDone: make(chan struct{}),
nextStreamID: 1,
maxFrameSize: 16 << 10, // spec default
initialWindowSize: 65535, // spec default
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*http2clientStream),
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
if http2VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
}
cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(http2initialWindowSize))
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(http2stickyErrWriter{c, &cc.werr})
cc.br = bufio.NewReader(c)
cc.fr = http2NewFramer(cc.bw, cc.br)
cc.fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
// henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf)
if t.AllowHTTP {
cc.nextStreamID = 3
}
if cs, ok := c.(http2connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state
}
initialSettings := []http2Setting{
{ID: http2SettingEnablePush, Val: 0},
{ID: http2SettingInitialWindowSize, Val: http2transportDefaultStreamFlow},
}
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, http2Setting{ID: http2SettingMaxHeaderListSize, Val: max})
}
cc.bw.Write(http2clientPreface)
cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, http2transportDefaultConnFlow)
cc.inflow.add(http2transportDefaultConnFlow + http2initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
return nil, cc.werr
}
go cc.readLoop()
return cc, nil
}
func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) {
cc.mu.Lock()
defer cc.mu.Unlock()
old := cc.goAway
cc.goAway = f
// Merge the previous and current GoAway error frames.
if cc.goAwayDebug == "" {
cc.goAwayDebug = string(f.DebugData())
}
if old != nil && old.ErrCode != http2ErrCodeNo {
cc.goAway.ErrCode = old.ErrCode
}
last := f.LastStreamID
for streamID, cs := range cc.streams {
if streamID > last {
select {
case cs.resc <- http2resAndError{err: http2errClientConnGotGoAway}:
default:
}
}
}
}
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed and has not received or sent a GOAWAY.
func (cc *http2ClientConn) CanTakeNewRequest() bool {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.canTakeNewRequestLocked()
}
// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type http2clientConnIdleState struct {
canTakeNewRequest bool
freshConn bool // whether it's unused by any previous request
}
func (cc *http2ClientConn) idleState() http2clientConnIdleState {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.idleStateLocked()
}
func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) {
if cc.singleUse && cc.nextStreamID > 1 {
return
}
var maxConcurrentOkay bool
if cc.t.StrictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
// writing it.
maxConcurrentOkay = true
} else {
maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
func (cc *http2ClientConn) canTakeNewRequestLocked() bool {
st := cc.idleStateLocked()
return st.canTakeNewRequest
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is
// clearer.
func (cc *http2ClientConn) onIdleTimeout() {
cc.closeIfIdle()
}
func (cc *http2ClientConn) closeIfIdle() {
cc.mu.Lock()
if len(cc.streams) > 0 {
cc.mu.Unlock()
return
}
cc.closed = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
if http2VerboseLogs {
cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
}
cc.tconn.Close()
}
var http2shutdownEnterWaitStateHook = func() {}
// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *http2ClientConn) Shutdown(ctx context.Context) error {
if err := cc.sendGoAway(); err != nil {
return err
}
// Wait for all in-flight streams to complete or connection to close
done := make(chan error, 1)
cancelled := false // guarded by cc.mu
go func() {
cc.mu.Lock()
defer cc.mu.Unlock()
for {
if len(cc.streams) == 0 || cc.closed {
cc.closed = true
done <- cc.tconn.Close()
break
}
if cancelled {
break
}
cc.cond.Wait()
}
}()
http2shutdownEnterWaitStateHook()
select {
case err := <-done:
return err
case <-ctx.Done():
cc.mu.Lock()
// Free the goroutine above
cancelled = true
cc.cond.Broadcast()
cc.mu.Unlock()
return ctx.Err()
}
}
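// A hedged caller-side sketch: give in-flight streams a bounded amount of
// time to finish, then fall back to an immediate Close.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := cc.Shutdown(ctx); err != nil {
//		cc.Close()
//	}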
func (cc *http2ClientConn) sendGoAway() error {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if cc.closing {
// GOAWAY sent already
return nil
}
// Send a graceful shutdown frame to server
maxStreamID := cc.nextStreamID
if err := cc.fr.WriteGoAway(maxStreamID, http2ErrCodeNo, nil); err != nil {
return err
}
if err := cc.bw.Flush(); err != nil {
return err
}
// Prevent new requests
cc.closing = true
return nil
}
// Close closes the client connection immediately.
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *http2ClientConn) Close() error {
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
err := errors.New("http2: client connection force closed via ClientConn.Close")
for id, cs := range cc.streams {
select {
case cs.resc <- http2resAndError{err: err}:
default:
}
cs.bufPipe.CloseWithError(err)
delete(cc.streams, id)
}
cc.closed = true
return cc.tconn.Close()
}
const http2maxAllocFrameSize = 512 << 10
// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
// They're capped at the min of the peer's max frame size and 512KB
// (somewhat arbitrarily), but definitely capped so we don't allocate 4GB
// buffers.
func (cc *http2ClientConn) frameScratchBuffer() []byte {
cc.mu.Lock()
size := cc.maxFrameSize
if size > http2maxAllocFrameSize {
size = http2maxAllocFrameSize
}
for i, buf := range cc.freeBuf {
if len(buf) >= int(size) {
cc.freeBuf[i] = nil
cc.mu.Unlock()
return buf[:size]
}
}
cc.mu.Unlock()
return make([]byte, size)
}
func (cc *http2ClientConn) putFrameScratchBuffer(buf []byte) {
cc.mu.Lock()
defer cc.mu.Unlock()
const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
if len(cc.freeBuf) < maxBufs {
cc.freeBuf = append(cc.freeBuf, buf)
return
}
for i, old := range cc.freeBuf {
if old == nil {
cc.freeBuf[i] = buf
return
}
}
// forget about it.
}
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
var http2errRequestCanceled = errors.New("net/http: request canceled")
func http2commaSeparatedTrailers(req *Request) (string, error) {
keys := make([]string, 0, len(req.Trailer))
for k := range req.Trailer {
k = CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", &http2badStringError{"invalid Trailer key", k}
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
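// Illustrative sketch (hypothetical helper): what commaSeparatedTrailers
// produces for a request declaring two trailer keys. The keys are
// canonicalized and sorted, so the result here is "Expires,X-Checksum".
func http2exampleTrailerHeaderValue() (string, error) {
	req := &Request{Trailer: Header{
		"x-checksum": nil,
		"expires":    nil,
	}}
	return http2commaSeparatedTrailers(req)
}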
func (cc *http2ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
}
// No way to do this (yet?) with just an http2.Transport. Probably
// no need; Request.Cancel is the newer mechanism. We only need to support
// this for compatibility with the old http.Transport fields when
// we're doing transparent http2.
return 0
}
// checkConnHeaders checks whether req has any invalid connection-level headers,
// per RFC 7540 Section 8.1.2.2 (Connection-Specific Header Fields).
// Certain headers are special-cased as okay but not transmitted later.
func http2checkConnHeaders(req *Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
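// Illustrative sketch (hypothetical helper): "Connection: keep-alive" passes
// the check above (it is special-cased and simply dropped when the headers are
// encoded), while any Upgrade header is rejected outright.
func http2exampleConnHeaderCheck() (keepAliveErr, upgradeErr error) {
	keepAlive := &Request{Header: Header{"Connection": {"keep-alive"}}}
	upgrade := &Request{Header: Header{"Upgrade": {"websocket"}}}
	return http2checkConnHeaders(keepAlive), http2checkConnHeaders(upgrade) // nil, non-nil
}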
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func http2actualContentLength(req *Request) int64 {
if req.Body == nil || req.Body == NoBody {
return 0
}
if req.ContentLength != 0 {
return req.ContentLength
}
return -1
}
func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
resp, _, err := cc.roundTrip(req)
return resp, err
}
func (cc *http2ClientConn) roundTrip(req *Request) (res *Response, gotErrAfterReqBodyWrite bool, err error) {
if err := http2checkConnHeaders(req); err != nil {
return nil, false, err
}
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}
trailers, err := http2commaSeparatedTrailers(req)
if err != nil {
return nil, false, err
}
hasTrailers := trailers != ""
cc.mu.Lock()
if err := cc.awaitOpenSlotForRequest(req); err != nil {
cc.mu.Unlock()
return nil, false, err
}
body := req.Body
contentLen := http2actualContentLength(req)
hasBody := contentLen != 0
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
requestedGzip = true
}
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
if err != nil {
cc.mu.Unlock()
return nil, false, err
}
cs := cc.newStream()
cs.req = req
cs.trace = httptrace.ContextClientTrace(req.Context())
cs.requestedGzip = requestedGzip
bodyWriter := cc.t.getBodyWriterState(cs, body)
cs.on100 = bodyWriter.on100
cc.wmu.Lock()
endStream := !hasBody && !hasTrailers
werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
cc.wmu.Unlock()
http2traceWroteHeaders(cs.trace)
cc.mu.Unlock()
if werr != nil {
if hasBody {
req.Body.Close() // per RoundTripper contract
bodyWriter.cancel()
}
cc.forgetStreamID(cs.ID)
// Don't bother sending a RST_STREAM (our write already failed;
// no need to keep writing)
http2traceWroteRequest(cs.trace, werr)
return nil, false, werr
}
var respHeaderTimer <-chan time.Time
if hasBody {
bodyWriter.scheduleBodyWrite()
} else {
http2traceWroteRequest(cs.trace, nil)
if d := cc.responseHeaderTimeout(); d != 0 {
timer := time.NewTimer(d)
defer timer.Stop()
respHeaderTimer = timer.C
}
}
readLoopResCh := cs.resc
bodyWritten := false
ctx := req.Context()
handleReadLoopResponse := func(re http2resAndError) (*Response, bool, error) {
res := re.res
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc., abort any
// ongoing write, assuming that the server doesn't care
// about our request body. If the server replied with 1xx or
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(http2errStopReqBodyWrite)
}
if re.err != nil {
cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), re.err
}
res.Request = req
res.TLS = cc.tlsState
return res, false, nil
}
for {
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
case <-respHeaderTimer:
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil)
} else {
bodyWriter.cancel()
cs.abortRequestBodyWrite(http2errStopReqBodyWriteAndCancel)
}
cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), http2errTimeout
case <-ctx.Done():
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil)
} else {
bodyWriter.cancel()
cs.abortRequestBodyWrite(http2errStopReqBodyWriteAndCancel)
}
cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), ctx.Err()
case <-req.Cancel:
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil)
} else {
bodyWriter.cancel()
cs.abortRequestBodyWrite(http2errStopReqBodyWriteAndCancel)
}
cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), http2errRequestCanceled
case <-cs.peerReset:
// processResetStream already removed the
// stream from the streams map; no need for
// forgetStreamID.
return nil, cs.getStartedWrite(), cs.resetErr
case err := <-bodyWriter.resc:
// Prefer the read loop's response, if available. Issue 16102.
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
default:
}
if err != nil {
cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), err
}
bodyWritten = true
if d := cc.responseHeaderTimeout(); d != 0 {
timer := time.NewTimer(d)
defer timer.Stop()
respHeaderTimer = timer.C
}
}
}
}
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
func (cc *http2ClientConn) awaitOpenSlotForRequest(req *Request) error {
var waitingForConn chan struct{}
var waitingForConnErr error // guarded by cc.mu
for {
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
if waitingForConn != nil {
close(waitingForConn)
}
return http2errClientConnUnusable
}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
if waitingForConn != nil {
close(waitingForConn)
}
return nil
}
// Unfortunately, we cannot wait on a condition variable and a channel at
// the same time, so instead we spin up a goroutine to check whether the
// request is canceled while we wait for a slot to open on the connection.
if waitingForConn == nil {
waitingForConn = make(chan struct{})
go func() {
if err := http2awaitRequestCancel(req, waitingForConn); err != nil {
cc.mu.Lock()
waitingForConnErr = err
cc.cond.Broadcast()
cc.mu.Unlock()
}
}()
}
cc.pendingRequests++
cc.cond.Wait()
cc.pendingRequests--
if waitingForConnErr != nil {
return waitingForConnErr
}
}
}
// requires cc.wmu be held
func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
first := true // first frame written (HEADERS is first, then CONTINUATION)
for len(hdrs) > 0 && cc.werr == nil {
chunk := hdrs
if len(chunk) > maxFrameSize {
chunk = chunk[:maxFrameSize]
}
hdrs = hdrs[len(chunk):]
endHeaders := len(hdrs) == 0
if first {
cc.fr.WriteHeaders(http2HeadersFrameParam{
StreamID: streamID,
BlockFragment: chunk,
EndStream: endStream,
EndHeaders: endHeaders,
})
first = false
} else {
cc.fr.WriteContinuation(streamID, endHeaders, chunk)
}
}
// TODO(bradfitz): this Flush could potentially block (as
// could the WriteHeaders call(s) above), which means they
// wouldn't respond to Request.Cancel being readable. That's
// rare, but this should probably be in a goroutine.
cc.bw.Flush()
return cc.werr
}
// internal error values; they don't escape to callers
var (
// abort request body write; don't send cancel
http2errStopReqBodyWrite = errors.New("http2: aborting request body write")
// abort request body write, but also send a RST_STREAM with error code CANCEL.
http2errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
)
func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
cc := cs.cc
sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
buf := cc.frameScratchBuffer()
defer cc.putFrameScratchBuffer(buf)
defer func() {
http2traceWroteRequest(cs.trace, err)
// TODO: write h12Compare test showing whether
// Request.Body is closed by the Transport,
// and in multiple cases: server replies <=299 and >299
// while still writing request body
cerr := bodyCloser.Close()
if err == nil {
err = cerr
}
}()
req := cs.req
hasTrailers := req.Trailer != nil
var sawEOF bool
for !sawEOF {
n, err := body.Read(buf)
if err == io.EOF {
sawEOF = true
err = nil
} else if err != nil {
cc.writeStreamReset(cs.ID, http2ErrCodeCancel, err)
return err
}
remain := buf[:n]
for len(remain) > 0 && err == nil {
var allowed int32
allowed, err = cs.awaitFlowControl(len(remain))
switch {
case err == http2errStopReqBodyWrite:
return err
case err == http2errStopReqBodyWriteAndCancel:
cc.writeStreamReset(cs.ID, http2ErrCodeCancel, nil)
return err
case err != nil:
return err
}
cc.wmu.Lock()
data := remain[:allowed]
remain = remain[allowed:]
sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
err = cc.fr.WriteData(cs.ID, sentEnd, data)
if err == nil {
// TODO(bradfitz): this flush is for latency, not bandwidth.
// Most requests won't need this. Make this opt-in or
// opt-out? Use some heuristic on the body type? Nagle-like
// timers? Based on 'n'? Only last chunk of this for loop,
// unless flow control tokens are low? For now, always.
// If we change this, see comment below.
err = cc.bw.Flush()
}
cc.wmu.Unlock()
}
if err != nil {
return err
}
}
if sentEnd {
// Already sent END_STREAM (which implies we have no
// trailers) and flushed, because currently all
// WriteData frames above get a flush. So we're done.
return nil
}
var trls []byte
if hasTrailers {
cc.mu.Lock()
trls, err = cc.encodeTrailers(req)
cc.mu.Unlock()
if err != nil {
cc.writeStreamReset(cs.ID, http2ErrCodeInternal, err)
cc.forgetStreamID(cs.ID)
return err
}
}
cc.mu.Lock()
maxFrameSize := int(cc.maxFrameSize)
cc.mu.Unlock()
cc.wmu.Lock()
defer cc.wmu.Unlock()
// Two ways to send END_STREAM: either with trailers, or
// with an empty DATA frame.
if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
} else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
if ferr := cc.bw.Flush(); ferr != nil && err == nil {
err = ferr
}
return err
}
// awaitFlowControl waits for [1, min(maxBytes, cs.cc.maxFrameSize)] flow
// control tokens from the server.
// It returns either the non-zero number of tokens taken or an error
// if the stream is dead.
func (cs *http2clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
for {
if cc.closed {
return 0, http2errClientConnClosed
}
if cs.stopReqBody != nil {
return 0, cs.stopReqBody
}
if err := cs.checkResetOrDone(); err != nil {
return 0, err
}
if a := cs.flow.available(); a > 0 {
take := a
if int(take) > maxBytes {
take = int32(maxBytes) // can't truncate int; take is int32
}
if take > int32(cc.maxFrameSize) {
take = int32(cc.maxFrameSize)
}
cs.flow.take(take)
return take, nil
}
cc.cond.Wait()
}
}
type http2badStringError struct {
what string
str string
}
func (e *http2badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
// requires cc.mu be held.
func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return nil, err
}
var path string
if req.Method != "CONNECT" {
path = req.URL.RequestURI()
if !http2validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !http2validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests.)
for k, vv := range req.Header {
if !httpguts.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("invalid HTTP header name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
}
}
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production; see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
f(":method", req.Method)
if req.Method != "CONNECT" {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
strings.EqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if strings.EqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
}
for _, v := range vv {
f(k, v)
}
}
if http2shouldSendReqContentLength(req.Method, contentLength) {
f("content-length", strconv.FormatInt(contentLength, 10))
}
if addGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", http2defaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > cc.peerMaxHeaderListSize {
return nil, http2errRequestHeaderListSize
}
trace := httptrace.ContextClientTrace(req.Context())
traceHeaders := http2traceHasWroteHeaderField(trace)
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name = strings.ToLower(name)
cc.writeHeader(name, value)
if traceHeaders {
http2traceWroteHeaderField(trace, name, value)
}
})
return cc.hbuf.Bytes(), nil
}
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func http2shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also matters little for http2 either way, since END_STREAM marks the end of the body.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}
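// Illustrative sketch (hypothetical helper) of the rules above: a zero-length
// POST still sends "content-length: 0", a zero-length GET does not, and an
// unknown length (-1) never sends the header.
func http2exampleContentLengthRules() [3]bool {
	return [3]bool{
		http2shouldSendReqContentLength("POST", 0), // true: zero-length POST
		http2shouldSendReqContentLength("GET", 0),  // false: zero-length GET
		http2shouldSendReqContentLength("GET", -1), // false: unknown length
	}
}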
// requires cc.mu be held.
func (cc *http2ClientConn) encodeTrailers(req *Request) ([]byte, error) {
cc.hbuf.Reset()
hlSize := uint64(0)
for k, vv := range req.Trailer {
for _, v := range vv {
hf := hpack.HeaderField{Name: k, Value: v}
hlSize += uint64(hf.Size())
}
}
if hlSize > cc.peerMaxHeaderListSize {
return nil, http2errRequestHeaderListSize
}
for k, vv := range req.Trailer {
// Transfer-Encoding, etc.. have already been filtered at the
// start of RoundTrip
lowKey := strings.ToLower(k)
for _, v := range vv {
cc.writeHeader(lowKey, v)
}
}
return cc.hbuf.Bytes(), nil
}
func (cc *http2ClientConn) writeHeader(name, value string) {
if http2VerboseLogs {
log.Printf("http2: Transport encoding header %q = %q", name, value)
}
cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
}
type http2resAndError struct {
res *Response
err error
}
// requires cc.mu be held.
func (cc *http2ClientConn) newStream() *http2clientStream {
cs := &http2clientStream{
cc: cc,
ID: cc.nextStreamID,
resc: make(chan http2resAndError, 1),
peerReset: make(chan struct{}),
done: make(chan struct{}),
}
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
cs.inflow.add(http2transportDefaultStreamFlow)
cs.inflow.setConnFlow(&cc.inflow)
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
return cs
}
func (cc *http2ClientConn) forgetStreamID(id uint32) {
cc.streamByID(id, true)
}
func (cc *http2ClientConn) streamByID(id uint32, andRemove bool) *http2clientStream {
cc.mu.Lock()
defer cc.mu.Unlock()
cs := cc.streams[id]
if andRemove && cs != nil && !cc.closed {
cc.lastActive = time.Now()
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
}
close(cs.done)
// Wake up checkResetOrDone via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
cc.cond.Broadcast()
}
return cs
}
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type http2clientConnReadLoop struct {
cc *http2ClientConn
closeWhenIdle bool
}
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *http2ClientConn) readLoop() {
rl := &http2clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
if ce, ok := cc.readerErr.(http2ConnectionError); ok {
cc.wmu.Lock()
cc.fr.WriteGoAway(0, http2ErrCode(ce), nil)
cc.wmu.Unlock()
}
}
// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type http2GoAwayError struct {
LastStreamID uint32
ErrCode http2ErrCode
DebugData string
}
func (e http2GoAwayError) Error() string {
return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
e.LastStreamID, e.ErrCode, e.DebugData)
}
func http2isEOFOrNetReadError(err error) bool {
if err == io.EOF {
return true
}
ne, ok := err.(*net.OpError)
return ok && ne.Op == "read"
}
func (rl *http2clientConnReadLoop) cleanup() {
cc := rl.cc
defer cc.tconn.Close()
defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone)
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}
// Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not
// gotten a response yet.
err := cc.readerErr
cc.mu.Lock()
if cc.goAway != nil && http2isEOFOrNetReadError(err) {
err = http2GoAwayError{
LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug,
}
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
for _, cs := range cc.streams {
cs.bufPipe.CloseWithError(err) // no-op if already closed
select {
case cs.resc <- http2resAndError{err: err}:
default:
}
close(cs.done)
}
cc.closed = true
cc.cond.Broadcast()
cc.mu.Unlock()
}
func (rl *http2clientConnReadLoop) run() error {
cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a HEADERS reply
gotSettings := false
for {
f, err := cc.fr.ReadFrame()
if err != nil {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(http2StreamError); ok {
if cs := cc.streamByID(se.StreamID, false); cs != nil {
cs.cc.writeStreamReset(cs.ID, se.Code, err)
cs.cc.forgetStreamID(cs.ID)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
rl.endStreamError(cs, se)
}
continue
} else if err != nil {
return err
}
if http2VerboseLogs {
cc.vlogf("http2: Transport received %s", http2summarizeFrame(f))
}
if !gotSettings {
if _, ok := f.(*http2SettingsFrame); !ok {
cc.logf("protocol error: received %T before a SETTINGS frame", f)
return http2ConnectionError(http2ErrCodeProtocol)
}
gotSettings = true
}
maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
case *http2MetaHeadersFrame:
err = rl.processHeaders(f)
maybeIdle = true
gotReply = true
case *http2DataFrame:
err = rl.processData(f)
maybeIdle = true
case *http2GoAwayFrame:
err = rl.processGoAway(f)
maybeIdle = true
case *http2RSTStreamFrame:
err = rl.processResetStream(f)
maybeIdle = true
case *http2SettingsFrame:
err = rl.processSettings(f)
case *http2PushPromiseFrame:
err = rl.processPushPromise(f)
case *http2WindowUpdateFrame:
err = rl.processWindowUpdate(f)
case *http2PingFrame:
err = rl.processPing(f)
default:
cc.logf("Transport: unhandled response frame type %T", f)
}
if err != nil {
if http2VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, http2summarizeFrame(f), err)
}
return err
}
if rl.closeWhenIdle && gotReply && maybeIdle {
cc.closeIfIdle()
}
}
}
func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, false)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
// was just something we canceled, ignore it.
return nil
}
if f.StreamEnded() {
// Issue 20521: If the stream has ended, streamByID() causes
// clientStream.done to be closed, which causes the request's bodyWriter
// to be closed with an errStreamClosed, which may be received by
// clientConn.RoundTrip before the result of processing these headers.
// Deferring stream closure allows the header processing to occur first.
// clientConn.RoundTrip may still receive the bodyWriter error first, but
// the fix for issue 16102 prioritises any response.
//
// Issue 22413: If there is no request body, we should close the
// stream before writing to cs.resc so that the stream is closed
// immediately once RoundTrip returns.
if cs.req.Body != nil {
defer cc.forgetStreamID(f.StreamID)
} else {
cc.forgetStreamID(f.StreamID)
}
}
if !cs.firstByte {
if cs.trace != nil {
// TODO(bradfitz): move first response byte earlier,
// when we first read the 9 byte header, not waiting
// until all the HEADERS+CONTINUATION frames have been
// merged. This works for now.
http2traceFirstResponseByte(cs.trace)
}
cs.firstByte = true
}
if !cs.pastHeaders {
cs.pastHeaders = true
} else {
return rl.processTrailers(cs, f)
}
res, err := rl.handleResponse(cs, f)
if err != nil {
if _, ok := err.(http2ConnectionError); ok {
return err
}
// Any other error type is a stream error.
cs.cc.writeStreamReset(f.StreamID, http2ErrCodeProtocol, err)
cc.forgetStreamID(cs.ID)
cs.resc <- http2resAndError{err: err}
return nil // return nil from process* funcs to keep conn alive
}
if res == nil {
// (nil, nil) special case. See handleResponse docs.
return nil
}
cs.resTrailer = &res.Trailer
cs.resc <- http2resAndError{res: res}
return nil
}
// may return error types nil, or ConnectionError. Any other error value
// is a StreamError of type ErrCodeProtocol. The returned error in that case
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 1xx responses).
func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http2MetaHeadersFrame) (*Response, error) {
if f.Truncated {
return nil, http2errResponseHeaderListSize
}
status := f.PseudoValue("status")
if status == "" {
return nil, errors.New("malformed response from server: missing status pseudo header")
}
statusCode, err := strconv.Atoi(status)
if err != nil {
return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
}
header := make(Header)
res := &Response{
Proto: "HTTP/2.0",
ProtoMajor: 2,
Header: header,
StatusCode: statusCode,
Status: status + " " + StatusText(statusCode),
}
for _, hf := range f.RegularFields() {
key := CanonicalHeaderKey(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
t = make(Header)
res.Trailer = t
}
http2foreachHeaderElement(hf.Value, func(v string) {
t[CanonicalHeaderKey(v)] = nil
})
} else {
header[key] = append(header[key], hf.Value)
}
}
if statusCode >= 100 && statusCode <= 199 {
cs.num1xx++
const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
if cs.num1xx > max1xxResponses {
return nil, errors.New("http2: too many 1xx informational responses")
}
if fn := cs.get1xxTraceFunc(); fn != nil {
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
return nil, err
}
}
if statusCode == 100 {
http2traceGot100Continue(cs.trace)
if cs.on100 != nil {
cs.on100() // forces any write delay timer to fire
}
}
cs.pastHeaders = false // do it all again
return nil, nil
}
streamEnded := f.StreamEnded()
isHead := cs.req.Method == "HEAD"
if !streamEnded || isHead {
res.ContentLength = -1
if clens := res.Header["Content-Length"]; len(clens) == 1 {
if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
res.ContentLength = clen64
} else {
// TODO: care? unlike http/1, it won't mess up our framing, so it's
// more safe smuggling-wise to ignore.
}
} else if len(clens) > 1 {
// TODO: care? unlike http/1, it won't mess up our framing, so it's
// more safe smuggling-wise to ignore.
}
}
if streamEnded || isHead {
res.Body = http2noBody
return res, nil
}
cs.bufPipe = http2pipe{b: &http2dataBuffer{expected: res.ContentLength}}
cs.bytesRemain = res.ContentLength
res.Body = http2transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req)
if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
res.Header.Del("Content-Encoding")
res.Header.Del("Content-Length")
res.ContentLength = -1
res.Body = &http2gzipReader{body: res.Body}
res.Uncompressed = true
}
return res, nil
}
func (rl *http2clientConnReadLoop) processTrailers(cs *http2clientStream, f *http2MetaHeadersFrame) error {
if cs.pastTrailers {
// Too many HEADERS frames for this stream.
return http2ConnectionError(http2ErrCodeProtocol)
}
cs.pastTrailers = true
if !f.StreamEnded() {
// We expect that any HEADERS frame carrying trailers also
// has END_STREAM set.
return http2ConnectionError(http2ErrCodeProtocol)
}
if len(f.PseudoFields()) > 0 {
// No pseudo header fields are defined for trailers.
// TODO: ConnectionError might be overly harsh? Check.
return http2ConnectionError(http2ErrCodeProtocol)
}
trailer := make(Header)
for _, hf := range f.RegularFields() {
key := CanonicalHeaderKey(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
rl.endStream(cs)
return nil
}
// transportResponseBody is the concrete type of Transport.RoundTrip's
// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.bufPipe.
// On Close it sends RST_STREAM if EOF wasn't already seen.
type http2transportResponseBody struct {
cs *http2clientStream
}
func (b http2transportResponseBody) Read(p []byte) (n int, err error) {
cs := b.cs
cc := cs.cc
if cs.readErr != nil {
return 0, cs.readErr
}
n, err = b.cs.bufPipe.Read(p)
if cs.bytesRemain != -1 {
if int64(n) > cs.bytesRemain {
n = int(cs.bytesRemain)
if err == nil {
err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
cc.writeStreamReset(cs.ID, http2ErrCodeProtocol, err)
}
cs.readErr = err
return int(cs.bytesRemain), err
}
cs.bytesRemain -= int64(n)
if err == io.EOF && cs.bytesRemain > 0 {
err = io.ErrUnexpectedEOF
cs.readErr = err
return n, err
}
}
if n == 0 {
// No flow control tokens to send back.
return
}
cc.mu.Lock()
defer cc.mu.Unlock()
var connAdd, streamAdd int32
// Check the conn-level first, before the stream-level.
if v := cc.inflow.available(); v < http2transportDefaultConnFlow/2 {
connAdd = http2transportDefaultConnFlow - v
cc.inflow.add(connAdd)
}
if err == nil { // No need to refresh if the stream is over or failed.
// Consider any buffered body data (read from the conn but not
// consumed by the client) when computing flow control for this
// stream.
v := int(cs.inflow.available()) + cs.bufPipe.Len()
if v < http2transportDefaultStreamFlow-http2transportDefaultStreamMinRefresh {
streamAdd = int32(http2transportDefaultStreamFlow - v)
cs.inflow.add(streamAdd)
}
}
if connAdd != 0 || streamAdd != 0 {
cc.wmu.Lock()
defer cc.wmu.Unlock()
if connAdd != 0 {
cc.fr.WriteWindowUpdate(0, http2mustUint31(connAdd))
}
if streamAdd != 0 {
cc.fr.WriteWindowUpdate(cs.ID, http2mustUint31(streamAdd))
}
cc.bw.Flush()
}
return
}
var http2errClosedResponseBody = errors.New("http2: response body closed")
func (b http2transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()
if unread > 0 || !serverSentStreamEnd {
cc.mu.Lock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, http2ErrCodeCancel)
cs.didReset = true
}
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
cc.mu.Unlock()
}
cs.bufPipe.BreakWithError(http2errClosedResponseBody)
cc.forgetStreamID(cs.ID)
return nil
}
func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded())
data := f.Data()
if cs == nil {
cc.mu.Lock()
neverSent := cc.nextStreamID
cc.mu.Unlock()
if f.StreamID >= neverSent {
// We never asked for this.
cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
return http2ConnectionError(http2ErrCodeProtocol)
}
// We probably did ask for this, but canceled. Just ignore it.
// TODO: be stricter here? only silently ignore things which
// we canceled, but not things which were closed normally
// by the peer? Tough without accumulating too much state.
// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
cc.inflow.add(int32(f.Length))
cc.mu.Unlock()
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(f.Length))
cc.bw.Flush()
cc.wmu.Unlock()
}
return nil
}
if !cs.firstByte {
cc.logf("protocol error: received DATA before a HEADERS frame")
rl.endStreamError(cs, http2StreamError{
StreamID: f.StreamID,
Code: http2ErrCodeProtocol,
})
return nil
}
if f.Length > 0 {
if cs.req.Method == "HEAD" && len(data) > 0 {
cc.logf("protocol error: received DATA on a HEAD request")
rl.endStreamError(cs, http2StreamError{
StreamID: f.StreamID,
Code: http2ErrCodeProtocol,
})
return nil
}
// Check stream-level flow control (cs.inflow is coupled to the conn-level cc.inflow).
cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) {
cs.inflow.take(int32(f.Length))
} else {
cc.mu.Unlock()
return http2ConnectionError(http2ErrCodeFlowControl)
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
var refund int
if pad := int(f.Length) - len(data); pad > 0 {
refund += pad
}
// Return len(data) now if the stream is already closed,
// since data will never be read.
didReset := cs.didReset
if didReset {
refund += len(data)
}
if refund > 0 {
cc.inflow.add(int32(refund))
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(refund))
if !didReset {
cs.inflow.add(int32(refund))
cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
}
cc.bw.Flush()
cc.wmu.Unlock()
}
cc.mu.Unlock()
if len(data) > 0 && !didReset {
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
}
}
}
if f.StreamEnded() {
rl.endStream(cs)
}
return nil
}
var http2errInvalidTrailers = errors.New("http2: invalid trailers")
func (rl *http2clientConnReadLoop) endStream(cs *http2clientStream) {
// TODO: check that any declared content-length matches, like
// server.go's (*stream).endStream method.
rl.endStreamError(cs, nil)
}
func (rl *http2clientConnReadLoop) endStreamError(cs *http2clientStream, err error) {
var code func()
if err == nil {
err = io.EOF
code = cs.copyTrailers
}
if http2isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true
}
cs.bufPipe.closeWithErrorAndCode(err, code)
select {
case cs.resc <- http2resAndError{err: err}:
default:
}
}
func (cs *http2clientStream) copyTrailers() {
for k, vv := range cs.trailer {
t := cs.resTrailer
if *t == nil {
*t = make(Header)
}
(*t)[k] = vv
}
}
func (rl *http2clientConnReadLoop) processGoAway(f *http2GoAwayFrame) error {
cc := rl.cc
cc.t.connPool().MarkDead(cc)
if f.ErrCode != 0 {
// TODO: deal with GOAWAY more, particularly the error code
cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
}
cc.setGoAway(f)
return nil
}
func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
if f.IsAck() {
if cc.wantSettingsAck {
cc.wantSettingsAck = false
return nil
}
return http2ConnectionError(http2ErrCodeProtocol)
}
err := f.ForeachSetting(func(s http2Setting) error {
switch s.ID {
case http2SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case http2SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case http2SettingMaxHeaderListSize:
cc.peerMaxHeaderListSize = uint64(s.Val)
case http2SettingInitialWindowSize:
// Values above the maximum flow-control
// window size of 2^31-1 MUST be treated as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
if s.Val > math.MaxInt32 {
return http2ConnectionError(http2ErrCodeFlowControl)
}
// Adjust flow control of currently-open
// frames by the difference of the old initial
// window size and this one.
delta := int32(s.Val) - int32(cc.initialWindowSize)
for _, cs := range cc.streams {
cs.flow.add(delta)
}
cc.cond.Broadcast()
cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
cc.vlogf("Unhandled Setting: %v", s)
}
return nil
})
if err != nil {
return err
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
cc.fr.WriteSettingsAck()
cc.bw.Flush()
return cc.werr
}
func (rl *http2clientConnReadLoop) processWindowUpdate(f *http2WindowUpdateFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, false)
if f.StreamID != 0 && cs == nil {
return nil
}
cc.mu.Lock()
defer cc.mu.Unlock()
fl := &cc.flow
if cs != nil {
fl = &cs.flow
}
if !fl.add(int32(f.Increment)) {
return http2ConnectionError(http2ErrCodeFlowControl)
}
cc.cond.Broadcast()
return nil
}
func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) error {
cs := rl.cc.streamByID(f.StreamID, true)
if cs == nil {
// TODO: return error if server tries to RST_STREAM an idle stream
return nil
}
select {
case <-cs.peerReset:
// Already reset.
// This is the only goroutine
// which closes this, so there
// isn't a race.
default:
err := http2streamError(cs.ID, f.ErrCode)
cs.resetErr = err
close(cs.peerReset)
cs.bufPipe.CloseWithError(err)
cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
return nil
}
// Ping sends a PING frame to the server and waits for the ack.
func (cc *http2ClientConn) Ping(ctx context.Context) error {
c := make(chan struct{})
// Generate a random payload
var p [8]byte
for {
if _, err := rand.Read(p[:]); err != nil {
return err
}
cc.mu.Lock()
// check for dup before insert
if _, found := cc.pings[p]; !found {
cc.pings[p] = c
cc.mu.Unlock()
break
}
cc.mu.Unlock()
}
cc.wmu.Lock()
if err := cc.fr.WritePing(false, p); err != nil {
cc.wmu.Unlock()
return err
}
if err := cc.bw.Flush(); err != nil {
cc.wmu.Unlock()
return err
}
cc.wmu.Unlock()
select {
case <-c:
return nil
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
// connection closed
return cc.readerErr
}
}
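// Illustrative sketch (hypothetical helper): a liveness probe against an
// established connection using Ping above with a short deadline. The
// one-second budget is arbitrary.
func http2examplePingAlive(cc *http2ClientConn) bool {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return cc.Ping(ctx) == nil
}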
func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error {
if f.IsAck() {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
if c, ok := cc.pings[f.Data]; ok {
close(c)
delete(cc.pings, f.Data)
}
return nil
}
cc := rl.cc
cc.wmu.Lock()
defer cc.wmu.Unlock()
if err := cc.fr.WritePing(true, f.Data); err != nil {
return err
}
return cc.bw.Flush()
}
func (rl *http2clientConnReadLoop) processPushPromise(f *http2PushPromiseFrame) error {
// We told the peer we don't want server pushes.
// Spec says:
// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
// setting of the peer endpoint is set to 0. An endpoint that
// has set this setting and has received acknowledgement MUST
// treat the receipt of a PUSH_PROMISE frame as a connection
// error (Section 5.4.1) of type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
func (cc *http2ClientConn) writeStreamReset(streamID uint32, code http2ErrCode, err error) {
// TODO: map err to more interesting error codes, once the
// HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
cc.bw.Flush()
cc.wmu.Unlock()
}
var (
http2errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
http2errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
http2errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
)
func (cc *http2ClientConn) logf(format string, args ...interface{}) {
cc.t.logf(format, args...)
}
func (cc *http2ClientConn) vlogf(format string, args ...interface{}) {
cc.t.vlogf(format, args...)
}
func (t *http2Transport) vlogf(format string, args ...interface{}) {
if http2VerboseLogs {
t.logf(format, args...)
}
}
func (t *http2Transport) logf(format string, args ...interface{}) {
log.Printf(format, args...)
}
var http2noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
func http2strSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
type http2erringRoundTripper struct{ err error }
func (rt http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { return nil, rt.err }
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type http2gzipReader struct {
body io.ReadCloser // underlying Response.Body
zr *gzip.Reader // lazily-initialized gzip reader
zerr error // sticky error
}
func (gz *http2gzipReader) Read(p []byte) (n int, err error) {
if gz.zerr != nil {
return 0, gz.zerr
}
if gz.zr == nil {
gz.zr, err = gzip.NewReader(gz.body)
if err != nil {
gz.zerr = err
return 0, err
}
}
return gz.zr.Read(p)
}
func (gz *http2gzipReader) Close() error {
return gz.body.Close()
}
type http2errorReader struct{ err error }
func (r http2errorReader) Read(p []byte) (int, error) { return 0, r.err }
// bodyWriterState encapsulates various state around the Transport's writing
// of the request body, particularly regarding doing delayed writes of the body
// when the request contains "Expect: 100-continue".
type http2bodyWriterState struct {
cs *http2clientStream
timer *time.Timer // if non-nil, we're doing a delayed write
fnonce *sync.Once // to call fn with
fn func() // the code to run in the goroutine, writing the body
resc chan error // result of fn's execution
delay time.Duration // how long we should delay a delayed write for
}
func (t *http2Transport) getBodyWriterState(cs *http2clientStream, body io.Reader) (s http2bodyWriterState) {
s.cs = cs
if body == nil {
return
}
resc := make(chan error, 1)
s.resc = resc
s.fn = func() {
cs.cc.mu.Lock()
cs.startedWrite = true
cs.cc.mu.Unlock()
resc <- cs.writeRequestBody(body, cs.req.Body)
}
s.delay = t.expectContinueTimeout()
if s.delay == 0 ||
!httpguts.HeaderValuesContainsToken(
cs.req.Header["Expect"],
"100-continue") {
return
}
s.fnonce = new(sync.Once)
// Arm the timer with a very large duration, which we'll
// intentionally lower later. It has to be large now because
// we need a handle to it before writing the headers, but the
// s.delay value is defined to not start until after the
// request headers were written.
const hugeDuration = 365 * 24 * time.Hour
s.timer = time.AfterFunc(hugeDuration, func() {
s.fnonce.Do(s.fn)
})
return
}
func (s http2bodyWriterState) cancel() {
if s.timer != nil {
s.timer.Stop()
}
}
func (s http2bodyWriterState) on100() {
if s.timer == nil {
// If we didn't do a delayed write, ignore the server's
// bogus 100 continue response.
return
}
s.timer.Stop()
go func() { s.fnonce.Do(s.fn) }()
}
// scheduleBodyWrite starts writing the body, either immediately (in
// the common case) or after the delay timeout. It should not be
// called until after the headers have been written.
func (s http2bodyWriterState) scheduleBodyWrite() {
if s.timer == nil {
// We're not doing a delayed write (see
// getBodyWriterState), so just start the writing
// goroutine immediately.
go s.fn()
return
}
http2traceWait100Continue(s.cs.trace)
if s.timer.Stop() {
s.timer.Reset(s.delay)
}
}
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func http2isConnectionCloseRequest(req *Request) bool {
return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
// registerHTTPSProtocol calls Transport.RegisterProtocol, but
// converts panics into errors.
func http2registerHTTPSProtocol(t *Transport, rt http2noDialH2RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("%v", e)
}
}()
t.RegisterProtocol("https", rt)
return nil
}
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType)
type http2noDialH2RoundTripper struct{ *http2Transport }
func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) {
res, err := rt.http2Transport.RoundTrip(req)
if http2isNoCachedConnError(err) {
return nil, ErrSkipAltProtocol
}
return res, err
}
func (t *http2Transport) idleConnTimeout() time.Duration {
if t.t1 != nil {
return t.t1.IdleConnTimeout
}
return 0
}
func http2traceGetConn(req *Request, hostPort string) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GetConn == nil {
return
}
trace.GetConn(hostPort)
}
func http2traceGotConn(req *Request, cc *http2ClientConn) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GotConn == nil {
return
}
ci := httptrace.GotConnInfo{Conn: cc.tconn}
cc.mu.Lock()
ci.Reused = cc.nextStreamID > 1
ci.WasIdle = len(cc.streams) == 0 && ci.Reused
if ci.WasIdle && !cc.lastActive.IsZero() {
ci.IdleTime = time.Now().Sub(cc.lastActive)
}
cc.mu.Unlock()
trace.GotConn(ci)
}
func http2traceWroteHeaders(trace *httptrace.ClientTrace) {
if trace != nil && trace.WroteHeaders != nil {
trace.WroteHeaders()
}
}
func http2traceGot100Continue(trace *httptrace.ClientTrace) {
if trace != nil && trace.Got100Continue != nil {
trace.Got100Continue()
}
}
func http2traceWait100Continue(trace *httptrace.ClientTrace) {
if trace != nil && trace.Wait100Continue != nil {
trace.Wait100Continue()
}
}
func http2traceWroteRequest(trace *httptrace.ClientTrace, err error) {
if trace != nil && trace.WroteRequest != nil {
trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
}
}
func http2traceFirstResponseByte(trace *httptrace.ClientTrace) {
if trace != nil && trace.GotFirstResponseByte != nil {
trace.GotFirstResponseByte()
}
}
// writeFramer is implemented by any type that is used to write frames.
type http2writeFramer interface {
writeFrame(http2writeContext) error
// staysWithinBuffer reports whether this writer promises that
// it will write at most size bytes, and that it
// won't Flush the write context.
staysWithinBuffer(size int) bool
}
// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and
// make the server code a bit more concrete.
type http2writeContext interface {
Framer() *http2Framer
Flush() error
CloseConn() error
// HeaderEncoder returns an HPACK encoder that writes to the
// returned buffer.
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func http2writeEndsStream(w http2writeFramer) bool {
switch v := w.(type) {
case *http2writeData:
return v.endStream
case *http2writeResHeaders:
return v.endStream
case nil:
// This can only happen if the caller reuses w after it's
// been intentionally nil'ed out to prevent use. Keep this
// here to catch future refactoring breaking it.
panic("writeEndsStream called on nil writeFramer")
}
return false
}
type http2flushFrameWriter struct{}
func (http2flushFrameWriter) writeFrame(ctx http2writeContext) error {
return ctx.Flush()
}
func (http2flushFrameWriter) staysWithinBuffer(max int) bool { return false }
type http2writeSettings []http2Setting
func (s http2writeSettings) staysWithinBuffer(max int) bool {
const settingSize = 6 // uint16 + uint32
return http2frameHeaderLen+settingSize*len(s) <= max
}
func (s http2writeSettings) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WriteSettings([]http2Setting(s)...)
}
type http2writeGoAway struct {
maxStreamID uint32
code http2ErrCode
}
func (p *http2writeGoAway) writeFrame(ctx http2writeContext) error {
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
ctx.Flush() // ignore error: we're hanging up on them anyway
return err
}
func (*http2writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
type http2writeData struct {
streamID uint32
p []byte
endStream bool
}
func (w *http2writeData) String() string {
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
}
func (w *http2writeData) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
func (w *http2writeData) staysWithinBuffer(max int) bool {
return http2frameHeaderLen+len(w.p) <= max
}
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type http2handlerPanicRST struct {
StreamID uint32
}
func (hp http2handlerPanicRST) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WriteRSTStream(hp.StreamID, http2ErrCodeInternal)
}
func (hp http2handlerPanicRST) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
func (se http2StreamError) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}
func (se http2StreamError) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
type http2writePingAck struct{ pf *http2PingFrame }
func (w http2writePingAck) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WritePing(true, w.pf.Data)
}
func (w http2writePingAck) staysWithinBuffer(max int) bool {
return http2frameHeaderLen+len(w.pf.Data) <= max
}
type http2writeSettingsAck struct{}
func (http2writeSettingsAck) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WriteSettingsAck()
}
func (http2writeSettingsAck) staysWithinBuffer(max int) bool { return http2frameHeaderLen <= max }
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
// for the first/last fragment, respectively.
func http2splitHeaderBlock(ctx http2writeContext, headerBlock []byte, fn func(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
// that all peers must support (16KB). Later we could care
// more and send larger frames if the peer advertised it, but
// there's little point. Most headers are small anyway (so we
// generally won't have CONTINUATION frames), and extra frames
// only add 9 bytes of frame-header overhead each.
const maxFrameSize = 16384
first := true
for len(headerBlock) > 0 {
frag := headerBlock
if len(frag) > maxFrameSize {
frag = frag[:maxFrameSize]
}
headerBlock = headerBlock[len(frag):]
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
return err
}
first = false
}
return nil
}
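// Illustrative sketch (hypothetical helper): counting how many frames a header
// block would be split into by splitHeaderBlock. The counting callback ignores
// the write context, so nil is fine here; a 40000-byte block, for example,
// yields 3 fragments (one HEADERS plus two CONTINUATIONs).
func http2exampleCountHeaderFrames(headerBlock []byte) int {
	frames := 0
	_ = http2splitHeaderBlock(nil, headerBlock, func(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
		frames++
		return nil
	})
	return frames
}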
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type http2writeResHeaders struct {
streamID uint32
httpResCode int // 0 means no ":status" line
h Header // may be nil
trailers []string // if non-nil, which keys of h to write. nil means all.
endStream bool
date string
contentType string
contentLength string
}
func http2encKV(enc *hpack.Encoder, k, v string) {
if http2VerboseLogs {
log.Printf("http2: server encoding header %q = %q", k, v)
}
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
func (w *http2writeResHeaders) staysWithinBuffer(max int) bool {
// TODO: this is a common one. It'd be nice to return true
// here and get into the fast path if we could be clever and
// calculate the size fast enough, or at least a conservative
// upper bound that usually fires. (Maybe if w.h and
// w.trailers are nil, so we don't need to enumerate it.)
// Otherwise I'm afraid that just calculating the length to
// answer this question would be slower than the ~2µs benefit.
return false
}
func (w *http2writeResHeaders) writeFrame(ctx http2writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
if w.httpResCode != 0 {
http2encKV(enc, ":status", http2httpCodeString(w.httpResCode))
}
http2encodeHeaders(enc, w.h, w.trailers)
if w.contentType != "" {
http2encKV(enc, "content-type", w.contentType)
}
if w.contentLength != "" {
http2encKV(enc, "content-length", w.contentLength)
}
if w.date != "" {
http2encKV(enc, "date", w.date)
}
headerBlock := buf.Bytes()
if len(headerBlock) == 0 && w.trailers == nil {
panic("unexpected empty hpack")
}
return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}
func (w *http2writeResHeaders) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: frag,
EndStream: w.endStream,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type http2writePushPromise struct {
streamID uint32 // pusher stream
method string // for :method
url *url.URL // for :scheme, :authority, :path
h Header
// Creates an ID for a pushed stream. This runs on serveG just before
// the frame is written. The returned ID is copied to promisedID.
allocatePromisedID func() (uint32, error)
promisedID uint32
}
func (w *http2writePushPromise) staysWithinBuffer(max int) bool {
// TODO: see writeResHeaders.staysWithinBuffer
return false
}
func (w *http2writePushPromise) writeFrame(ctx http2writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
http2encKV(enc, ":method", w.method)
http2encKV(enc, ":scheme", w.url.Scheme)
http2encKV(enc, ":authority", w.url.Host)
http2encKV(enc, ":path", w.url.RequestURI())
http2encodeHeaders(enc, w.h, nil)
headerBlock := buf.Bytes()
if len(headerBlock) == 0 {
panic("unexpected empty hpack")
}
return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}
func (w *http2writePushPromise) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WritePushPromise(http2PushPromiseParam{
StreamID: w.streamID,
PromiseID: w.promisedID,
BlockFragment: frag,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
type http2write100ContinueHeadersFrame struct {
streamID uint32
}
func (w http2write100ContinueHeadersFrame) writeFrame(ctx http2writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
http2encKV(enc, ":status", "100")
return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: buf.Bytes(),
EndStream: false,
EndHeaders: true,
})
}
func (w http2write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
// Sloppy but conservative:
return 9+2*(len(":status")+len("100")) <= max
}
type http2writeWindowUpdate struct {
streamID uint32 // or 0 for conn-level
n uint32
}
func (wu http2writeWindowUpdate) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys.
func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) {
if keys == nil {
sorter := http2sorterPool.Get().(*http2sorter)
// Using defer here, since the returned keys from the
// sorter.Keys method are only valid until the sorter
// is returned:
defer http2sorterPool.Put(sorter)
keys = sorter.Keys(h)
}
for _, k := range keys {
vv := h[k]
k = http2lowerHeader(k)
if !http2validWireHeaderFieldName(k) {
// Skip it as backup paranoia. Per
// golang.org/issue/14048, these should
// already be rejected at a higher level.
continue
}
isTE := k == "transfer-encoding"
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// TODO: return an error? golang.org/issue/14048
// For now just omit it.
continue
}
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
if isTE && v != "trailers" {
continue
}
http2encKV(enc, k, v)
}
}
}
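// Illustrative sketch (hypothetical helper): encoding a small header set into
// an HPACK block with encodeHeaders above, restricted to a subset of keys. The
// server path gets its encoder and buffer from the writeContext instead; the
// local ones here are only for illustration.
func http2exampleEncodeSubset() []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	h := Header{
		"Content-Type": {"text/plain"},
		"X-Debug":      {"1"},
	}
	// Only Content-Type is encoded; X-Debug is skipped because it is not in keys.
	http2encodeHeaders(enc, h, []string{"Content-Type"})
	return buf.Bytes()
}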
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
// Methods are never called concurrently.
type http2WriteScheduler interface {
// OpenStream opens a new stream in the write scheduler.
// It is illegal to call this with streamID=0 or with a streamID that is
// already open -- the call may panic.
OpenStream(streamID uint32, options http2OpenStreamOptions)
// CloseStream closes a stream in the write scheduler. Any frames queued on
// this stream should be discarded. It is illegal to call this on a stream
// that is not open -- the call may panic.
CloseStream(streamID uint32)
// AdjustStream adjusts the priority of the given stream. This may be called
// on a stream that has not yet been opened or has been closed. Note that
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
// https://tools.ietf.org/html/rfc7540#section-5.1
AdjustStream(streamID uint32, priority http2PriorityParam)
// Push queues a frame in the scheduler. In most cases, this will not be
// called with wr.StreamID()!=0 unless that stream is currently open. The one
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
Push(wr http2FrameWriteRequest)
// Pop dequeues the next frame to write. Returns false if no frames can
// be written. Frames with a given wr.StreamID() are Pop'd in the same
// order they are Push'd.
Pop() (wr http2FrameWriteRequest, ok bool)
}
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
type http2OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
}
// FrameWriteRequest is a request to write a frame.
type http2FrameWriteRequest struct {
// write is the interface value that does the writing, once the
// WriteScheduler has selected this frame to write. The write
// functions are all defined in write.go.
write http2writeFramer
// stream is the stream on which this frame will be written.
// nil for non-stream frames like PING and SETTINGS.
stream *http2stream
// done, if non-nil, must be a buffered channel with space for
// 1 message and is sent the return value from write (or an
// earlier error) when the frame has been written.
done chan error
}
// StreamID returns the id of the stream this frame will be written to.
// 0 is used for non-stream frames such as PING and SETTINGS.
func (wr http2FrameWriteRequest) StreamID() uint32 {
if wr.stream == nil {
if se, ok := wr.write.(http2StreamError); ok {
// (*serverConn).resetStream doesn't set
// stream because it doesn't necessarily have
// one. So special case this type of write
// message.
return se.StreamID
}
return 0
}
return wr.stream.id
}
// DataSize returns the number of flow control bytes that must be consumed
// to write this entire frame. This is 0 for non-DATA frames.
func (wr http2FrameWriteRequest) DataSize() int {
if wd, ok := wr.write.(*http2writeData); ok {
return len(wd.p)
}
return 0
}
// Consume consumes min(n, available) bytes from this frame, where available
// is the number of flow control bytes available on the stream. Consume returns
// 0, 1, or 2 frames, where the integer return value gives the number of frames
// returned.
//
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
// underlying stream's flow control budget.
func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2FrameWriteRequest, int) {
var empty http2FrameWriteRequest
// Non-DATA frames are always consumed whole.
wd, ok := wr.write.(*http2writeData)
if !ok || len(wd.p) == 0 {
return wr, empty, 1
}
// Might need to split after applying limits.
allowed := wr.stream.flow.available()
if n < allowed {
allowed = n
}
if wr.stream.sc.maxFrameSize < allowed {
allowed = wr.stream.sc.maxFrameSize
}
if allowed <= 0 {
return empty, empty, 0
}
if len(wd.p) > int(allowed) {
wr.stream.flow.take(allowed)
consumed := http2FrameWriteRequest{
stream: wr.stream,
write: &http2writeData{
streamID: wd.streamID,
p: wd.p[:allowed],
// Even if the original had endStream set, there
// are bytes remaining because len(wd.p) > allowed,
// so we know endStream is false.
endStream: false,
},
// Our caller is blocking on the final DATA frame, not
// this intermediate frame, so no need to wait.
done: nil,
}
rest := http2FrameWriteRequest{
stream: wr.stream,
write: &http2writeData{
streamID: wd.streamID,
p: wd.p[allowed:],
endStream: wd.endStream,
},
done: wr.done,
}
return consumed, rest, 2
}
// The frame is consumed whole.
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
wr.stream.flow.take(int32(len(wd.p)))
return wr, empty, 1
}
// String is for debugging only.
func (wr http2FrameWriteRequest) String() string {
var des string
if s, ok := wr.write.(fmt.Stringer); ok {
des = s.String()
} else {
des = fmt.Sprintf("%T", wr.write)
}
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
}
// replyToWriter sends err to wr.done and panics if the send must block
// This does nothing if wr.done is nil.
func (wr *http2FrameWriteRequest) replyToWriter(err error) {
if wr.done == nil {
return
}
select {
case wr.done <- err:
default:
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
}
wr.write = nil // prevent use (assume it's tainted after wr.done send)
}
// writeQueue is used by implementations of WriteScheduler.
type http2writeQueue struct {
s []http2FrameWriteRequest
}
func (q *http2writeQueue) empty() bool { return len(q.s) == 0 }
func (q *http2writeQueue) push(wr http2FrameWriteRequest) {
q.s = append(q.s, wr)
}
func (q *http2writeQueue) shift() http2FrameWriteRequest {
if len(q.s) == 0 {
panic("invalid use of queue")
}
wr := q.s[0]
// TODO: less copy-happy queue.
copy(q.s, q.s[1:])
q.s[len(q.s)-1] = http2FrameWriteRequest{}
q.s = q.s[:len(q.s)-1]
return wr
}
// consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *http2writeQueue) consume(n int32) (http2FrameWriteRequest, bool) {
if len(q.s) == 0 {
return http2FrameWriteRequest{}, false
}
consumed, rest, numresult := q.s[0].Consume(n)
switch numresult {
case 0:
return http2FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
q.s[0] = rest
}
return consumed, true
}
type http2writeQueuePool []*http2writeQueue
// put inserts an unused writeQueue into the pool.
func (p *http2writeQueuePool) put(q *http2writeQueue) {
for i := range q.s {
q.s[i] = http2FrameWriteRequest{}
}
q.s = q.s[:0]
*p = append(*p, q)
}
// get returns an empty writeQueue.
func (p *http2writeQueuePool) get() *http2writeQueue {
ln := len(*p)
if ln == 0 {
return new(http2writeQueue)
}
x := ln - 1
q := (*p)[x]
(*p)[x] = nil
*p = (*p)[:x]
return q
}
// RFC 7540, Section 5.3.5: the default weight is 16.
const http2priorityDefaultWeight = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type http2PriorityWriteSchedulerConfig struct {
// MaxClosedNodesInTree controls the maximum number of closed streams to
// retain in the priority tree. Setting this to zero saves a small amount
// of memory at the cost of performance.
//
// See RFC 7540, Section 5.3.4:
// "It is possible for a stream to become closed while prioritization
// information ... is in transit. ... This potentially creates suboptimal
// prioritization, since the stream could be given a priority that is
// different from what is intended. To avoid these problems, an endpoint
// SHOULD retain stream prioritization state for a period after streams
// become closed. The longer state is retained, the lower the chance that
// streams are assigned incorrect or default priority values."
MaxClosedNodesInTree int
// MaxIdleNodesInTree controls the maximum number of idle streams to
// retain in the priority tree. Setting this to zero saves a small amount
// of memory at the cost of performance.
//
// See RFC 7540, Section 5.3.4:
// Similarly, streams that are in the "idle" state can be assigned
// priority or become a parent of other streams. This allows for the
// creation of a grouping node in the dependency tree, which enables
// more flexible expressions of priority. Idle streams begin with a
// default priority (Section 5.3.5).
MaxIdleNodesInTree int
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
// data is delivered in priority order. This works around a race where
// stream B depends on stream A and both streams are about to call Write
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
// write as much data from B as possible, but this is suboptimal because A
// is a higher-priority stream. With throttling enabled, we write a small
// amount of data from B to minimize the amount of bandwidth that B can
// steal from A.
ThrottleOutOfOrderWrites bool
}
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used.
func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http2WriteScheduler {
if cfg == nil {
// For justification of these defaults, see:
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
cfg = &http2PriorityWriteSchedulerConfig{
MaxClosedNodesInTree: 10,
MaxIdleNodesInTree: 10,
ThrottleOutOfOrderWrites: false,
}
}
ws := &http2priorityWriteScheduler{
nodes: make(map[uint32]*http2priorityNode),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
}
ws.nodes[0] = &ws.root
if cfg.ThrottleOutOfOrderWrites {
ws.writeThrottleLimit = 1024
} else {
ws.writeThrottleLimit = math.MaxInt32
}
return ws
}
type http2priorityNodeState int
const (
http2priorityNodeOpen http2priorityNodeState = iota
http2priorityNodeClosed
http2priorityNodeIdle
)
// priorityNode is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type http2priorityNode struct {
q http2writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state http2priorityNodeState // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
parent *http2priorityNode
kids *http2priorityNode // start of the kids list
prev, next *http2priorityNode // doubly-linked list of siblings
}
func (n *http2priorityNode) setParent(parent *http2priorityNode) {
if n == parent {
panic("setParent to self")
}
if n.parent == parent {
return
}
// Unlink from current parent.
if parent := n.parent; parent != nil {
if n.prev == nil {
parent.kids = n.next
} else {
n.prev.next = n.next
}
if n.next != nil {
n.next.prev = n.prev
}
}
// Link to new parent.
// If parent=nil, remove n from the tree.
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
n.parent = parent
if parent == nil {
n.next = nil
n.prev = nil
} else {
n.next = parent.kids
n.prev = nil
if n.next != nil {
n.next.prev = n
}
parent.kids = n
}
}
func (n *http2priorityNode) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
}
}
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNode, f func(*http2priorityNode, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
if n.kids == nil {
return false
}
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
openParent = openParent || (n.state == http2priorityNodeOpen)
}
// Common case: only one kid or all kids have the same weight.
// Some clients don't use weights; other clients (like web browsers)
// use mostly-linear priority trees.
w := n.kids.weight
needSort := false
for k := n.kids.next; k != nil; k = k.next {
if k.weight != w {
needSort = true
break
}
}
if !needSort {
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
return true
}
}
return false
}
// Uncommon case: sort the child nodes. We remove the kids from the parent,
// then re-insert after sorting so we can reuse tmp for future sort calls.
*tmp = (*tmp)[:0]
for n.kids != nil {
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
sort.Sort(http2sortPriorityNodeSiblings(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
return true
}
}
return false
}
type http2sortPriorityNodeSiblings []*http2priorityNode
func (z http2sortPriorityNodeSiblings) Len() int { return len(z) }
func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z http2sortPriorityNodeSiblings) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
if bk == 0 {
return false
}
return bi/bk <= wi/wk
}
type http2priorityWriteScheduler struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
root http2priorityNode
// nodes maps stream ids to priority tree nodes.
nodes map[uint32]*http2priorityNode
// maxID is the maximum stream id in nodes.
maxID uint32
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
closedNodes, idleNodes []*http2priorityNode
// From the config.
maxClosedNodesInTree int
maxIdleNodesInTree int
writeThrottleLimit int32
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
tmp []*http2priorityNode
// pool of empty queues for reuse.
queuePool http2writeQueuePool
}
func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
if curr.state != http2priorityNodeIdle {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
curr.state = http2priorityNodeOpen
return
}
// RFC 7540, Section 5.3.5:
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
// Pushed streams initially depend on their associated stream. In both cases,
// streams are assigned a default weight of 16."
parent := ws.nodes[options.PusherID]
if parent == nil {
parent = &ws.root
}
n := &http2priorityNode{
q: *ws.queuePool.get(),
id: streamID,
weight: http2priorityDefaultWeight,
state: http2priorityNodeOpen,
}
n.setParent(parent)
ws.nodes[streamID] = n
if streamID > ws.maxID {
ws.maxID = streamID
}
}
func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
if ws.nodes[streamID].state != http2priorityNodeOpen {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
n.state = http2priorityNodeClosed
n.addBytes(-n.bytes)
q := n.q
ws.queuePool.put(&q)
n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
ws.removeNode(n)
}
}
func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
// If streamID does not exist, there are two cases:
// - A closed stream that has been removed (this will have ID <= maxID)
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
n := ws.nodes[streamID]
if n == nil {
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
return
}
ws.maxID = streamID
n = &http2priorityNode{
q: *ws.queuePool.get(),
id: streamID,
weight: http2priorityDefaultWeight,
state: http2priorityNodeIdle,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
}
// Section 5.3.1: A dependency on a stream that is not currently in the tree
// results in that stream being given a default priority (Section 5.3.5).
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
n.weight = http2priorityDefaultWeight
return
}
// Ignore if the client tries to make a node its own parent.
if n == parent {
return
}
// Section 5.3.3:
// "If a stream is made dependent on one of its own dependencies, the
// formerly dependent stream is first moved to be dependent on the
// reprioritized stream's previous parent. The moved dependency retains
// its weight."
//
// That is: if parent depends on n, move parent to depend on n.parent.
for x := parent.parent; x != nil; x = x.parent {
if x == n {
parent.setParent(n.parent)
break
}
}
// Section 5.3.3: The exclusive flag causes the stream to become the sole
// dependency of its parent stream, causing other dependencies to become
// dependent on the exclusive stream.
if priority.Exclusive {
k := parent.kids
for k != nil {
next := k.next
if k != n {
k.setParent(n)
}
k = next
}
}
n.setParent(parent)
n.weight = priority.Weight
}
func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) {
var n *http2priorityNode
if id := wr.StreamID(); id == 0 {
n = &ws.root
} else {
n = ws.nodes[id]
if n == nil {
// id is an idle or closed stream. wr should not be a HEADERS or
// DATA frame. However, wr can be a RST_STREAM. In this case, we
// push wr onto the root, rather than creating a new priorityNode,
// since RST_STREAM is tiny and the stream's priority is unknown
// anyway. See issue #17919.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
n = &ws.root
}
}
n.q.push(wr)
}
func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNode, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
}
wr, ok = n.q.consume(limit)
if !ok {
return false
}
n.addBytes(int64(wr.DataSize()))
// If B depends on A and B continuously has data available but A
// does not, gradually increase the throttling limit to allow B to
// steal more and more bandwidth from A.
if openParent {
ws.writeThrottleLimit += 1024
if ws.writeThrottleLimit < 0 {
ws.writeThrottleLimit = math.MaxInt32
}
} else if ws.enableWriteThrottle {
ws.writeThrottleLimit = 1024
}
return true
})
return wr, ok
}
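// addClosedOrIdleNode retains n in the given closed/idle list so its priority
// information survives for a while after the stream goes away; once the list
// reaches maxSize, the oldest retained node is fully removed from the tree to
// make room.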
func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorityNode, maxSize int, n *http2priorityNode) {
if maxSize == 0 {
return
}
if len(*list) == maxSize {
// Remove the oldest node, then shift left.
ws.removeNode((*list)[0])
x := (*list)[1:]
copy(*list, x)
*list = (*list)[:len(x)]
}
*list = append(*list, n)
}
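// removeNode detaches n from the priority tree, reparenting its children to
// n's parent, and forgets its stream id.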
func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) {
for k := n.kids; k != nil; k = k.next {
k.setParent(n.parent)
}
n.setParent(nil)
delete(ws.nodes, n.id)
}
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
// priorities. Control frames like SETTINGS and PING are written before DATA
// frames, but if no control frames are queued and multiple streams have queued
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
func http2NewRandomWriteScheduler() http2WriteScheduler {
return &http2randomWriteScheduler{sq: make(map[uint32]*http2writeQueue)}
}
type http2randomWriteScheduler struct {
// zero are frames not associated with a specific stream.
zero http2writeQueue
// sq contains the stream-specific queues, keyed by stream ID.
// When a stream is idle or closed, it's deleted from the map.
sq map[uint32]*http2writeQueue
// pool of empty queues for reuse.
queuePool http2writeQueuePool
}
func (ws *http2randomWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
// no-op: idle streams are not tracked
}
func (ws *http2randomWriteScheduler) CloseStream(streamID uint32) {
q, ok := ws.sq[streamID]
if !ok {
return
}
delete(ws.sq, streamID)
ws.queuePool.put(q)
}
func (ws *http2randomWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
// no-op: priorities are ignored
}
func (ws *http2randomWriteScheduler) Push(wr http2FrameWriteRequest) {
id := wr.StreamID()
if id == 0 {
ws.zero.push(wr)
return
}
q, ok := ws.sq[id]
if !ok {
q = ws.queuePool.get()
ws.sq[id] = q
}
q.push(wr)
}
func (ws *http2randomWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
// Control frames first.
if !ws.zero.empty() {
return ws.zero.shift(), true
}
// Iterate over all non-idle streams until finding one that can be consumed.
for _, q := range ws.sq {
if wr, ok := q.consume(math.MaxInt32); ok {
return wr, true
}
}
return http2FrameWriteRequest{}, false
}
| [
"\"DEBUG_HTTP2_GOROUTINES\"",
"\"GODEBUG\""
]
| []
| [
"GODEBUG",
"DEBUG_HTTP2_GOROUTINES"
]
| [] | ["GODEBUG", "DEBUG_HTTP2_GOROUTINES"] | go | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "telegramtoken.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pytype/pytd/typeshed.py | """Utilities for parsing typeshed files."""
import os
from pytype import utils
from pytype.pytd.parse import builtins
class Typeshed(object):
"""A typeshed installation.
The location is either retrieved from the environment variable
"TYPESHED_HOME" (if set) or otherwise assumed to be directly under
pytype (i.e., /{some_path}/pytype/typeshed).
"""
def __init__(self):
home = os.getenv("TYPESHED_HOME")
if home and not os.path.isdir(home):
raise IOError("No typeshed directory %s" % home)
if home:
self._typeshed_path = home
else:
# Not guaranteed to really exist (.egg, etc)
pytype_base = os.path.split(os.path.dirname(__file__))[0]
self._typeshed_path = os.path.join(pytype_base, "typeshed")
self._env_home = home
self._missing = frozenset(self._load_missing())
def _load_file(self, path):
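"""Return (filename, data) for a file under the typeshed root, reading from TYPESHED_HOME when set and otherwise from the typeshed copy bundled with pytype."""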
if self._env_home:
filename = os.path.join(self._env_home, path)
with open(filename, "rb") as f:
return filename, f.read()
else:
# Use typeshed bundled with pytype. Note we don't use self._typeshed_path
# to load the file, since load_pytype_file has its own path logic.
data = utils.load_pytype_file(os.path.join("typeshed", path))
return os.path.join(self._typeshed_path, path), data
def _load_missing(self):
return set()
@property
def missing(self):
"""Set of known-missing typeshed modules, as strings of paths."""
return self._missing
@property
def typeshed_path(self):
"""Path of typeshed's root directory.
Returns:
Base of filenames returned by get_module_file(). Not guaranteed to exist
if typeshed is bundled with pytype.
"""
return self._typeshed_path
def get_module_file(self, toplevel, module, version):
"""Get the contents of a typeshed file, typically with a file name *.pyi.
Arguments:
toplevel: the top-level directory within typeshed/, typically "builtins",
"stdlib" or "third_party".
module: module name (e.g., "sys" or "__builtins__"). Can contain dots, if
it's a submodule.
version: The Python version. (major, minor)
Returns:
A tuple with the filename and contents of the file
Raises:
IOError: if file not found
"""
module_path = os.path.join(*module.split("."))
versions = ["%d.%d" % (version[0], minor)
for minor in range(version[1], -1, -1)]
# E.g. for Python 3.5, try 3.5/, 3.4/, 3.3/, ..., 3.0/, 3/, 2and3.
# E.g. for Python 2.7, try 2.7/, 2.6/, ..., 2/, 2and3.
# The order is the same as that of mypy. See default_lib_path in
# https://github.com/JukkaL/mypy/blob/master/mypy/build.py#L249
for v in versions + [str(version[0]), "2and3"]:
path_rel = os.path.join(toplevel, v, module_path)
# Give precedence to missing.txt
if path_rel in self._missing:
return (os.path.join(self._typeshed_path, "nonexistent",
path_rel + ".pyi"),
builtins.DEFAULT_SRC)
for path in [os.path.join(path_rel, "__init__.pyi"), path_rel + ".pyi"]:
try:
return self._load_file(path)
except IOError:
pass
raise IOError("Couldn't find %s" % module)
_typeshed = None
def parse_type_definition(pyi_subdir, module, python_version):
"""Load and parse a *.pyi from typeshed.
Args:
pyi_subdir: the directory where the module should be found
module: the module name (without any file extension)
python_version: sys.version_info[:2]
Returns:
The AST of the module; None if the module doesn't have a definition.
"""
global _typeshed
if _typeshed is None:
_typeshed = Typeshed()
try:
filename, src = _typeshed.get_module_file(pyi_subdir,
module,
python_version)
except IOError:
return None
return builtins.ParsePyTD(src, filename=filename, module=module,
python_version=python_version).Replace(name=module)
| []
| []
| [
"TYPESHED_HOME"
]
| [] | ["TYPESHED_HOME"] | python | 1 | 0 | |
cmd/run-linter/run-linter.go | package main
import (
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/mongodb/grip"
)
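// result holds the outcome of running the linters over a single package, in a
// form that can be printed in gotest-compatible format.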
type result struct {
name string
cmd string
passed bool
duration time.Duration
output []string
}
// String prints the results of a linter run in gotest format.
func (r *result) String() string {
buf := &bytes.Buffer{}
fmt.Fprintln(buf, "=== RUN", r.name)
if r.passed {
fmt.Fprintf(buf, "--- PASS: %s (%s)", r.name, r.duration)
} else {
fmt.Fprint(buf, strings.Join(r.output, "\n"))
fmt.Fprintf(buf, "--- FAIL: %s (%s)", r.name, r.duration)
}
return buf.String()
}
// fixup goes through the output and improves the output generated by
// specific linters so that all output includes the relative path to the
// error, instead of mixing relative and absolute paths.
func (r *result) fixup(dirname string) {
for idx, ln := range r.output {
if strings.HasPrefix(ln, dirname) {
r.output[idx] = ln[len(dirname)+1:]
}
}
}
// runs the gometalinter on a list of packages; integrating with the "make lint" target.
func main() {
var (
lintArgs string
lintBin string
customLintersFlag string
customLinters []string
packageList string
output string
packages []string
results []*result
hasFailingTest bool
gopath = os.Getenv("GOPATH")
)
gopath, _ = filepath.Abs(gopath)
flag.StringVar(&lintArgs, "lintArgs", "", "args to pass to gometalinter")
flag.StringVar(&lintBin, "lintBin", filepath.Join(gopath, "bin", "golangci-lint"), "path to go metalinter")
flag.StringVar(&packageList, "packages", "", "list of space separated packages")
flag.StringVar(&customLintersFlag, "customLinters", "", "list of comma-separated custom linter commands")
flag.StringVar(&output, "output", "", "output file for to write results.")
flag.Parse()
customLinters = strings.Split(customLintersFlag, ",")
packages = strings.Split(strings.Replace(packageList, "-", "/", -1), " ")
dirname, _ := os.Getwd()
cwd := filepath.Base(dirname)
lintArgs += fmt.Sprintf(" --concurrency=%d", runtime.NumCPU()/2)
for _, pkg := range packages {
pkgDir := "./"
if cwd != pkg {
pkgDir += pkg
}
args := []string{lintBin, "run", lintArgs, pkgDir}
startAt := time.Now()
cmd := strings.Join(args, " ")
out, err := exec.Command("sh", "-c", cmd).CombinedOutput()
r := &result{
cmd: strings.Join(args, " "),
name: "lint-" + strings.Replace(pkg, "/", "-", -1),
passed: err == nil,
duration: time.Since(startAt),
output: strings.Split(string(out), "\n"),
}
for _, linter := range customLinters {
customLinterStart := time.Now()
out, err = exec.Command("sh", "-c", fmt.Sprintf("%s %s", linter, pkgDir)).CombinedOutput()
r.passed = r.passed && err == nil
r.duration += time.Since(customLinterStart)
r.output = append(r.output, strings.Split(string(out), "\n")...)
}
r.fixup(dirname)
if !r.passed {
hasFailingTest = true
}
results = append(results, r)
fmt.Println(r)
}
if output != "" {
f, err := os.Create(output)
if err != nil {
os.Exit(1)
}
defer func() {
if err := f.Close(); err != nil {
panic(err)
}
}()
for _, r := range results {
if _, err = f.WriteString(r.String() + "\n"); err != nil {
grip.Error(err)
os.Exit(1)
}
}
}
if hasFailingTest {
os.Exit(1)
}
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
cte/wsgi.py | """
WSGI config for cte project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cte.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
manage.py | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demobaza.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/cmd/grafana-server/server.go | package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"time"
"github.com/facebookgo/inject"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/registry"
"golang.org/x/sync/errgroup"
"github.com/grafana/grafana/pkg/api"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/login"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/social"
// self registering services
_ "github.com/grafana/grafana/pkg/extensions"
_ "github.com/grafana/grafana/pkg/metrics"
_ "github.com/grafana/grafana/pkg/plugins"
_ "github.com/grafana/grafana/pkg/services/alerting"
_ "github.com/grafana/grafana/pkg/services/cleanup"
_ "github.com/grafana/grafana/pkg/services/notifications"
_ "github.com/grafana/grafana/pkg/services/provisioning"
_ "github.com/grafana/grafana/pkg/services/rendering"
_ "github.com/grafana/grafana/pkg/services/search"
_ "github.com/grafana/grafana/pkg/services/sqlstore"
_ "github.com/grafana/grafana/pkg/tracing"
)
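// NewGrafanaServer returns a new server instance with a cancellable root
// context and an errgroup used to manage its background services.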
func NewGrafanaServer() *GrafanaServerImpl {
rootCtx, shutdownFn := context.WithCancel(context.Background())
childRoutines, childCtx := errgroup.WithContext(rootCtx)
return &GrafanaServerImpl{
context: childCtx,
shutdownFn: shutdownFn,
childRoutines: childRoutines,
log: log.New("server"),
cfg: setting.NewCfg(),
}
}
type GrafanaServerImpl struct {
context context.Context
shutdownFn context.CancelFunc
childRoutines *errgroup.Group
log log.Logger
cfg *setting.Cfg
shutdownReason string
shutdownInProgress bool
RouteRegister api.RouteRegister `inject:""`
HttpServer *api.HTTPServer `inject:""`
}
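// Run loads the configuration, wires all registered services into the
// dependency graph, initializes them, and runs the background services,
// blocking until the first of them stops or the server shuts down.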
func (g *GrafanaServerImpl) Run() error {
g.loadConfiguration()
g.writePIDFile()
login.Init()
social.NewOAuthService()
serviceGraph := inject.Graph{}
serviceGraph.Provide(&inject.Object{Value: bus.GetBus()})
serviceGraph.Provide(&inject.Object{Value: g.cfg})
serviceGraph.Provide(&inject.Object{Value: api.NewRouteRegister(middleware.RequestMetrics, middleware.RequestTracing)})
// self registered services
services := registry.GetServices()
// Add all services to dependency graph
for _, service := range services {
serviceGraph.Provide(&inject.Object{Value: service.Instance})
}
serviceGraph.Provide(&inject.Object{Value: g})
// Inject dependencies to services
if err := serviceGraph.Populate(); err != nil {
return fmt.Errorf("Failed to populate service dependency: %v", err)
}
// Init & start services
for _, service := range services {
if registry.IsDisabled(service.Instance) {
continue
}
g.log.Info("Initializing " + service.Name)
if err := service.Instance.Init(); err != nil {
return fmt.Errorf("Service init failed: %v", err)
}
}
// Start background services
for _, srv := range services {
// variable needed for accessing loop variable in function callback
descriptor := srv
service, ok := srv.Instance.(registry.BackgroundService)
if !ok {
continue
}
if registry.IsDisabled(descriptor.Instance) {
continue
}
g.childRoutines.Go(func() error {
// Skip starting new service when shutting down
// Can happen when service stop/return during startup
if g.shutdownInProgress {
return nil
}
err := service.Run(g.context)
// If error is not canceled then the service crashed
if err != context.Canceled && err != nil {
g.log.Error("Stopped "+descriptor.Name, "reason", err)
} else {
g.log.Info("Stopped "+descriptor.Name, "reason", err)
}
// Mark that we are in shutdown mode
// So more services are not started
g.shutdownInProgress = true
return err
})
}
sendSystemdNotification("READY=1")
return g.childRoutines.Wait()
}
func (g *GrafanaServerImpl) loadConfiguration() {
err := g.cfg.Load(&setting.CommandLineArgs{
Config: *configFile,
HomePath: *homePath,
Args: flag.Args(),
})
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to start grafana. error: %s\n", err.Error())
os.Exit(1)
}
g.log.Info("Starting "+setting.ApplicationName, "version", version, "commit", commit, "compiled", time.Unix(setting.BuildStamp, 0))
g.cfg.LogConfigSources()
}
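// Shutdown cancels the root context and waits for all child routines to finish.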
func (g *GrafanaServerImpl) Shutdown(reason string) {
g.log.Info("Shutdown started", "reason", reason)
g.shutdownReason = reason
g.shutdownInProgress = true
// call cancel func on root context
g.shutdownFn()
// wait for child routines
g.childRoutines.Wait()
}
func (g *GrafanaServerImpl) Exit(reason error) {
// default exit code is 1
code := 1
if reason == context.Canceled && g.shutdownReason != "" {
reason = fmt.Errorf(g.shutdownReason)
code = 0
}
g.log.Error("Server shutdown", "reason", reason)
log.Close()
os.Exit(code)
}
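// writePIDFile writes the current process id to the configured pid file,
// creating the parent directory if needed; it is a no-op when no pid file
// was requested.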
func (g *GrafanaServerImpl) writePIDFile() {
if *pidFile == "" {
return
}
// Ensure the required directory structure exists.
err := os.MkdirAll(filepath.Dir(*pidFile), 0700)
if err != nil {
g.log.Error("Failed to verify pid directory", "error", err)
os.Exit(1)
}
// Retrieve the PID and write it.
pid := strconv.Itoa(os.Getpid())
if err := ioutil.WriteFile(*pidFile, []byte(pid), 0644); err != nil {
g.log.Error("Failed to write pidfile", "error", err)
os.Exit(1)
}
g.log.Info("Writing PID file", "path", *pidFile, "pid", pid)
}
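// sendSystemdNotification writes the given state (e.g. "READY=1") to the unix
// datagram socket named by the NOTIFY_SOCKET environment variable so systemd
// can track service readiness.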
func sendSystemdNotification(state string) error {
notifySocket := os.Getenv("NOTIFY_SOCKET")
if notifySocket == "" {
return fmt.Errorf("NOTIFY_SOCKET environment variable empty or unset.")
}
socketAddr := &net.UnixAddr{
Name: notifySocket,
Net: "unixgram",
}
conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
if err != nil {
return err
}
_, err = conn.Write([]byte(state))
conn.Close()
return err
}
| [
"\"NOTIFY_SOCKET\""
]
| []
| [
"NOTIFY_SOCKET"
]
| [] | ["NOTIFY_SOCKET"] | go | 1 | 0 | |
lib/logo/baseclass/messages.py | import functools
import logging
import os
from kivy.app import App
from kivy.properties import StringProperty, ObjectProperty, NumericProperty, BooleanProperty, Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.recycleview import RecycleDataAdapter, RecycleView
from kivymd.uix.button import MDIconButton
from kivymd.uix.list import TwoLineAvatarListItem, IRightBodyTouch
from kivymd.uix.menu import MDDropdownMenu
from angelos.common.misc import Loop
from logo.baseclass.common import Section
from logo.baseclass.dialogs import MessageDialog
from logo import strings
from logo.action.message import SynchronizeMailAction, EmptyTrashAction
class IconRightMenu(IRightBodyTouch, MDIconButton):
pass
class LogoRecycleViewListItemMixin:
"""List item mixin for recycle view."""
data = {}
item_id = ObjectProperty(allownone=True)
error = NumericProperty(allownone=True)
tab = StringProperty(allownone=True)
selected = BooleanProperty(defaultvalue=False)
def populate(self):
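"""Fill in this list item's display fields via the tab-specific _populate_* handler."""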
try:
self._call("_populate_")
except Exception as e:
logging.error(e, exc_info=True)
def _call(self, name, **kwargs):
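"""Dispatch to the method named name + tab (e.g. "_populate_inbox"), raising RuntimeError if no such handler exists."""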
tab = self.data.get("tab", "main")
method = getattr(self, name + str(tab), None)
if callable(method):
method(**kwargs)
else:
raise RuntimeError("Method {} not found on {}".format(name + str(tab), str(self)))
def err(self, e):
pass
def clear(self):
keys = list(self.data.keys())
keys += ["text", "secondary_text", "tertiary_text"]
self.data = {}
self.selected = False
self.item_id = None
self.error = 0
self.tab = ""
for key in keys:
if hasattr(self, key):
setattr(self, key, None)
class MessageListItem(LogoRecycleViewListItemMixin, TwoLineAvatarListItem):
"""Specific RV ListItem for the message section."""
source = StringProperty()
target_id = ObjectProperty() # issuer/owner
_app = None
def __init__(self, **kwargs):
super(MessageListItem, self).__init__(**kwargs)
if not MessageListItem._app:
MessageListItem._app = App.get_running_app()
def open_letter(self):
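"""Open the message dialog appropriate for this item's tab via the _letter_* handler."""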
try:
self._call("_letter_")
except Exception as e:
logging.error(e, exc_info=True)
def _populate_inbox(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_inbox(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Issuer
self.data.setdefault("text", "{:%c}".format(info[3])) # Posted
self.data.setdefault("secondary_text", info[2] if info[2] != "n/a" else str(info[1])) # Sender or Issuer
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_inbox(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.open_envelope(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_READER_RECEIVE, mail, title=strings.TEXT_MESSAGE_INBOX_TITLE).open()
def _populate_outbox(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_outbox(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Owner
self.data.setdefault("text", "{:%c}".format(info[3])) # Posted
self.data.setdefault("secondary_text", info[2] if info[2] != "n/a" else str(info[1])) # Sender or Owner
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_outbox(self):
pass
def _populate_drafts(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_draft(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Owner
self.data.setdefault("text", info[2] if info[2] else "") # Subject
self.data.setdefault("secondary_text", info[3] if info[3] else "") # Receiver
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_drafts(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_draft(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_WRITER, mail, title=strings.TEXT_DRAFT).open()
def _populate_read(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_read(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Issuer
self.data.setdefault("text", info[2] if info[2] != "n/a" else str(info[1])) # Subject or Issuer
self.data.setdefault("secondary_text", info[3] + " - " + "{:%c}".format(info[4])) # Sender and Posted
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_read(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_read(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_READER_RECEIVE, mail, title=strings.TEXT_MESSAGE).open()
def _populate_trash(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_trash(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Issuer
self.data.setdefault("text", info[2] if info[2] != "n/a" else str(info[1])) # Subject or Issuer
self.data.setdefault("secondary_text", info[3] + " - " + "{:%c}".format(info[4])) # Sender and Posted
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_trash(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_trash(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_READER_RECEIVE, mail, title=strings.TEXT_MESSAGE_TRASH_TITLE).open()
class LogoRecycleDataAdapter(RecycleDataAdapter):
"""Custom recycle view DataAdapter.
This adapter will load extra data from the vault at scrolling."""
def __init__(self, **kwargs):
super(LogoRecycleDataAdapter, self).__init__(**kwargs)
self.app = App.get_running_app()
def refresh_view_attrs(self, index, data_item, view):
"""Wrapper for the view refresher that loads extra envelope data ad-hoc."""
if "error" not in data_item:
try:
view.data = data_item
view.populate()
except Exception as e:
logging.error(e, exc_info=True)
data_item.setdefault("error", 1)
view.err(e)
view.data = data_item
super(LogoRecycleDataAdapter, self).refresh_view_attrs(index, data_item, view)
def make_view_dirty(self, view, index):
"""Clean up some custom data from the list item"""
view.clear()
super(LogoRecycleDataAdapter, self).make_view_dirty(view, index)
class LogoRecycleView(RecycleView):
"""Custom recycle view that will set the right DataAdapter."""
def __init__(self, **kwargs):
kwargs.setdefault("view_adapter", LogoRecycleDataAdapter())
super(LogoRecycleView, self).__init__(**kwargs)
class MessageSearch(BoxLayout):
"""Message search box logic."""
pass
class MessageMenu(MDDropdownMenu):
"""Mass selection/deselection and other operations menu."""
def __init__(self, **kwargs):
super(MessageMenu, self).__init__(**kwargs)
menu_tab = {
"inbox": ["sync"],
"outbox": ["sync"],
"drafts": [],
"read": [],
"trash": ["empty"]
}
class Messages(Section):
"""The messages sub screen."""
def __init__(self, **kwargs):
Section.__init__(self, **kwargs)
self.menus = dict()
self._app = App.get_running_app()
def on_pre_enter(self, *args):
"""Prepare menus."""
self.ids.panel.on_resize()
def commit(action, tab_name, dt):
"""Selected menu command callback."""
content = self.ids.get(tab_name).ids.content
action(content=content).start()
if not self.menus:
caller = self.ids.toolbar.ids["right_actions"].children[0]
for tab in menu_tab.keys():
menu_items = list()
for item in menu_tab[tab]:
menu_items.append({
"viewclass": "MDMenuItem",
"icon": menu_context[item][0],
"text": menu_context[item][1],
"callback": functools.partial(commit, menu_context[item][2], tab)
})
self.menus[tab] = MessageMenu(caller=caller, items=menu_items, width_mult=4)
def open_menu(self, widget):
"""Open menu for right tab."""
self.menus[self.ids.panel.ids.tab_manager.current].open()
def list_inbox(self, page):
"""load all favorite contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_inbox(),
page.children[0].ids.content,
tab=page.name
)
def list_outbox(self, page):
"""Load all friend contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_outbox(),
page.children[0].ids.content,
tab=page.name
)
def list_drafts(self, page):
"""Load all known contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_drafts(),
page.children[0].ids.content,
tab=page.name
)
def list_read(self, page):
"""Load all known contacts from a church network."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_read(),
page.children[0].ids.content,
tab=page.name
)
def list_trash(self, page):
"""Load all blocked contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_trash(),
page.children[0].ids.content,
tab=page.name
)
@staticmethod
def __load_rv(coro, content, tab=None, selectable=False):
"""Update the ScrollView with new and changed data."""
data = content.data
current = {e["item_id"] for e in data}
loaded = Loop.main().run(coro, wait=True)
# Remove from current that is not in loaded
remove = (current - loaded)
index = 0
while index < len(data):
if data[index]["item_id"] in remove:
del data[index]
else:
index += 1
# Add unique from loaded to current
new = (loaded - current)
for item in loaded:
if item in new:
model = {"item_id": item}
if tab:
model["tab"] = tab
if selectable:
model["selected"] = False
data.append(model)
menu_context = {
"sync": ("sync", strings.TEXT_SYNCHRONIZE, SynchronizeMailAction),
"empty": ("trash-can-outline", strings.TEXT_EMPTY, EmptyTrashAction),
} | []
| []
| [
"LOGO_MESSENGER_ASSETS"
]
| [] | ["LOGO_MESSENGER_ASSETS"] | python | 1 | 0 | |
loader/loader_test.go | package loader
import (
"fmt"
"io/ioutil"
"os"
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/will0523/compose-file/types"
)
func buildConfigDetails(source types.Dict) types.ConfigDetails {
workingDir, err := os.Getwd()
if err != nil {
panic(err)
}
return types.ConfigDetails{
WorkingDir: workingDir,
ConfigFiles: []types.ConfigFile{
{Filename: "filename.yml", Config: source},
},
Environment: nil,
}
}
var sampleYAML = `
version: "3"
services:
foo:
image: busybox
networks:
with_me:
bar:
image: busybox
environment:
- FOO=1
networks:
- with_ipam
volumes:
hello:
driver: default
driver_opts:
beep: boop
networks:
default:
driver: bridge
driver_opts:
beep: boop
with_ipam:
ipam:
driver: default
config:
- subnet: 172.28.0.0/16
`
var sampleDict = types.Dict{
"version": "3",
"services": types.Dict{
"foo": types.Dict{
"image": "busybox",
"networks": types.Dict{"with_me": nil},
},
"bar": types.Dict{
"image": "busybox",
"environment": []interface{}{"FOO=1"},
"networks": []interface{}{"with_ipam"},
},
},
"volumes": types.Dict{
"hello": types.Dict{
"driver": "default",
"driver_opts": types.Dict{
"beep": "boop",
},
},
},
"networks": types.Dict{
"default": types.Dict{
"driver": "bridge",
"driver_opts": types.Dict{
"beep": "boop",
},
},
"with_ipam": types.Dict{
"ipam": types.Dict{
"driver": "default",
"config": []interface{}{
types.Dict{
"subnet": "172.28.0.0/16",
},
},
},
},
},
}
var sampleConfig = types.Config{
Services: []types.ServiceConfig{
{
Name: "foo",
Image: "busybox",
Environment: map[string]string{},
Networks: map[string]*types.ServiceNetworkConfig{
"with_me": nil,
},
},
{
Name: "bar",
Image: "busybox",
Environment: map[string]string{"FOO": "1"},
Networks: map[string]*types.ServiceNetworkConfig{
"with_ipam": nil,
},
},
},
Networks: map[string]types.NetworkConfig{
"default": {
Driver: "bridge",
DriverOpts: map[string]string{
"beep": "boop",
},
},
"with_ipam": {
Ipam: types.IPAMConfig{
Driver: "default",
Config: []*types.IPAMPool{
{
Subnet: "172.28.0.0/16",
},
},
},
},
},
Volumes: map[string]types.VolumeConfig{
"hello": {
Driver: "default",
DriverOpts: map[string]string{
"beep": "boop",
},
},
},
}
func TestParseYAML(t *testing.T) {
dict, err := ParseYAML([]byte(sampleYAML))
if !assert.NoError(t, err) {
return
}
assert.Equal(t, sampleDict, dict)
}
func TestLoad(t *testing.T) {
actual, err := Load(buildConfigDetails(sampleDict))
if !assert.NoError(t, err) {
return
}
assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services))
assert.Equal(t, sampleConfig.Networks, actual.Networks)
assert.Equal(t, sampleConfig.Volumes, actual.Volumes)
}
func TestParseAndLoad(t *testing.T) {
actual, err := loadYAML(sampleYAML)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services))
assert.Equal(t, sampleConfig.Networks, actual.Networks)
assert.Equal(t, sampleConfig.Volumes, actual.Volumes)
}
func TestInvalidTopLevelObjectType(t *testing.T) {
_, err := loadYAML("1")
assert.Error(t, err)
assert.Contains(t, err.Error(), "Top-level object must be a mapping")
_, err = loadYAML("\"hello\"")
assert.Error(t, err)
assert.Contains(t, err.Error(), "Top-level object must be a mapping")
_, err = loadYAML("[\"hello\"]")
assert.Error(t, err)
assert.Contains(t, err.Error(), "Top-level object must be a mapping")
}
func TestNonStringKeys(t *testing.T) {
_, err := loadYAML(`
version: "3"
123:
foo:
image: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Non-string key at top level: 123")
_, err = loadYAML(`
version: "3"
services:
foo:
image: busybox
123:
image: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Non-string key in services: 123")
_, err = loadYAML(`
version: "3"
services:
foo:
image: busybox
networks:
default:
ipam:
config:
- 123: oh dear
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123")
_, err = loadYAML(`
version: "3"
services:
dict-env:
image: busybox
environment:
1: FOO
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1")
}
func TestSupportedVersion(t *testing.T) {
_, err := loadYAML(`
version: "3"
services:
foo:
image: busybox
`)
assert.NoError(t, err)
_, err = loadYAML(`
version: "3.0"
services:
foo:
image: busybox
`)
assert.NoError(t, err)
}
func TestUnsupportedVersion(t *testing.T) {
_, err := loadYAML(`
version: "2"
services:
foo:
image: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "version")
_, err = loadYAML(`
version: "2.0"
services:
foo:
image: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "version")
}
func TestInvalidVersion(t *testing.T) {
_, err := loadYAML(`
version: 3
services:
foo:
image: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "version must be a string")
}
func TestV1Unsupported(t *testing.T) {
_, err := loadYAML(`
foo:
image: busybox
`)
assert.Error(t, err)
}
func TestNonMappingObject(t *testing.T) {
_, err := loadYAML(`
version: "3"
services:
- foo:
image: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "services must be a mapping")
_, err = loadYAML(`
version: "3"
services:
foo: busybox
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "services.foo must be a mapping")
_, err = loadYAML(`
version: "3"
networks:
- default:
driver: bridge
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "networks must be a mapping")
_, err = loadYAML(`
version: "3"
networks:
default: bridge
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "networks.default must be a mapping")
_, err = loadYAML(`
version: "3"
volumes:
- data:
driver: local
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "volumes must be a mapping")
_, err = loadYAML(`
version: "3"
volumes:
data: local
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "volumes.data must be a mapping")
}
func TestNonStringImage(t *testing.T) {
_, err := loadYAML(`
version: "3"
services:
foo:
image: ["busybox", "latest"]
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "services.foo.image must be a string")
}
func TestValidEnvironment(t *testing.T) {
config, err := loadYAML(`
version: "3"
services:
dict-env:
image: busybox
environment:
FOO: "1"
BAR: 2
BAZ: 2.5
QUUX:
list-env:
image: busybox
environment:
- FOO=1
- BAR=2
- BAZ=2.5
- QUUX=
`)
assert.NoError(t, err)
expected := map[string]string{
"FOO": "1",
"BAR": "2",
"BAZ": "2.5",
"QUUX": "",
}
assert.Equal(t, 2, len(config.Services))
for _, service := range config.Services {
assert.Equal(t, expected, service.Environment)
}
}
func TestInvalidEnvironmentValue(t *testing.T) {
_, err := loadYAML(`
version: "3"
services:
dict-env:
image: busybox
environment:
FOO: ["1"]
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null")
}
func TestInvalidEnvironmentObject(t *testing.T) {
_, err := loadYAML(`
version: "3"
services:
dict-env:
image: busybox
environment: "FOO=1"
`)
assert.Error(t, err)
assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping")
}
func TestEnvironmentInterpolation(t *testing.T) {
config, err := loadYAML(`
version: "3"
services:
test:
image: busybox
labels:
- home1=$HOME
- home2=${HOME}
- nonexistent=$NONEXISTENT
- default=${NONEXISTENT-default}
networks:
test:
driver: $HOME
volumes:
test:
driver: $HOME
`)
assert.NoError(t, err)
home := os.Getenv("HOME")
expectedLabels := map[string]string{
"home1": home,
"home2": home,
"nonexistent": "",
"default": "default",
}
assert.Equal(t, expectedLabels, config.Services[0].Labels)
assert.Equal(t, home, config.Networks["test"].Driver)
assert.Equal(t, home, config.Volumes["test"].Driver)
}
func TestUnsupportedProperties(t *testing.T) {
dict, err := ParseYAML([]byte(`
version: "3"
services:
web:
image: web
build: ./web
links:
- bar
db:
image: db
build: ./db
`))
assert.NoError(t, err)
configDetails := buildConfigDetails(dict)
_, err = Load(configDetails)
assert.NoError(t, err)
unsupported := GetUnsupportedProperties(configDetails)
assert.Equal(t, []string{"build", "links"}, unsupported)
}
func TestDeprecatedProperties(t *testing.T) {
dict, err := ParseYAML([]byte(`
version: "3"
services:
web:
image: web
container_name: web
db:
image: db
container_name: db
expose: ["5434"]
`))
assert.NoError(t, err)
configDetails := buildConfigDetails(dict)
_, err = Load(configDetails)
assert.NoError(t, err)
deprecated := GetDeprecatedProperties(configDetails)
assert.Equal(t, 2, len(deprecated))
assert.Contains(t, deprecated, "container_name")
assert.Contains(t, deprecated, "expose")
}
func TestForbiddenProperties(t *testing.T) {
_, err := loadYAML(`
version: "3"
services:
foo:
image: busybox
volumes:
- /data
volume_driver: some-driver
bar:
extends:
service: foo
`)
assert.Error(t, err)
assert.IsType(t, &ForbiddenPropertiesError{}, err)
fmt.Println(err)
forbidden := err.(*ForbiddenPropertiesError).Properties
assert.Equal(t, 2, len(forbidden))
assert.Contains(t, forbidden, "volume_driver")
assert.Contains(t, forbidden, "extends")
}
func durationPtr(value time.Duration) *time.Duration {
return &value
}
func int64Ptr(value int64) *int64 {
return &value
}
func uint64Ptr(value uint64) *uint64 {
return &value
}
func TestFullExample(t *testing.T) {
bytes, err := ioutil.ReadFile("full-example.yml")
assert.NoError(t, err)
config, err := loadYAML(string(bytes))
if !assert.NoError(t, err) {
return
}
workingDir, err := os.Getwd()
assert.NoError(t, err)
homeDir := os.Getenv("HOME")
stopGracePeriod := time.Duration(20 * time.Second)
expectedServiceConfig := types.ServiceConfig{
Name: "foo",
CapAdd: []string{"ALL"},
CapDrop: []string{"NET_ADMIN", "SYS_ADMIN"},
CgroupParent: "m-executor-abcd",
Command: []string{"bundle", "exec", "thin", "-p", "3000"},
ContainerName: "my-web-container",
DependsOn: []string{"db", "redis"},
Deploy: types.DeployConfig{
Mode: "replicated",
Replicas: uint64Ptr(6),
Labels: map[string]string{"FOO": "BAR"},
UpdateConfig: &types.UpdateConfig{
Parallelism: uint64Ptr(3),
Delay: time.Duration(10 * time.Second),
FailureAction: "continue",
Monitor: time.Duration(60 * time.Second),
MaxFailureRatio: 0.3,
},
Resources: types.Resources{
Limits: &types.Resource{
NanoCPUs: "0.001",
MemoryBytes: 50 * 1024 * 1024,
},
Reservations: &types.Resource{
NanoCPUs: "0.0001",
MemoryBytes: 20 * 1024 * 1024,
},
},
RestartPolicy: &types.RestartPolicy{
Condition: "on_failure",
Delay: durationPtr(5 * time.Second),
MaxAttempts: uint64Ptr(3),
Window: durationPtr(2 * time.Minute),
},
Placement: types.Placement{
Constraints: []string{"node=foo"},
},
},
Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"},
Dns: []string{"8.8.8.8", "9.9.9.9"},
DnsSearch: []string{"dc1.example.com", "dc2.example.com"},
DomainName: "foo.com",
Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"},
Environment: map[string]string{
"RACK_ENV": "development",
"SHOW": "true",
"SESSION_SECRET": "",
"FOO": "1",
"BAR": "2",
"BAZ": "3",
},
Expose: []string{"3000", "8000"},
ExternalLinks: []string{
"redis_1",
"project_db_1:mysql",
"project_db_1:postgresql",
},
ExtraHosts: map[string]string{
"otherhost": "50.31.209.229",
"somehost": "162.242.195.82",
},
HealthCheck: &types.HealthCheckConfig{
Test: []string{
"CMD-SHELL",
"echo \"hello world\"",
},
Interval: "10s",
Timeout: "1s",
Retries: uint64Ptr(5),
},
Hostname: "foo",
Image: "redis",
Ipc: "host",
Labels: map[string]string{
"com.example.description": "Accounting webapp",
"com.example.number": "42",
"com.example.empty-label": "",
},
Links: []string{
"db",
"db:database",
"redis",
},
Logging: &types.LoggingConfig{
Driver: "syslog",
Options: map[string]string{
"syslog-address": "tcp://192.168.0.42:123",
},
},
MacAddress: "02:42:ac:11:65:43",
NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b",
Networks: map[string]*types.ServiceNetworkConfig{
"some-network": {
Aliases: []string{"alias1", "alias3"},
Ipv4Address: "",
Ipv6Address: "",
},
"other-network": {
Ipv4Address: "172.16.238.10",
Ipv6Address: "2001:3984:3989::10",
},
"other-other-network": nil,
},
Pid: "host",
Ports: []string{
"3000",
"3000-3005",
"8000:8000",
"9090-9091:8080-8081",
"49100:22",
"127.0.0.1:8001:8001",
"127.0.0.1:5000-5010:5000-5010",
},
Privileged: true,
ReadOnly: true,
Restart: "always",
SecurityOpt: []string{
"label=level:s0:c100,c200",
"label=type:svirt_apache_t",
},
StdinOpen: true,
StopSignal: "SIGUSR1",
StopGracePeriod: &stopGracePeriod,
Tmpfs: []string{"/run", "/tmp"},
Tty: true,
Ulimits: map[string]*types.UlimitsConfig{
"nproc": {
Single: 65535,
},
"nofile": {
Soft: 20000,
Hard: 40000,
},
},
User: "someone",
Volumes: []string{
"/var/lib/mysql",
"/opt/data:/var/lib/mysql",
fmt.Sprintf("%s:/code", workingDir),
fmt.Sprintf("%s/static:/var/www/html", workingDir),
fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir),
"datavolume:/var/lib/mysql",
},
WorkingDir: "/code",
}
assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services)
expectedNetworkConfig := map[string]types.NetworkConfig{
"some-network": {},
"other-network": {
Driver: "overlay",
DriverOpts: map[string]string{
"foo": "bar",
"baz": "1",
},
Ipam: types.IPAMConfig{
Driver: "overlay",
Config: []*types.IPAMPool{
{Subnet: "172.16.238.0/24"},
{Subnet: "2001:3984:3989::/64"},
},
},
},
"external-network": {
External: types.External{
Name: "external-network",
External: true,
},
},
"other-external-network": {
External: types.External{
Name: "my-cool-network",
External: true,
},
},
}
assert.Equal(t, expectedNetworkConfig, config.Networks)
expectedVolumeConfig := map[string]types.VolumeConfig{
"some-volume": {},
"other-volume": {
Driver: "flocker",
DriverOpts: map[string]string{
"foo": "bar",
"baz": "1",
},
},
"external-volume": {
External: types.External{
Name: "external-volume",
External: true,
},
},
"other-external-volume": {
External: types.External{
Name: "my-cool-volume",
External: true,
},
},
}
assert.Equal(t, expectedVolumeConfig, config.Volumes)
}
func loadYAML(yaml string) (*types.Config, error) {
dict, err := ParseYAML([]byte(yaml))
if err != nil {
return nil, err
}
return Load(buildConfigDetails(dict))
}
func serviceSort(services []types.ServiceConfig) []types.ServiceConfig {
sort.Sort(servicesByName(services))
return services
}
type servicesByName []types.ServiceConfig
func (sbn servicesByName) Len() int { return len(sbn) }
func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] }
func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name }
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
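The assertions above encode how a full docker-compose file is expected to deserialize. As a rough illustration of what the deploy-related expectations correspond to, the fragment below reconstructs the relevant YAML from the asserted values (the real fixture file is not part of this excerpt) and parses it with PyYAML rather than the Go loader under test.

import yaml  # PyYAML; used here only to illustrate the expected structure

# Reconstructed from the Deploy/UpdateConfig expectations above (illustrative only).
fragment = """
deploy:
  mode: replicated
  replicas: 6
  labels:
    FOO: BAR
  update_config:
    parallelism: 3
    delay: 10s
    failure_action: continue
    monitor: 60s
    max_failure_ratio: 0.3
"""

deploy = yaml.safe_load(fragment)["deploy"]
assert deploy["mode"] == "replicated"
assert deploy["replicas"] == 6
assert deploy["update_config"]["parallelism"] == 3
assert deploy["update_config"]["max_failure_ratio"] == 0.3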
src/sentry/runner/commands/init.py | from __future__ import absolute_import, print_function
import os
import click
@click.command()
@click.option(
"--dev", default=False, is_flag=True, help="Use settings more conducive to local development."
)
@click.argument("directory", required=False)
@click.pass_context
def init(ctx, dev, directory):
"Initialize new configuration directory."
from sentry.runner.settings import discover_configs, generate_settings
if directory is not None:
os.environ["SENTRY_CONF"] = directory
directory, py, yaml = discover_configs()
# In this case, the config is pointing directly to a file, so we
# must maintain old behavior, and just abort
if yaml is None and os.path.isfile(py):
        # TODO: Link to docs explaining the new behavior of SENTRY_CONF?
raise click.ClickException(
"Found legacy '%s' file, so aborting." % click.format_filename(py)
)
if yaml is None:
raise click.ClickException("DIRECTORY must not be a file.")
if directory and not os.path.exists(directory):
os.makedirs(directory)
py_contents, yaml_contents = generate_settings(dev)
if os.path.isfile(yaml):
click.confirm(
"File already exists at '%s', overwrite?" % click.format_filename(yaml), abort=True
)
with click.open_file(yaml, "w") as fp:
fp.write(yaml_contents)
if os.path.isfile(py):
click.confirm(
"File already exists at '%s', overwrite?" % click.format_filename(py), abort=True
)
with click.open_file(py, "w") as fp:
fp.write(py_contents)
| []
| []
| [
"SENTRY_CONF"
]
| [] | ["SENTRY_CONF"] | python | 1 | 0 | |
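The init command above resolves its target directory through SENTRY_CONF before calling discover_configs, then writes the Python and YAML settings files, prompting before overwriting either one. A minimal sketch of exercising it with Click's test runner is shown below; it assumes the sentry package is importable and uses a throwaway temporary directory.

import tempfile

from click.testing import CliRunner
from sentry.runner.commands.init import init  # assumes sentry is installed

runner = CliRunner()
with tempfile.TemporaryDirectory() as tmp:
    # Passing DIRECTORY exports it as SENTRY_CONF before discover_configs()
    # runs, so the generated files land inside tmp.
    result = runner.invoke(init, ["--dev", tmp])
    assert result.exit_code == 0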
tests/e2e/gc_block_resize_retain_policy.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"os"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
cnstypes "github.com/vmware/govmomi/cns/types"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes"
)
var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation policy retain", func() {
f := framework.NewDefaultFramework("gc-resize-reclaim-policy-retain")
var (
client clientset.Interface
clientNewGc clientset.Interface
namespace string
namespaceNewGC string
storagePolicyName string
storageclass *storagev1.StorageClass
pvclaim *v1.PersistentVolumeClaim
err error
volHandle string
svcPVCName string
pv *v1.PersistentVolume
pvcDeleted bool
pvcDeletedInSvc bool
pvDeleted bool
cmd []string
cmd2 []string
)
ginkgo.BeforeEach(func() {
client = f.ClientSet
namespace = f.Namespace.Name
bootstrap()
ginkgo.By("Getting ready nodes on GC 1")
nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
scParameters := make(map[string]string)
scParameters[scParamFsType] = ext4FSType
//Set resource quota
ginkgo.By("Set Resource quota for GC")
svcClient, svNamespace := getSvcClientAndNamespace()
setResourceQuota(svcClient, svNamespace, rqLimit)
// Create Storage class and PVC
ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true")
scParameters[svStorageClassName] = storagePolicyName
storageclass, err = createStorageClass(client, scParameters, nil, v1.PersistentVolumeReclaimRetain, "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvclaim, err = createPVC(client, namespace, nil, "", storageclass, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Waiting for PVC to be bound
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for all claims to be in bound state")
persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pv = persistentvolumes[0]
volHandle = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
svcPVCName = pv.Spec.CSI.VolumeHandle
pvcDeleted = false
pvcDeletedInSvc = false
pvDeleted = false
// replace second element with pod.Name
cmd = []string{"exec", "", fmt.Sprintf("--namespace=%v", namespace), "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"}
})
ginkgo.AfterEach(func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if !pvcDeleted {
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if !pvDeleted {
err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
if !pvcDeletedInSvc {
svcClient, svcNamespace := getSvcClientAndNamespace()
err := svcClient.CoreV1().PersistentVolumeClaims(svcNamespace).Delete(ctx, svcPVCName, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is deleted in Supervisor Cluster")
volumeExists := verifyVolumeExistInSupervisorCluster(svcPVCName)
gomega.Expect(volumeExists).To(gomega.BeFalse())
err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
svcClient, svNamespace := getSvcClientAndNamespace()
setResourceQuota(svcClient, svNamespace, defaultrqLimit)
})
/*
Combined:
		PV with reclaim policy retain can be resized using a new GC PVC
		PV with reclaim policy retain can be resized using a new GC PVC with a pod
Steps:
1. Create a SC with allowVolumeExpansion set to 'true' and with reclaim policy set to 'Retain'
2. create a GC PVC using the SC created in step 1 and wait for binding with PV
3. Create a pod in GC to use PVC created in step 2 and file system init
4. delete GC pod created in step 3
5. Delete GC PVC created in step 2
		6. verify GC PVC is removed but SVC PVC, PV and GC PV still exist
7. remove claimRef from the PV lingering in GC to get it to Available state
8. Create new PVC in GC using the PV lingering in GC using the same SC from step 1
9. verify same SVC PVC is reused
10. Resize PVC in GC
11. Wait for PVC in GC to reach "FilesystemResizePending" state
		12. Check using CNS query that the size has been updated to the one requested in step 10
		13. Verify the size of the PV in SVC and GC is the same as the one requested in step 10
		14. Create a pod in GC to use the PVC created in step 8
		15. wait for FS resize
		16. Verify the PVC sizes in SVC and GC are equal and bigger than they were after step 3
		17. delete the pod created in step 14
		18. delete the PVC created in step 8
19. delete PV leftover in GC
20. delete SC created in step 1
*/
ginkgo.It("PV with reclaim policy can be reused and resized with pod", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create a POD to use this PVC, and verify volume has been attached
ginkgo.By("Creating pod to attach PV to the node")
pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err := getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify the volume is accessible and filesystem type is as expected")
cmd[1] = pod.Name
lastOutput := framework.RunKubectlOrDie(namespace, cmd...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion")
originalFsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Delete POD
ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace))
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
ginkgo.By("Delete PVC in GC")
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcDeleted = true
ginkgo.By("Check GC PV exists and is released")
pv, err = waitForPvToBeReleased(ctx, client, pv.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
oldPvUID := string(pv.UID)
fmt.Println("PV uuid", oldPvUID)
ginkgo.By("Check SVC PVC exists")
_ = getPVCFromSupervisorCluster(svcPVCName)
ginkgo.By("Remove claimRef from GC PVC")
pv.Spec.ClaimRef = nil
pv, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Creating the PVC in guest cluster")
pvclaim = getPersistentVolumeClaimSpec(namespace, nil, pv.Name)
pvclaim.Spec.StorageClassName = &storageclass.Name
pvclaim, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvclaim, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Wait for the PVC in guest cluster to bind the lingering pv")
framework.ExpectNoError(fpv.WaitOnPVandPVC(client, framework.NewTimeoutContextWithDefaults(), namespace, pv, pvclaim))
// Modify PVC spec to trigger volume expansion
// We expand the PVC while no pod is using it to ensure offline expansion
ginkgo.By("Expanding current pvc")
currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvclaim, err = expandPVCSize(pvclaim, newSize, client)
framework.ExpectNoError(err, "While updating pvc for more size")
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
pvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvclaim.Name)
}
ginkgo.By("Checking for PVC request size change on SVC PVC")
b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize)
gomega.Expect(b).To(gomega.BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for resize on SVC PV")
verifyPVSizeinSupervisor(svcPVCName, newSize)
ginkgo.By("Checking for conditions on pvc")
pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC")
_, err = checkSvcPvcHasGivenStatusCondition(svcPVCName, true, v1.PersistentVolumeClaimFileSystemResizePending)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(queryResult.Volumes) == 0 {
err = fmt.Errorf("QueryCNSVolumeWithResult returned no volume")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verifying disk size requested in volume expansion is honored")
newSizeInMb := convertGiStrToMibInt64(newSize)
if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb {
err = fmt.Errorf("got wrong disk size after volume expansion")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create a new POD to use this PVC, and verify volume has been attached
ginkgo.By("Creating a new pod to attach PV again to the node")
pod, err = createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify after expansion the filesystem type is as expected")
cmd[1] = pod.Name
lastOutput = framework.RunKubectlOrDie(namespace, cmd...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Waiting for file system resize to finish")
pvclaim, err = waitForFSResize(pvclaim, client)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvclaim.Status.Conditions
expectEqual(len(pvcConditions), 0, "pvc should not have conditions")
ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion")
fsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Filesystem size may be smaller than the size of the block volume.
// Here since filesystem was already formatted on the original volume,
// we can compare the new filesystem size with the original filesystem size.
if fsSize <= originalFsSize {
framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)
}
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
gomega.Expect(verifyResizeCompletedInSupervisor(svcPVCName)).To(gomega.BeTrue())
// Delete POD
ginkgo.By(fmt.Sprintf("Deleting the new pod %s in namespace %s after expansion", pod.Name, namespace))
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node after expansion")
isDiskDetached, err = e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
})
/*
PV with reclaim policy retain can be resized when used in a fresh GC
Steps:
1. Create a SC with allowVolumeExpansion set to 'true' in GC1
2. create a GC1 PVC using the SC created in step 1 and wait for binding with PV with reclaim policy set to 'Retain'
3. Delete GC1 PVC
4. verify GC1 PVC is removed but SVC PV, PVC and GC1 PV still exist
5. delete GC1 PV. SVC PV, PVC still exist
6. Create a new GC GC2
		7. create SC in GC2 similar to the SC created in step 1 but with reclaim policy set to delete
8. Create new PV in GC2 using the SVC PVC from step 5 and SC created in step 7
9. create new PVC in GC2 using PV created in step 8
10. verify a new PVC API object is created
11. Resize PVC from step 9 in GC2
12. Wait for PVC in GC2 and SVC to reach "FilesystemResizePending" state
13. Check using CNS query that size has got updated to what was used in step 11
		14. Verify the size of the PVs in SVC and GC is the same as the one used in step 11
15. delete PVC created in step 9
16. delete SC created in step 1 and step 7
17. delete GC2
Steps 6 and 17 need to run manually before and after this suite
*/
ginkgo.It("PV with reclaim policy retain can be resized when used in a fresh GC", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
newGcKubconfigPath := os.Getenv("NEW_GUEST_CLUSTER_KUBE_CONFIG")
if newGcKubconfigPath == "" {
ginkgo.Skip("Env NEW_GUEST_CLUSTER_KUBE_CONFIG is missing")
}
clientNewGc, err = k8s.CreateKubernetesClientFromConfig(newGcKubconfigPath)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Error creating k8s client with %v: %v", newGcKubconfigPath, err))
ginkgo.By("Creating namespace on second GC")
ns, err := framework.CreateTestingNS(f.BaseName, clientNewGc, map[string]string{
"e2e-framework": f.BaseName,
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error creating namespace on second GC")
namespaceNewGC = ns.Name
framework.Logf("Created namespace on second GC %v", namespaceNewGC)
defer func() {
err := clientNewGc.CoreV1().Namespaces().Delete(ctx, namespaceNewGC, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Getting ready nodes on GC 2")
nodeList, err := fnodes.GetReadySchedulableNodes(clientNewGc)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero(), "Unable to find ready and schedulable Node")
ginkgo.By("Delete PVC and PV form orignal GC")
err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvcDeleted = true
err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvDeleted = true
ginkgo.By("Check SVC PVC still exists")
_ = getPVCFromSupervisorCluster(svcPVCName)
scParameters := make(map[string]string)
scParameters[scParamFsType] = ext4FSType
scParameters[svStorageClassName] = storagePolicyName
storageclassNewGC, err := createStorageClass(clientNewGc, scParameters, nil, v1.PersistentVolumeReclaimDelete, "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvc, err := createPVC(clientNewGc, namespaceNewGC, nil, "", storageclassNewGC, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var pvcs []*v1.PersistentVolumeClaim
pvcs = append(pvcs, pvc)
ginkgo.By("Waiting for all claims to be in bound state")
pvs, err := fpv.WaitForPVClaimBoundPhase(clientNewGc, pvcs, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvtemp := pvs[0]
defer func() {
err = clientNewGc.CoreV1().PersistentVolumeClaims(namespaceNewGC).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = clientNewGc.StorageV1().StorageClasses().Delete(ctx, storageclassNewGC.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvtemp.Spec.CSI.VolumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is deleted in Supervisor Cluster")
volumeExists := verifyVolumeExistInSupervisorCluster(pvtemp.Spec.CSI.VolumeHandle)
gomega.Expect(volumeExists).To(gomega.BeFalse())
}()
volumeID := getVolumeIDFromSupervisorCluster(svcPVCName)
gomega.Expect(volumeID).NotTo(gomega.BeEmpty())
ginkgo.By("Creating the PV")
pvNew := getPersistentVolumeSpec(svcPVCName, v1.PersistentVolumeReclaimDelete, nil)
pvNew.Annotations = pvtemp.Annotations
pvNew.Spec.StorageClassName = pvtemp.Spec.StorageClassName
pvNew.Spec.CSI = pvtemp.Spec.CSI
pvNew.Spec.CSI.VolumeHandle = svcPVCName
pvNew, err = clientNewGc.CoreV1().PersistentVolumes().Create(ctx, pvNew, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Creating the PVC")
pvcNew := getPersistentVolumeClaimSpec(namespaceNewGC, nil, pvNew.Name)
pvcNew.Spec.StorageClassName = &pvtemp.Spec.StorageClassName
pvcNew, err = clientNewGc.CoreV1().PersistentVolumeClaims(namespaceNewGC).Create(ctx, pvcNew, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Wait for PV and PVC to Bind
framework.ExpectNoError(fpv.WaitOnPVandPVC(clientNewGc, framework.NewTimeoutContextWithDefaults(), namespaceNewGC, pvNew, pvcNew))
ginkgo.By("Expanding current pvc")
currentPvcSize := pvcNew.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvcNew, err = expandPVCSize(pvcNew, newSize, clientNewGc)
framework.ExpectNoError(err, "While updating pvc for more size")
gomega.Expect(pvcNew).NotTo(gomega.BeNil())
pvcSize := pvcNew.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvcNew.Name)
}
ginkgo.By("Checking for PVC request size change on SVC PVC")
b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize)
gomega.Expect(b).To(gomega.BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvcNew, clientNewGc, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for resize on SVC PV")
verifyPVSizeinSupervisor(svcPVCName, newSize)
ginkgo.By("Checking for conditions on pvc")
pvcNew, err = waitForPVCToReachFileSystemResizePendingCondition(clientNewGc, namespaceNewGC, pvcNew.Name, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC")
_, err = checkSvcPvcHasGivenStatusCondition(svcPVCName, true, v1.PersistentVolumeClaimFileSystemResizePending)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(queryResult.Volumes) == 0 {
err = fmt.Errorf("QueryCNSVolumeWithResult returned no volume")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verifying disk size requested in volume expansion is honored")
newSizeInMb := convertGiStrToMibInt64(newSize)
if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb {
err = fmt.Errorf("got wrong disk size after volume expansion")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create a new POD to use this PVC, and verify volume has been attached
ginkgo.By("Creating a pod to attach PV again to the node")
pod, err := createPod(clientNewGc, namespaceNewGC, nil, []*v1.PersistentVolumeClaim{pvcNew}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s", pvNew.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err := getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(clientNewGc, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Verify after expansion the filesystem type is as expected")
oldKubeConfig := framework.TestContext.KubeConfig
framework.TestContext.KubeConfig = newGcKubconfigPath
defer func() {
framework.TestContext.KubeConfig = oldKubeConfig
}()
cmd2 = []string{"exec", pod.Name, fmt.Sprintf("--namespace=%v", namespaceNewGC), "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"}
lastOutput := framework.RunKubectlOrDie(namespaceNewGC, cmd2...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse())
ginkgo.By("Waiting for file system resize to finish")
pvcNew, err = waitForFSResize(pvcNew, clientNewGc)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvclaim.Status.Conditions
expectEqual(len(pvcConditions), 0, "pvc should not have conditions")
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
gomega.Expect(verifyResizeCompletedInSupervisor(svcPVCName)).To(gomega.BeTrue())
// Delete POD
ginkgo.By(fmt.Sprintf("Deleting the new pod %s in namespace %s after expansion", pod.Name, namespaceNewGC))
err = fpod.DeletePodWithWait(clientNewGc, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node after expansion")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(clientNewGc, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
ginkgo.By("Deleting the PV Claim")
framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(clientNewGc, pvcNew.Name, namespaceNewGC), "Failed to delete PVC ", pvcNew.Name)
pvcNew = nil
ginkgo.By("Verify PV should be deleted automatically")
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(clientNewGc, pvNew.Name, poll, pollTimeoutShort))
pvNew = nil
ginkgo.By("Verify volume is deleted in Supervisor Cluster")
volumeExists := verifyVolumeExistInSupervisorCluster(svcPVCName)
gomega.Expect(volumeExists).To(gomega.BeFalse())
pvcDeletedInSvc = true
})
})
func waitForPvToBeReleased(ctx context.Context, client clientset.Interface, pvName string) (*v1.PersistentVolume, error) {
var pv *v1.PersistentVolume
var err error
waitErr := wait.PollImmediate(resizePollInterval, pollTimeoutShort, func() (bool, error) {
pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if pv.Status.Phase == v1.VolumeReleased {
return true, nil
}
return false, nil
})
return pv, waitErr
}
| [
"\"NEW_GUEST_CLUSTER_KUBE_CONFIG\""
]
| []
| [
"NEW_GUEST_CLUSTER_KUBE_CONFIG"
]
| [] | ["NEW_GUEST_CLUSTER_KUBE_CONFIG"] | go | 1 | 0 | |
botCmd.py | import sqlite3
import os
MSG_HELP = """List of commands:
!help
List commands
!listAll
List all animals
!show <animal>
Give description
!getFlag
Give flag (Admin only)
!serverInfo
Give server info (Dragonite only)
!addAdmin <id>
Make user an admin (Dragonite only)
!hint
Give you a hint.
Source_code:
https://github.com/Bankde/Hack-me-bot"""
MSG_NO_DRAGONITE = "You're not Dragonite. Go away !!"
MSG_SEARCH_ERROR = "We cannot find this animal in our database"
MSG_NO_ADMIN = "You are not Admin. Go away !!"
MSG_ANIMAL_CMD = "Please specify animal: e.g. !show dog"
APP_DB = "app.db"
HINT_URL = "https://i.imgur.com/QPKpeJL.jpg"
def init():
serverInfo = os.getenv('SERVER_INFO', None)
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (serverInfo,)
cursor.execute("UPDATE ServerInfo SET info=?", values)
conn.commit()
values = ("TestLogUser", "TestLogMsg", )
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Log userId and their msg here
def _msgLog(user, msg):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (user, msg,)
# CREATE TABLE MsgLog (user TEXT, msg TEXT);
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Show animal description
def _showAnimal(animal):
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT description FROM Animals WHERE animal='%s'" % (animal))
all_data = cursor.fetchone()
conn.close()
if all_data == None or len(all_data) == 0:
return MSG_SEARCH_ERROR
else:
return all_data[0]
except:
print("SQL error for arg: %s" % (animal))
return None
# List all animals
def _listAnimal():
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT animal FROM Animals")
all_data = cursor.fetchall()
conn.close()
return ", ".join([data[0] for data in all_data])
# My own reminder
def _getServerInfo(user):
if user.lower() == "dragonite":
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE ServerInfo (info TEXT);
cursor.execute("SELECT info FROM ServerInfo")
all_data = cursor.fetchone()
conn.close()
return all_data[0]
else:
return MSG_NO_DRAGONITE
# You should ask Dragonite to add you to the admin list
def _addAdmin(user, arg):
if user.lower() == "dragonite":
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (arg,)
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("INSERT INTO Admins VALUES (?)", values)
conn.commit()
conn.close()
return "Successfully add %s into admin" % (arg)
except:
return "You're already an admin"
else:
return MSG_NO_DRAGONITE
# Flag is secret. No one besides admin should see it.
def _getFlag(user):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("SELECT user FROM Admins WHERE user='%s'" % (user))
all_data = cursor.fetchone()
conn.close()
if all_data != None and len(all_data) == 1:
flag = os.getenv('FLAG', None)
return flag
else:
print("Alert: %s is not admin." % (user))
return MSG_NO_ADMIN
def runCmd(message, user):
_msgLog(user, message)
if message.lower() == "help" or message.lower() == "!help":
return MSG_HELP
elif message == "!listAll":
return _listAnimal()
elif message == ("!show"):
return MSG_ANIMAL_CMD
elif message.startswith("!show "):
return _showAnimal(message[6:])
elif message == "!serverInfo":
return _getServerInfo(user)
elif message == "!getFlag":
return _getFlag(user)
elif message[:10] == "!addAdmin ":
arg = message[10:]
return _addAdmin(user, arg)
elif message == "!hint":
return HINT_URL
else:
return ""
| []
| []
| [
"FLAG",
"SERVER_INFO"
]
| [] | ["FLAG", "SERVER_INFO"] | python | 2 | 0 | |
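botCmd.py expects an app.db whose tables match the CREATE TABLE statements quoted in its comments. The sketch below builds that schema locally and drives the dispatcher directly; the SERVER_INFO and FLAG values, and the sample animal row, are placeholders.

import os
import sqlite3

import botCmd

# Schema taken from the CREATE TABLE statements quoted in botCmd's comments.
conn = sqlite3.connect(botCmd.APP_DB)
conn.executescript("""
CREATE TABLE IF NOT EXISTS MsgLog (user TEXT, msg TEXT);
CREATE TABLE IF NOT EXISTS Animals (animal TEXT UNIQUE, description TEXT);
CREATE TABLE IF NOT EXISTS ServerInfo (info TEXT);
CREATE TABLE IF NOT EXISTS Admins (user TEXT PRIMARY KEY);
INSERT INTO ServerInfo VALUES ('placeholder');
INSERT OR IGNORE INTO Animals VALUES ('dog', 'A good boy.');
""")
conn.commit()
conn.close()

os.environ["SERVER_INFO"] = "placeholder server info"  # placeholder value
os.environ["FLAG"] = "FLAG{placeholder}"  # placeholder value

botCmd.init()
print(botCmd.runCmd("!listAll", "alice"))
print(botCmd.runCmd("!show dog", "alice"))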
tests/functional/tabloid/test_eqc_136030.py | #coding:utf-8
#
# id: functional.tabloid.eqc_136030
# title: Check ability to prepare and then run a query with parameters. The query should use an ORDER BY clause.
# description:
#   02.02.2019: removed calls to UDFs from the DB metadata - they are not used in this test and cannot be used in FB 4.0 by default.
#   Removed triggers because they have no relevance here.
# Checked on:
# 3.0.5.33097: OK, 2.782s.
# 4.0.0.1421: OK, 3.642s.
#
# tracker_id:
# min_versions: ['3.0']
# versions: 3.0
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import zipfile
#
# os.environ["ISC_USER"] = 'SYSDBA'
# os.environ["ISC_PASSWORD"] = 'masterkey'
#
# db_conn.close()
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'eqc136030.zip') )
# zf.extractall( context['temp_directory'] )
# zf.close()
#
# fbk = os.path.join(context['temp_directory'],'eqc136030.fbk')
#
# runProgram('gbak',['-rep',fbk, dsn])
#
# script="""
# set list on;
# set sqlda_display on;
# set planonly;
#
# select
# a.csoc, a.nreserc , a.coddoc , a.codgio ,
# a.macchina, a.rec_upd, a.utente_upd,
# cast(a.fblc as integer) fblc,
# cast(a.fdel as integer) fdel,
# b.tipdoc, b.desdoc, b.fblc , c.tipgio, c.desgio , c.fblc
# from docgio a
# left join doctip (a.csoc, a.nreserc) b on ( a.coddoc = b.coddoc )
# left join giotip (a.csoc, a.nreserc) c on (a.codgio = c.codgio)
# where
# a.csoc = ?
# and a.nreserc = ?
# order by a.codgio, a.coddoc;
#
# set planonly;
# set plan off;
# set sqlda_display off;
#
# select
# a.csoc, a.nreserc , a.coddoc , a.codgio ,
# a.macchina, a.rec_upd, a.utente_upd,
# cast(a.fblc as integer) fblc,
# cast(a.fdel as integer) fdel,
# b.tipdoc, b.desdoc, b.fblc , c.tipgio, c.desgio , c.fblc
# from docgio a
# left join doctip (a.csoc, a.nreserc) b on ( a.coddoc = b.coddoc )
# left join giotip (a.csoc, a.nreserc) c on (a.codgio = c.codgio)
# where
# a.csoc = 'DEM1' -- :csoc
# and a.nreserc = '' -- :nreserc
# order by a.codgio, a.coddoc;
# """
# runProgram('isql',[dsn,'-q'],script)
#
# ###############################
# # Cleanup.
# os.remove(fbk)
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
INPUT message field count: 2
01: sqltype: 448 VARYING scale: 0 subtype: 0 len: 4 charset: 0 NONE
: name: alias:
: table: owner:
02: sqltype: 448 VARYING scale: 0 subtype: 0 len: 2 charset: 0 NONE
: name: alias:
: table: owner:
PLAN JOIN (JOIN (A ORDER DOCGIO_PK, B NATURAL), C NATURAL)
OUTPUT message field count: 15
01: sqltype: 448 VARYING scale: 0 subtype: 0 len: 4 charset: 0 NONE
: name: CSOC alias: CSOC
: table: DOCGIO owner: SYSDBA
02: sqltype: 448 VARYING scale: 0 subtype: 0 len: 2 charset: 0 NONE
: name: NRESERC alias: NRESERC
: table: DOCGIO owner: SYSDBA
03: sqltype: 448 VARYING scale: 0 subtype: 0 len: 3 charset: 0 NONE
: name: CODDOC alias: CODDOC
: table: DOCGIO owner: SYSDBA
04: sqltype: 448 VARYING scale: 0 subtype: 0 len: 3 charset: 0 NONE
: name: CODGIO alias: CODGIO
: table: DOCGIO owner: SYSDBA
05: sqltype: 448 VARYING Nullable scale: 0 subtype: 0 len: 30 charset: 0 NONE
: name: MACCHINA alias: MACCHINA
: table: DOCGIO owner: SYSDBA
06: sqltype: 510 TIMESTAMP scale: 0 subtype: 0 len: 8
: name: REC_UPD alias: REC_UPD
: table: DOCGIO owner: SYSDBA
07: sqltype: 496 LONG scale: 0 subtype: 0 len: 4
: name: UTENTE_UPD alias: UTENTE_UPD
: table: DOCGIO owner: SYSDBA
08: sqltype: 496 LONG scale: 0 subtype: 0 len: 4
: name: CAST alias: FBLC
: table: owner:
09: sqltype: 496 LONG scale: 0 subtype: 0 len: 4
: name: CAST alias: FDEL
: table: owner:
10: sqltype: 448 VARYING Nullable scale: 0 subtype: 0 len: 3 charset: 0 NONE
: name: TIPDOC alias: TIPDOC
: table: DOCTIP owner: SYSDBA
11: sqltype: 448 VARYING Nullable scale: 0 subtype: 0 len: 60 charset: 0 NONE
: name: DESDOC alias: DESDOC
: table: DOCTIP owner: SYSDBA
12: sqltype: 452 TEXT Nullable scale: 0 subtype: 0 len: 1 charset: 0 NONE
: name: FBLC alias: FBLC
: table: DOCTIP owner: SYSDBA
13: sqltype: 448 VARYING Nullable scale: 0 subtype: 0 len: 3 charset: 0 NONE
: name: TIPGIO alias: TIPGIO
: table: GIOTIP owner: SYSDBA
14: sqltype: 448 VARYING Nullable scale: 0 subtype: 0 len: 60 charset: 0 NONE
: name: DESGIO alias: DESGIO
: table: GIOTIP owner: SYSDBA
15: sqltype: 452 TEXT Nullable scale: 0 subtype: 0 len: 1 charset: 0 NONE
: name: FBLC alias: FBLC
: table: GIOTIP owner: SYSDBA
CSOC DEM1
NRESERC
CODDOC AUT
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC CGE
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC CTI
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FAA
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FAV
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FTA
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FTV
CODGIO CGB
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FAA
CODGIO RAC
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FTA
CODGIO RAC
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC NCA
CODGIO RAC
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC NDA
CODGIO RAC
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FAV
CODGIO RFV
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC FTV
CODGIO RFV
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
CSOC DEM1
NRESERC
CODDOC NCV
CODGIO RFV
MACCHINA VAIO-ADAL
REC_UPD 2007-06-17 22:50:41.0000
UTENTE_UPD 1
FBLC 0
FDEL 0
TIPDOC <null>
DESDOC <null>
FBLC <null>
TIPGIO <null>
DESGIO <null>
FBLC <null>
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| []
| []
| [
"ISC_USER",
"ISC_PASSWORD"
]
| [] | ["ISC_USER", "ISC_PASSWORD"] | python | 2 | 0 | |
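The commented-out driver above restores a backup and then checks both the prepared statement's SQLDA and the ordered result set. Outside the fbtest harness, the same prepare-bind-run flow with an ORDER BY clause looks roughly like the sketch below using the fdb driver; the DSN and credentials are placeholders.

import fdb  # Firebird driver; DSN and credentials below are placeholders

con = fdb.connect(dsn="localhost:/tmp/eqc136030.fdb", user="SYSDBA", password="masterkey")
cur = con.cursor()
sql = """
    select a.csoc, a.nreserc, a.coddoc, a.codgio
    from docgio a
    where a.csoc = ? and a.nreserc = ?
    order by a.codgio, a.coddoc
"""
# The statement is prepared once and the two parameters are bound positionally.
cur.execute(sql, ("DEM1", ""))
for row in cur.fetchall():
    print(row)
con.close()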
create_db.py | import os
import sys
import psycopg2
import sqlalchemy as sa
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
def main():
database = 'postgres'
user = 'postgres'
url = sa.engine.url.URL(
'postgresql', host=os.environ['PGHOST'], database=database,
username=user, password=os.environ.get('PGPASSWORD', None)
)
ddl_text = sa.DDL('CREATE DATABASE {};'.format(os.environ['PGDATABASE']))
engine = sa.create_engine(url)
engine.raw_connection().set_isolation_level(
ISOLATION_LEVEL_AUTOCOMMIT
)
try:
engine.execute(ddl_text)
        sys.stdout.write('Created database successfully.\n')
except psycopg2.Error:
raise SystemExit('Could not connect to PostgreSQL.\n{0}'.format(sys.exc_info()))
if __name__ == '__main__':
main()
| []
| []
| [
"PGHOST",
"PGPASSWORD",
"PGDATABASE"
]
| [] | ["PGHOST", "PGPASSWORD", "PGDATABASE"] | python | 3 | 0 | |
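create_db.py is driven entirely by the standard PostgreSQL environment variables; a typical way to run it from Python (with placeholder connection settings) is:

import os

import create_db

os.environ["PGHOST"] = "localhost"      # placeholder
os.environ["PGDATABASE"] = "myapp_dev"  # placeholder; the database to be created
os.environ["PGPASSWORD"] = "secret"     # optional if the server trusts the connection

create_db.main()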
bertviz/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: str = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| []
| []
| [
"PYTORCH_PRETRAINED_BERT_CACHE"
]
| [] | ["PYTORCH_PRETRAINED_BERT_CACHE"] | python | 1 | 0 | |
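In practice the module is used through cached_path, which accepts local paths, http(s):// URLs and s3:// URLs, and caches downloads (keyed by URL hash plus ETag) under PYTORCH_PRETRAINED_BERT_CACHE or an explicit cache_dir. A small usage sketch, with an illustrative URL:

import os
import tempfile

from bertviz.pytorch_pretrained_bert.file_utils import cached_path, filename_to_url

cache_dir = tempfile.mkdtemp()
url = "https://raw.githubusercontent.com/huggingface/transformers/main/README.md"  # example URL

local_path = cached_path(url, cache_dir=cache_dir)        # downloads on the first call
local_path_again = cached_path(url, cache_dir=cache_dir)  # served from the cache
assert local_path == local_path_again

stored_url, etag = filename_to_url(os.path.basename(local_path), cache_dir=cache_dir)
assert stored_url == url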
enterprise/internal/batches/webhooks/github_test.go | package webhooks
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
gh "github.com/google/go-github/v28/github"
"github.com/sourcegraph/sourcegraph/cmd/frontend/webhooks"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/sources"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/store"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/syncer"
ct "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/testing"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/timeutil"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/schema"
)
// Run from integration_test.go
func testGitHubWebhook(db *sql.DB, userID int32) func(*testing.T) {
return func(t *testing.T) {
now := timeutil.Now()
clock := func() time.Time { return now }
ctx := context.Background()
rcache.SetupForTest(t)
ct.TruncateTables(t, db, "changeset_events", "changesets")
cf, save := httptestutil.NewGitHubRecorderFactory(t, *update, "github-webhooks")
defer save()
secret := "secret"
token := os.Getenv("GITHUB_TOKEN")
if token == "" {
token = "no-GITHUB_TOKEN-set"
}
repoStore := database.Repos(db)
esStore := database.ExternalServices(db)
extSvc := &types.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GitHub",
Config: ct.MarshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: token,
Repos: []string{"sourcegraph/sourcegraph"},
Webhooks: []*schema.GitHubWebhook{{Org: "sourcegraph", Secret: secret}},
}),
}
err := esStore.Upsert(ctx, extSvc)
if err != nil {
t.Fatal(t)
}
githubSrc, err := repos.NewGithubSource(extSvc, cf)
if err != nil {
t.Fatal(t)
}
githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph")
if err != nil {
t.Fatal(err)
}
err = repoStore.Create(ctx, githubRepo)
if err != nil {
t.Fatal(err)
}
s := store.NewWithClock(db, nil, clock)
sourcer := sources.NewSourcer(cf)
spec := &btypes.BatchSpec{
NamespaceUserID: userID,
UserID: userID,
}
if err := s.CreateBatchSpec(ctx, spec); err != nil {
t.Fatal(err)
}
batchChange := &btypes.BatchChange{
Name: "Test batch changes",
Description: "Testing THE WEBHOOKS",
InitialApplierID: userID,
NamespaceUserID: userID,
LastApplierID: userID,
LastAppliedAt: clock(),
BatchSpecID: spec.ID,
}
err = s.CreateBatchChange(ctx, batchChange)
if err != nil {
t.Fatal(err)
}
// NOTE: Your sample payload should apply to a PR with the number matching below
changeset := &btypes.Changeset{
RepoID: githubRepo.ID,
ExternalID: "10156",
ExternalServiceType: githubRepo.ExternalRepo.ServiceType,
BatchChanges: []btypes.BatchChangeAssoc{{BatchChangeID: batchChange.ID}},
}
err = s.CreateChangeset(ctx, changeset)
if err != nil {
t.Fatal(err)
}
// Set up mocks to prevent the diffstat computation from trying to
// use a real gitserver, and so we can control what diff is used to
// create the diffstat.
state := ct.MockChangesetSyncState(&protocol.RepoInfo{
Name: "repo",
VCS: protocol.VCSInfo{URL: "https://example.com/repo/"},
})
defer state.Unmock()
src, err := sourcer.ForRepo(ctx, s, githubRepo)
if err != nil {
t.Fatal(err)
}
err = syncer.SyncChangeset(ctx, s, src, githubRepo, changeset)
if err != nil {
t.Fatal(err)
}
hook := NewGitHubWebhook(s)
fixtureFiles, err := filepath.Glob("testdata/fixtures/webhooks/github/*.json")
if err != nil {
t.Fatal(err)
}
for _, fixtureFile := range fixtureFiles {
_, name := path.Split(fixtureFile)
name = strings.TrimSuffix(name, ".json")
t.Run(name, func(t *testing.T) {
ct.TruncateTables(t, db, "changeset_events")
tc := loadWebhookTestCase(t, fixtureFile)
// Send all events twice to ensure we are idempotent
for i := 0; i < 2; i++ {
for _, event := range tc.Payloads {
handler := webhooks.GitHubWebhook{
ExternalServices: esStore,
}
hook.Register(&handler)
u := extsvc.WebhookURL(extsvc.TypeGitHub, extSvc.ID, "https://example.com/")
req, err := http.NewRequest("POST", u, bytes.NewReader(event.Data))
if err != nil {
t.Fatal(err)
}
req.Header.Set("X-Github-Event", event.PayloadType)
req.Header.Set("X-Hub-Signature", sign(t, event.Data, []byte(secret)))
rec := httptest.NewRecorder()
handler.ServeHTTP(rec, req)
resp := rec.Result()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Non 200 code: %v", resp.StatusCode)
}
}
}
have, _, err := s.ListChangesetEvents(ctx, store.ListChangesetEventsOpts{})
if err != nil {
t.Fatal(err)
}
// Overwrite and format test case
if *update {
tc.ChangesetEvents = have
data, err := json.MarshalIndent(tc, " ", " ")
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(fixtureFile, data, 0666)
if err != nil {
t.Fatal(err)
}
}
opts := []cmp.Option{
cmpopts.IgnoreFields(btypes.ChangesetEvent{}, "CreatedAt"),
cmpopts.IgnoreFields(btypes.ChangesetEvent{}, "UpdatedAt"),
}
if diff := cmp.Diff(tc.ChangesetEvents, have, opts...); diff != "" {
t.Error(diff)
}
})
}
t.Run("unexpected payload", func(t *testing.T) {
// GitHub pull request events are processed based on the action
// embedded within them, but that action is just a string that could
// be anything. We need to ensure that this is hardened against
// unexpected input.
n := 10156
action := "this is a bad action"
if err := hook.handleGitHubWebhook(ctx, extSvc, &gh.PullRequestEvent{
Number: &n,
Repo: &gh.Repository{
NodeID: &githubRepo.ExternalRepo.ID,
},
Action: &action,
}); err == nil {
t.Error("unexpected nil error")
}
})
}
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
sqlhelper/__init__.py | import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
# DATABASE_URL must be set; normalize Heroku-style "postgres://" URLs,
# which newer SQLAlchemy releases reject, to "postgresql://".
DB_URI = os.environ.get("DATABASE_URL", None)
if DB_URI is None:
    raise RuntimeError("The DATABASE_URL environment variable is not set")
DB_URI = DB_URI.replace('postgres://', 'postgresql://')
def start() -> scoped_session:
engine = create_engine(DB_URI)
BASE.metadata.bind = engine
BASE.metadata.create_all(engine)
return scoped_session(sessionmaker(bind=engine, autoflush=False))
BASE = declarative_base()
SESSION = start()
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
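sqlhelper exposes BASE and SESSION so model modules can declare tables against a single engine; DATABASE_URL must be set before the module is imported. A minimal sketch of a model built on top of it (the users table is a hypothetical example):

from sqlalchemy import Column, String

from sqlhelper import BASE, SESSION

class Users(BASE):  # hypothetical example table
    __tablename__ = "users"
    user_id = Column(String(14), primary_key=True)
    user_name = Column(String(128))

# start() already ran create_all at import time, so tables declared afterwards
# need another create_all against the engine bound to BASE.metadata.
BASE.metadata.create_all()

SESSION.add(Users(user_id="12345", user_name="alice"))
SESSION.commit()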
pkg/draft/pack/load_test.go | package pack
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
const (
packName = "foo"
dockerfileName = "Dockerfile"
expectedDockerfile = `FROM python:onbuild
CMD [ "python", "./hello.py" ]
EXPOSE 80
`
)
func TestFromDir(t *testing.T) {
pack, err := FromDir(filepath.Join("testdata", "pack-python"))
if err != nil {
t.Fatalf("could not load python pack: %v", err)
}
if pack.Chart == nil {
t.Errorf("expected chart to be non-nil")
}
defer func() {
for _, f := range pack.Files {
f.file.Close()
}
}()
if _, ok := pack.Files["README.md"]; ok {
t.Errorf("expected README.md to not have been loaded")
}
// check that the Dockerfile was loaded
dockerfile, ok := pack.Files[dockerfileName]
if !ok {
t.Error("expected Dockerfile to have been loaded")
}
dockerfileContents, err := ioutil.ReadAll(dockerfile.file)
if err != nil {
t.Errorf("expected Dockerfile to be readable, got %v", err)
}
if string(dockerfileContents) != expectedDockerfile {
t.Errorf("expected Dockerfile == expected file contents, got '%v'", dockerfileContents)
}
_, ok = pack.Files[filepath.Join("scripts", "some-script.sh")]
if !ok {
t.Errorf("Expected scripts/some-script.sh to have been loaded but wasn't")
}
if _, err := FromDir("dir-does-not-exist"); err == nil {
t.Errorf("expected err to be non-nil when path does not exist")
}
// post-cleanup: switch back to the cwd so other tests continue to work.
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.Chdir(cwd); err != nil {
t.Fatal(err)
}
}()
dir, err := ioutil.TempDir("", "draft-pack-test")
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(dir); err != nil {
t.Fatal(err)
}
if os.Getenv("CI") != "" {
t.Skip("skipping file permission mode tests on CI servers")
}
if err := os.MkdirAll(filepath.Join(dir, packName, dockerfileName), 0755); err != nil {
t.Fatal(err)
}
// load a pack with an un-readable Dockerfile (file perms 0000)
if err := os.Chmod(filepath.Join(dir, packName, dockerfileName), 0000); err != nil {
t.Fatalf("dir %s: %s", dir, err)
}
if _, err := FromDir(dir); err == nil {
t.Errorf("expected err to be non-nil when reading the Dockerfile")
}
// revert file perms for the Dockerfile in prep for the detect script
if err := os.Chmod(filepath.Join(dir, packName, dockerfileName), 0644); err != nil {
t.Fatal(err)
}
// remove the dir from under our feet to force filepath.Abs to fail
os.RemoveAll(dir)
if _, err := FromDir("."); err == nil {
t.Errorf("expected err to be non-nil when filepath.Abs(\".\") should fail")
}
if err := os.Chdir(cwd); err != nil {
t.Fatal(err)
}
}
func TestExtractFiles(t *testing.T) {
packFiles, err := extractFiles(filepath.Join("testdata", "DirWithNestedDirs"), "")
if err != nil {
t.Fatalf("Did not expect err but got err: %v", err)
}
if len(packFiles) != 4 {
t.Errorf("Expected 4 files to be extracted but got %v", len(packFiles))
}
}
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
cmd/cmd.go | package cmd
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path"
"path/filepath"
"reflect"
"strings"
"syscall"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/gobuffalo/packr/v2"
"github.com/gobwas/glob"
"github.com/mholt/archiver/v3"
"github.com/spf13/cast"
"github.com/thanhpk/randstr"
"github.com/thoas/go-funk"
)
// Version - klarista cli version
var Version = "latest"
// AssetWriterFunc - Asset writer function
type AssetWriterFunc = func(args ...interface{})
func createAssetWriter(pwd, localStateDir string, box *packr.Box) AssetWriterFunc {
// These assets will only be written if they do not already exist
politeAssets := map[string]bool{
"kubeconfig.yaml": true,
"tf/terraform.tfstate": true,
"tf_state/terraform.tfstate": true,
"tf_vars/terraform.tfstate": true,
}
return func(args ...interface{}) {
var pattern string
if len(args) == 1 {
pattern = args[0].(string)
}
var g glob.Glob
if pattern != "" {
g = glob.MustCompile(pattern)
}
useWorkDir(pwd, func() {
for _, file := range box.List() {
fp := path.Join(localStateDir, file)
if g != nil && !g.Match(file) {
Logger.Debugf("Skipping asset %s that does not match glob %s", file, pattern)
continue
}
if politeAssets[file] {
_, err := os.Stat(fp)
if os.IsNotExist(err) {
// noop
} else {
// The file exists; continue
continue
}
}
Logger.Debugf("Writing asset %s", file)
data, err := box.Find(file)
if err != nil {
panic(err)
}
if err = os.MkdirAll(path.Dir(fp), 0755); err != nil {
panic(err)
}
if err = ioutil.WriteFile(fp, data, 0644); err != nil {
panic(err)
}
}
})
}
}
// InputProcessorFunc - Input processor function
type InputProcessorFunc = func([]string) []string
func createInputProcessor(pwd, localStateDir string, box *packr.Box, writeAssets AssetWriterFunc) InputProcessorFunc {
return func(inputPaths []string) []string {
inputIds := []string{}
useWorkDir(localStateDir, func() {
for i, input := range inputPaths {
if !filepath.IsAbs(input) {
input = path.Join(pwd, input)
}
if _, err := os.Stat(input); err != nil {
Logger.Errorf("Input file %s does not exist", input)
continue
}
inputBytes, err := ioutil.ReadFile(input)
if err != nil {
panic(err)
}
inputIds = append(inputIds, fmt.Sprintf("%03d.tfvars", i))
box.AddBytes(
path.Join("tf_vars", "inputs", inputIds[i]),
inputBytes,
)
box.AddBytes(
path.Join("tf_state", "inputs", inputIds[i]),
inputBytes,
)
box.AddBytes(
path.Join("tf", "inputs", inputIds[i]),
inputBytes,
)
}
writeAssets("*/inputs/*")
})
if len(inputIds) == 0 {
Logger.Fatalf(`No input files were found. You must explicitly pass "--input <file>"`)
}
return inputIds
}
}
func getAutoFlags(override bool) string {
if override || os.Getenv("CI") != "" {
return "-auto-approve"
}
return ""
}
func getInitialInputs(localStateDir string) []string {
var localInputDir = path.Join(localStateDir, "tf_vars/inputs")
var initialInputs = []string{"input.tfvars"}
var err error
for i, input := range initialInputs {
_, err = os.Stat(input)
if err == nil {
initialInputs[i], err = filepath.Abs(input)
if err != nil {
panic(err)
}
} else {
Logger.Debug(err)
initialInputs = []string{}
break
}
}
if len(initialInputs) == 0 {
if _, err := os.Stat(localInputDir); err == nil {
var inputFileInfo []os.FileInfo
inputFileInfo, err = ioutil.ReadDir(localInputDir)
if err != nil {
panic(err)
}
initialInputs = cast.ToStringSlice(
funk.Map(inputFileInfo, func(file os.FileInfo) string {
return path.Join(localInputDir, file.Name())
}),
)
}
}
Logger.Infof(
"Reading input from [\n\t%s,\n]",
strings.Join(initialInputs, ",\n\t"),
)
return initialInputs
}
func getVarFileFlags(inputIds []string) string {
return strings.Join(
cast.ToStringSlice(
funk.Map(inputIds, func(id string) string {
return fmt.Sprintf(`-var-file "inputs/%s"`, id)
}),
),
" ",
)
}
func getOutputJSONBytes() ([]byte, error) {
command := exec.Command("terraform", "output", "-json")
outputBytes, err := command.Output()
if err != nil {
return nil, err
}
var output map[string]interface{}
err = json.Unmarshal(outputBytes, &output)
if err != nil {
return nil, err
}
for key, value := range output {
output[key] = value.(map[string]interface{})["value"]
}
outputBytes, err = json.MarshalIndent(output, "", " ")
if err != nil {
return nil, err
}
return outputBytes, nil
}
func getOutputJSON() (map[string]interface{}, error) {
outputBytes, err := getOutputJSONBytes()
if err != nil {
return nil, err
}
var output map[string]interface{}
err = json.Unmarshal(outputBytes, &output)
if err != nil {
return nil, err
}
return output, nil
}
func isDebug() bool {
return strings.Contains(os.Getenv("DEBUG"), "klarista")
}
func setAwsEnv(localStateDir string, inputIds []string) {
useWorkDir(path.Join(localStateDir, "tf_vars"), func() {
shell(
"bash",
"-c",
fmt.Sprintf(
`terraform apply -auto-approve -compact-warnings -refresh=false %s`,
getVarFileFlags(inputIds),
),
)
output, err := getOutputJSON()
if err != nil {
panic(err)
}
awsProfile := output["aws_profile"].(string)
awsRegion := output["aws_region"].(string)
if err = os.Setenv("AWS_PROFILE", awsProfile); err != nil {
panic(err)
}
if err = os.Setenv("AWS_REGION", awsRegion); err != nil {
panic(err)
}
})
}
func shell(command string, args ...string) {
filteredArgs := funk.Compact(args).([]string)
cmd := exec.Command(command, filteredArgs...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stderr
cmd.Stderr = os.Stderr
sigs := make(chan os.Signal, 1)
done := make(chan bool)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
if <-done {
return
}
sig := <-sigs
Logger.Debug("GOT SIGNAL ", sig)
if cmd.ProcessState != nil && !cmd.ProcessState.Exited() {
if err := cmd.Process.Kill(); err != nil {
Logger.Fatal("Failed to kill process: ", err)
}
}
}()
Logger.Debugf("%s %s", command, strings.Join(filteredArgs, " "))
err := cmd.Run()
if err != nil {
panic(err)
}
done <- true
}
func useWorkDir(wd string, cb func()) {
// Get the pwd
originalWd, err := os.Getwd()
if err != nil {
panic(err)
}
wd, err = filepath.Abs(wd)
if err != nil {
panic(err)
}
if wd == originalWd {
Logger.Debugf(`Already in WD %s`, wd)
cb()
} else {
// Change to the target wd
Logger.Debugf(`Using WD %s`, wd)
if err = os.Chdir(wd); err != nil {
panic(err)
}
defer func() {
// Return to the original wd
Logger.Debugf(`Returning to WD %s`, originalWd)
if err = os.Chdir(originalWd); err != nil {
panic(err)
}
}()
// Do work
cb()
}
}
// UseTempDirCallback - useTempDir callback function
type UseTempDirCallback = func(string)
func useTempDir(args ...interface{}) {
var autoremove bool = true
var cb UseTempDirCallback
var name string
for _, v := range args {
switch arg := v.(type) {
case bool:
autoremove = arg
case string:
name = arg
default:
cb = func(tmpdir string) {
fn := reflect.ValueOf(arg)
fnArgs := []reflect.Value{}
if fn.Type().NumIn() == 1 {
fnArgs = append(fnArgs, reflect.ValueOf(tmpdir))
}
reflect.ValueOf(arg).Call(fnArgs)
}
}
}
if name == "" {
name = randstr.Hex(8)
}
tmpdir := path.Join(os.TempDir(), name)
if err := os.MkdirAll(tmpdir, 0755); err != nil {
panic(err)
}
if autoremove {
defer os.RemoveAll(tmpdir)
}
useWorkDir(tmpdir, func() {
cb(tmpdir)
})
}
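// useRemoteState pulls klarista.state.tar from the given S3 bucket into a per-cluster temp dir,
// runs cb inside it, then re-archives the directory (skipping .terraform and *.backup files) and uploads it back.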
func useRemoteState(clusterName, bucket string, cb func()) {
remoteStateKey := "klarista.state.tar"
sess := session.Must(session.NewSession())
uploader := s3manager.NewUploader(sess)
downloader := s3manager.NewDownloader(sess)
useTempDir(func(stateTmpDir string) {
localStateFilePath := path.Join(stateTmpDir, remoteStateKey)
stateFile, err := os.Create(localStateFilePath)
if err != nil {
panic(fmt.Errorf("Failed to create file %q, %v", localStateFilePath, err))
}
_, err = downloader.Download(stateFile, &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(remoteStateKey),
})
useTempDir(clusterName, false, func() {
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchKey:
Logger.Warn(aerr.Error())
case s3.ErrCodeNoSuchBucket:
Logger.Error(aerr.Error())
default:
panic(aerr.Error())
}
} else {
panic(fmt.Errorf("Failed to download file, %v", err))
}
} else {
Logger.Debugf("Reading state from s3://%s/%s", bucket, remoteStateKey)
tar := &archiver.Tar{
ImplicitTopLevelFolder: false,
MkdirAll: true,
OverwriteExisting: true,
StripComponents: 0,
}
if err := tar.Unarchive(localStateFilePath, "."); err != nil {
panic(err)
}
}
defer func() {
type ArchiveElement struct {
Body []byte
ModTime time.Time
Path string
Size int64
}
var archiveContent []*ArchiveElement
filepath.Walk(".", func(fpath string, info os.FileInfo, err error) error {
if err != nil {
panic(err)
}
if info.IsDir() {
if info.Name() == ".terraform" {
return filepath.SkipDir
}
} else {
if strings.HasSuffix(info.Name(), ".backup") {
return nil
}
body, err := ioutil.ReadFile(fpath)
if err != nil {
panic(err)
}
Logger.Debugf(`Adding "%s" to state file`, fpath)
archiveContent = append(archiveContent, &ArchiveElement{
Body: body,
ModTime: info.ModTime(),
Path: fpath,
Size: info.Size(),
})
}
return nil
})
var data bytes.Buffer
writer := tar.NewWriter(&data)
for _, file := range archiveContent {
hdr := &tar.Header{
Name: file.Path,
Mode: 0755,
ModTime: file.ModTime,
Size: file.Size,
}
if err := writer.WriteHeader(hdr); err != nil {
panic(err)
}
if _, err := writer.Write(file.Body); err != nil {
panic(err)
}
}
if err := writer.Close(); err != nil {
panic(err)
}
if err := stateFile.Truncate(0); err != nil {
panic(err)
}
if _, err := stateFile.Write(data.Bytes()); err != nil {
panic(err)
}
if _, err := stateFile.Seek(0, 0); err != nil {
panic(err)
}
Logger.Infof("Writing state to s3://%s/%s", bucket, remoteStateKey)
result, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(bucket),
Key: aws.String(remoteStateKey),
Body: stateFile,
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchBucket:
Logger.Warn(aerr.Error())
// noop
default:
panic(aerr.Error())
}
} else {
panic(fmt.Errorf("Failed to upload file, %v", err))
}
} else {
Logger.Infof("State written successfully to %s", result.Location)
}
}()
cb()
})
})
}
| [
"\"CI\"",
"\"DEBUG\""
]
| []
| [
"CI",
"DEBUG"
]
| [] | ["CI", "DEBUG"] | go | 2 | 0 | |
src/productcatalogservice/server.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"errors"
pb "github.com/GoogleCloudPlatform/microservices-demo/src/productcatalogservice/genproto"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"github.com/golang/protobuf/jsonpb"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/jaeger"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)
var (
cat pb.ListProductsResponse
catalogMutex *sync.Mutex
log *logrus.Logger
extraLatency time.Duration
port = "3550"
reloadCatalog bool
)
func init() {
log = logrus.New()
log.Formatter = &logrus.JSONFormatter{
FieldMap: logrus.FieldMap{
logrus.FieldKeyTime: "timestamp",
logrus.FieldKeyLevel: "severity",
logrus.FieldKeyMsg: "message",
},
TimestampFormat: time.RFC3339Nano,
}
log.Out = os.Stdout
catalogMutex = &sync.Mutex{}
err := readCatalogFile(&cat)
if err != nil {
log.Warnf("could not parse product catalog")
}
}
func main() {
if os.Getenv("DISABLE_TRACING") == "" {
log.Info("Tracing enabled.")
initOpenTelemetry(log)
} else {
log.Info("Tracing disabled.")
}
flag.Parse()
// set injected latency
if s := os.Getenv("EXTRA_LATENCY"); s != "" {
v, err := time.ParseDuration(s)
if err != nil {
log.Fatalf("failed to parse EXTRA_LATENCY (%s) as time.Duration: %+v", v, err)
}
extraLatency = v
log.Infof("extra latency enabled (duration: %v)", extraLatency)
} else {
extraLatency = time.Duration(0)
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGUSR1, syscall.SIGUSR2)
go func() {
for {
sig := <-sigs
log.Printf("Received signal: %s", sig)
if sig == syscall.SIGUSR1 {
reloadCatalog = true
log.Infof("Enable catalog reloading")
} else {
reloadCatalog = false
log.Infof("Disable catalog reloading")
}
}
}()
if os.Getenv("PORT") != "" {
port = os.Getenv("PORT")
}
log.Infof("starting grpc server at :%s", port)
run(port)
select {}
}
func run(port string) string {
l, err := net.Listen("tcp", fmt.Sprintf(":%s", port))
if err != nil {
log.Fatal(err)
}
var srv *grpc.Server
if os.Getenv("DISABLE_TRACING") == "" {
srv = grpc.NewServer(
grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()),
grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()),
)
} else {
srv = grpc.NewServer()
}
svc := &productCatalog{}
pb.RegisterProductCatalogServiceServer(srv, svc)
healthpb.RegisterHealthServer(srv, svc)
go srv.Serve(l)
return l.Addr().String()
}
// for reference, see also:
// https://github.com/open-telemetry/opentelemetry-go/blob/main/example/jaeger/main.go
func createTracerProvider(log logrus.FieldLogger) (*tracesdk.TracerProvider, error) {
// Create the Jaeger exporter
svcAddr := os.Getenv("JAEGER_SERVICE_ADDR")
if svcAddr == "" {
return nil, errors.New("missing JAEGER_SERVICE_ADDR, can't initialize Jaeger exporter")
}
splitJaegerAddr := strings.Split(svcAddr, ":")
jaegerAgentHost := splitJaegerAddr[0]
jaegerAgentPort := splitJaegerAddr[1]
// exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url)))
exporter, err := jaeger.New(jaeger.WithAgentEndpoint(jaeger.WithAgentHost(jaegerAgentHost), jaeger.WithAgentPort(jaegerAgentPort)))
if err != nil {
return nil, err
}
log.Info("created jaeger exporter to collector at " + svcAddr)
tp := tracesdk.NewTracerProvider(
tracesdk.WithBatcher(exporter, tracesdk.WithMaxExportBatchSize(95)),
// see https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace#ParentBased
tracesdk.WithSampler(tracesdk.ParentBased(tracesdk.AlwaysSample())),
tracesdk.WithResource(resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String("productcatalogservice"),
)),
)
return tp, nil
}
func initOpenTelemetry(log logrus.FieldLogger) {
tp, err := createTracerProvider(log)
if err != nil {
log.Fatal(err)
}
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
}
type productCatalog struct{}
func readCatalogFile(catalog *pb.ListProductsResponse) error {
catalogMutex.Lock()
defer catalogMutex.Unlock()
catalogJSON, err := ioutil.ReadFile("products.json")
if err != nil {
log.Fatalf("failed to open product catalog json file: %v", err)
return err
}
if err := jsonpb.Unmarshal(bytes.NewReader(catalogJSON), catalog); err != nil {
log.Warnf("failed to parse the catalog JSON: %v", err)
return err
}
log.Info("successfully parsed product catalog json")
return nil
}
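// parseCatalog returns the in-memory catalog, re-reading products.json first when catalog
// reloading has been enabled via SIGUSR1 or when the catalog is still empty.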
func parseCatalog() []*pb.Product {
if reloadCatalog || len(cat.Products) == 0 {
err := readCatalogFile(&cat)
if err != nil {
return []*pb.Product{}
}
}
return cat.Products
}
func (p *productCatalog) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}
func (p *productCatalog) Watch(req *healthpb.HealthCheckRequest, ws healthpb.Health_WatchServer) error {
return status.Errorf(codes.Unimplemented, "health check via Watch not implemented")
}
func (p *productCatalog) ListProducts(context.Context, *pb.Empty) (*pb.ListProductsResponse, error) {
time.Sleep(extraLatency)
return &pb.ListProductsResponse{Products: parseCatalog()}, nil
}
func (p *productCatalog) GetProduct(ctx context.Context, req *pb.GetProductRequest) (*pb.Product, error) {
time.Sleep(extraLatency)
var found *pb.Product
for i := 0; i < len(parseCatalog()); i++ {
if req.Id == parseCatalog()[i].Id {
found = parseCatalog()[i]
}
}
if found == nil {
return nil, status.Errorf(codes.NotFound, "no product with ID %s", req.Id)
}
return found, nil
}
func (p *productCatalog) SearchProducts(ctx context.Context, req *pb.SearchProductsRequest) (*pb.SearchProductsResponse, error) {
time.Sleep(extraLatency)
// Interpret query as a substring match in name or description.
var ps []*pb.Product
for _, p := range parseCatalog() {
if strings.Contains(strings.ToLower(p.Name), strings.ToLower(req.Query)) ||
strings.Contains(strings.ToLower(p.Description), strings.ToLower(req.Query)) {
ps = append(ps, p)
}
}
return &pb.SearchProductsResponse{Results: ps}, nil
}
| [
"\"DISABLE_TRACING\"",
"\"EXTRA_LATENCY\"",
"\"PORT\"",
"\"PORT\"",
"\"DISABLE_TRACING\"",
"\"JAEGER_SERVICE_ADDR\""
]
| []
| [
"PORT",
"DISABLE_TRACING",
"JAEGER_SERVICE_ADDR",
"EXTRA_LATENCY"
]
| [] | ["PORT", "DISABLE_TRACING", "JAEGER_SERVICE_ADDR", "EXTRA_LATENCY"] | go | 4 | 0 | |
main.go | package main
import (
"flag"
"net/http"
"strconv"
"strings"
cmap "github.com/orcaman/concurrent-map"
log "github.com/sirupsen/logrus"
"os"
// Import to initialize client auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/facebookgo/flagenv"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
settings = cli.New()
clients = cmap.New()
stats = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "helm_chart_info",
Help: "Information on helm releases",
}, []string{
"chart",
"release",
"version",
"appVersion",
"updated",
"namespace",
"currentRevision",
})
updateTime = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "helm_chart_update_time",
Help: "Last time when helm release was updated",
}, []string{
"chart",
"release",
"version",
"appVersion",
"status",
"namespace",
"currentRevision",
})
namespaces = flag.String("namespaces", "", "namespaces to monitor. Defaults to all")
statusCodeMap = map[string]float64{
"unknown": 0,
"deployed": 1,
"uninstalled": 2,
"superseded": 3,
"failed": -1,
"uninstalling": 5,
"pending-install": 6,
"pending-upgrade": 7,
"pending-rollback": 8,
}
prometheusHandler = promhttp.Handler()
)
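// runStats resets and repopulates the helm_chart_info and helm_chart_update_time gauges by
// listing all releases in every namespace currently tracked in clients.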
func runStats() {
stats.Reset()
for _, client := range clients.Items() {
list := action.NewList(client.(*action.Configuration))
list.StateMask = action.ListAll
items, err := list.Run()
if err != nil {
log.Warnf("got error while listing %v", err)
continue
}
for _, item := range items {
chart := item.Chart.Name()
currentRevision := strconv.Itoa(item.Version)
releaseName := item.Name
version := item.Chart.Metadata.Version
appVersion := item.Chart.AppVersion()
updateTimestamp := item.Info.LastDeployed.Unix()
updated := strconv.FormatInt(updateTimestamp*1000, 10)
namespace := item.Namespace
statusName := item.Info.Status.String()
status := statusCodeMap[statusName]
stats.WithLabelValues(chart, releaseName, version, appVersion, updated, namespace, currentRevision).Set(status)
updateTime.WithLabelValues(chart, releaseName, version, appVersion, statusName, namespace, currentRevision).Set(float64(updateTimestamp))
}
}
}
func newHelmStatsHandler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
runStats()
prometheusHandler.ServeHTTP(w, r)
}
}
func healthz(w http.ResponseWriter, r *http.Request) {
}
func connect(namespace string) {
actionConfig := new(action.Configuration)
err := actionConfig.Init(settings.RESTClientGetter(), namespace, os.Getenv("HELM_DRIVER"), log.Infof)
if err != nil {
log.Warnf("failed to connect to %s with %v", namespace, err)
} else {
log.Infof("Watching namespace %s", namespace)
clients.Set(namespace, actionConfig)
}
}
func informer() {
actionConfig := new(action.Configuration)
err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), os.Getenv("HELM_DRIVER"), log.Infof)
if err != nil {
log.Fatal(err)
}
clientset, err := actionConfig.KubernetesClientSet()
if err != nil {
log.Fatal(err)
}
factory := informers.NewSharedInformerFactory(clientset, 0)
informer := factory.Core().V1().Namespaces().Informer()
stopper := make(chan struct{})
defer close(stopper)
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
// "k8s.io/apimachinery/pkg/apis/meta/v1" provides an Object
// interface that allows us to get metadata easily
mObj := obj.(v1.Object)
connect(mObj.GetName())
},
DeleteFunc: func(obj interface{}) {
mObj := obj.(v1.Object)
log.Infof("Removing namespace %s", mObj.GetName())
clients.Remove(mObj.GetName())
},
})
informer.Run(stopper)
}
func main() {
flagenv.Parse()
flag.Parse()
if namespaces == nil || *namespaces == "" {
go informer()
} else {
for _, namespace := range strings.Split(*namespaces, ",") {
connect(namespace)
}
}
http.HandleFunc("/metrics", newHelmStatsHandler())
http.HandleFunc("/healthz", healthz)
http.ListenAndServe(":9571", nil)
}
| [
"\"HELM_DRIVER\"",
"\"HELM_DRIVER\""
]
| []
| [
"HELM_DRIVER"
]
| [] | ["HELM_DRIVER"] | go | 1 | 0 | |
fedlab_benchmarks/fedmgda+/standalone.py | from json import load
import os
import argparse
import random
from copy import deepcopy
import torchvision
import torchvision.transforms as transforms
from torch import nn
import sys
import torch
import numpy as np
import cvxopt
torch.manual_seed(0)
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.utils.aggregator import Aggregators
from fedlab.utils.serialization import SerializationTool
from fedlab.utils.functional import evaluate
from fedlab.utils.functional import get_best_gpu, load_dict
sys.path.append("../")
from models.cnn import CNN_MNIST
def quadprog(Q, q, G, h, A, b):
"""
Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
Output: Numpy array of the solution
"""
Q = cvxopt.matrix(Q.tolist())
q = cvxopt.matrix(q.tolist(), tc='d')
G = cvxopt.matrix(G.tolist())
h = cvxopt.matrix(h.tolist())
A = cvxopt.matrix(A.tolist())
b = cvxopt.matrix(b.tolist(), tc='d')
sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
return np.array(sol['x'])
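# Solve the FedMGDA+ quadratic program: minimize ||J_t^T lambda||^2 subject to sum(lambda) = 1
# and each lambda_i staying within epsilon of lambda0_i (clipped to [0, 1]).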
def optim_lambdas(gradients, lambda0):
epsilon = 0.5
n = len(gradients)
J_t = [grad.numpy() for grad in gradients]
J_t = np.array(J_t)
# target function
Q = 2 * np.dot(J_t, J_t.T)
q = np.array([[0] for i in range(n)])
# equality constraint
A = np.ones(n).T
b = np.array([1])
# boundary
lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])
ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])
G = np.zeros((2 * n, n))
for i in range(n):
G[i][i] = -1
G[n + i][i] = 1
h = np.zeros((2 * n, 1))
for i in range(n):
h[i] = -lb[i]
h[n + i] = ub[i]
res = quadprog(Q, q, G, h, A, b)
return res
# usage: python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --lr <learning_rate>
# configuration
parser = argparse.ArgumentParser(description="Standalone training example")
parser.add_argument("--total_client", type=int, default=10)
parser.add_argument("--com_round", type=int, default=5)
parser.add_argument("--sample_ratio", type=float)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)
args = parser.parse_args()
# get raw dataset
root = "../datasets/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
testset = torchvision.datasets.MNIST(root=root,
train=False,
download=True,
transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(testset,
batch_size=len(testset),
drop_last=False,
shuffle=False)
# setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
gpu = get_best_gpu()
model = CNN_MNIST().cuda(gpu)
# FL settings
num_per_round = int(args.total_client * args.sample_ratio)
aggregator = Aggregators.fedavg_aggregate
total_client_num = args.total_client # total number of clients
data_indices = load_dict("./mnist_noniid.pkl")
# fedlab setup
local_model = deepcopy(model)
trainer = SubsetSerialTrainer(model=local_model,
dataset=trainset,
data_slices=data_indices,
aggregator=aggregator,
args={
"batch_size": args.batch_size,
"epochs": args.epochs,
"lr": args.lr
})
dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round
# train procedure
to_select = [i for i in range(total_client_num)]
for round in range(args.com_round):
model_parameters = SerializationTool.serialize_model(model)
selection = random.sample(to_select, num_per_round)
parameters = trainer.train(model_parameters=model_parameters,
id_list=selection,
aggregate=False)
gradients = [model_parameters - model for model in parameters]
for i, grad in enumerate(gradients):
gradients[i] = grad / grad.norm()
print(len(gradients))
print(gradients[0].shape)
# calculate lambda
lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]
dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)
dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)
serialized_parameters = model_parameters - dt * args.lr
SerializationTool.deserialize_model(model, serialized_parameters)
criterion = nn.CrossEntropyLoss()
loss, acc = evaluate(model, criterion, test_loader)
print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
app/node/inspector_api.go | package node
import (
"fmt"
"github.com/filecoin-project/venus_lite/build/flags"
"github.com/filecoin-project/venus_lite/pkg/config"
"github.com/filecoin-project/venus_lite/pkg/repo"
sysi "github.com/whyrusleeping/go-sysinfo"
"os"
"runtime"
)
type IInspector interface {
Runtime() *RuntimeInfo
Memory() (*MemoryInfo, error)
Config() *config.Config
Disk() (*DiskInfo, error)
FilecoinVersion() string
Environment() *EnvironmentInfo
}
var _ IInspector = &inspector{}
// NewInspectorAPI returns an `Inspector` used to inspect the venus node.
func NewInspectorAPI(r repo.Repo) IInspector {
return &inspector{
repo: r,
}
}
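// StoreAnswer bcrypt-hashes the supplied answer and upserts it for the given user and question.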
// Inspector contains information used to inspect the venus node.
type inspector struct {
repo repo.Repo
}
// AllInspectorInfo contains all information the inspector can gather.
type AllInspectorInfo struct {
Config *config.Config
Runtime *RuntimeInfo
Environment *EnvironmentInfo
Disk *DiskInfo
Memory *MemoryInfo
FilecoinVersion string
}
// RuntimeInfo contains information about the golang runtime.
type RuntimeInfo struct {
OS string
Arch string
Version string
Compiler string
NumProc int
GoMaxProcs int
NumGoRoutines int
NumCGoCalls int64
}
// EnvironmentInfo contains information about the environment filecoin is running in.
type EnvironmentInfo struct {
VENUSAPI string `json:"VENUS_API"`
VENUSPath string `json:"VENUS_PATH"`
GoPath string `json:"GOPATH"`
}
// DiskInfo contains information about disk usage and type.
type DiskInfo struct {
Free uint64
Total uint64
FSType string
}
// MemoryInfo contains information about memory usage.
type MemoryInfo struct {
Swap uint64
Virtual uint64
}
// Runtime returns information about the golang runtime.
func (g *inspector) Runtime() *RuntimeInfo {
return &RuntimeInfo{
OS: runtime.GOOS,
Arch: runtime.GOARCH,
Version: runtime.Version(),
Compiler: runtime.Compiler,
NumProc: runtime.NumCPU(),
GoMaxProcs: runtime.GOMAXPROCS(0),
NumGoRoutines: runtime.NumGoroutine(),
NumCGoCalls: runtime.NumCgoCall(),
}
}
// Environment returns information about the environment filecoin is running in.
func (g *inspector) Environment() *EnvironmentInfo {
return &EnvironmentInfo{
VENUSAPI: os.Getenv("VENUS_API"),
VENUSPath: os.Getenv("VENUS_PATH"),
GoPath: os.Getenv("GOPATH"),
}
}
// Disk returns information about the filesystem the filecoin node's repo is on.
func (g *inspector) Disk() (*DiskInfo, error) {
fsr, ok := g.repo.(*repo.FSRepo)
if !ok {
// we are using a in memory repo
return &DiskInfo{
Free: 0,
Total: 0,
FSType: "0",
}, nil
}
p, err := fsr.Path()
if err != nil {
return nil, err
}
dinfo, err := sysi.DiskUsage(p)
if err != nil {
return nil, err
}
return &DiskInfo{
Free: dinfo.Free,
Total: dinfo.Total,
FSType: dinfo.FsType,
}, nil
}
// Memory returns information about system memory usage.
func (g *inspector) Memory() (*MemoryInfo, error) {
meminfo, err := sysi.MemoryInfo()
if err != nil {
return nil, err
}
return &MemoryInfo{
Swap: meminfo.Swap,
Virtual: meminfo.Used,
}, nil
}
// Config returns the current config values of the filecoin node.
func (g *inspector) Config() *config.Config {
return g.repo.Config()
}
// FilecoinVersion returns the version of venus.
func (g *inspector) FilecoinVersion() string {
return fmt.Sprintf("%s %s", flags.GitTag, flags.GitCommit)
}
| [
"\"VENUS_API\"",
"\"VENUS_PATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"VENUS_PATH",
"VENUS_API"
]
| [] | ["GOPATH", "VENUS_PATH", "VENUS_API"] | go | 3 | 0 | |
pulsar-broker/src/main/java/org/apache/pulsar/utils/auth/tokens/TokensCliUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.utils.auth.tokens;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.base.Charsets;
import io.jsonwebtoken.Claims;
import io.jsonwebtoken.Jwt;
import io.jsonwebtoken.Jwts;
import io.jsonwebtoken.SignatureAlgorithm;
import io.jsonwebtoken.io.Decoders;
import io.jsonwebtoken.io.Encoders;
import io.jsonwebtoken.security.Keys;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.Key;
import java.security.KeyPair;
import java.util.Date;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import javax.crypto.SecretKey;
import lombok.Cleanup;
import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils;
import org.apache.pulsar.common.util.RelativeTimeUtil;
public class TokensCliUtils {
public static class Arguments {
@Parameter(names = { "-h", "--help" }, description = "Show this help message")
private boolean help = false;
}
@Parameters(commandDescription = "Create a new secret key")
public static class CommandCreateSecretKey {
@Parameter(names = { "-a",
"--signature-algorithm" }, description = "The signature algorithm for the new secret key.")
SignatureAlgorithm algorithm = SignatureAlgorithm.HS256;
@Parameter(names = { "-o",
"--output" }, description = "Write the secret key to a file instead of stdout")
String outputFile;
@Parameter(names = {
"-b", "--base64" }, description = "Encode the key in base64")
boolean base64 = false;
public void run() throws IOException {
SecretKey secretKey = AuthTokenUtils.createSecretKey(algorithm);
byte[] encoded = secretKey.getEncoded();
if (base64) {
encoded = Encoders.BASE64.encode(encoded).getBytes();
}
if (outputFile != null) {
Files.write(Paths.get(outputFile), encoded);
} else {
System.out.write(encoded);
}
}
}
@Parameters(commandDescription = "Create a new or pair of keys public/private")
public static class CommandCreateKeyPair {
@Parameter(names = { "-a",
"--signature-algorithm" }, description = "The signature algorithm for the new key pair.")
SignatureAlgorithm algorithm = SignatureAlgorithm.RS256;
@Parameter(names = {
"--output-private-key" }, description = "File where to write the private key", required = true)
String privateKeyFile;
@Parameter(names = {
"--output-public-key" }, description = "File where to write the public key", required = true)
String publicKeyFile;
public void run() throws IOException {
KeyPair pair = Keys.keyPairFor(algorithm);
Files.write(Paths.get(publicKeyFile), pair.getPublic().getEncoded());
Files.write(Paths.get(privateKeyFile), pair.getPrivate().getEncoded());
}
}
@Parameters(commandDescription = "Create a new token")
public static class CommandCreateToken {
@Parameter(names = { "-a",
"--signature-algorithm" }, description = "The signature algorithm for the new key pair.")
SignatureAlgorithm algorithm = SignatureAlgorithm.RS256;
@Parameter(names = { "-s",
"--subject" }, description = "Specify the 'subject' or 'principal' associate with this token", required = true)
private String subject;
@Parameter(names = { "-e",
"--expiry-time" }, description = "Relative expiry time for the token (eg: 1h, 3d, 10y). (m=minutes) Default: no expiration")
private String expiryTime;
@Parameter(names = { "-sk",
"--secret-key" }, description = "Pass the secret key for signing the token. This can either be: data:, file:, etc..")
private String secretKey;
@Parameter(names = { "-pk",
"--private-key" }, description = "Pass the private key for signing the token. This can either be: data:, file:, etc..")
private String privateKey;
public void run() throws Exception {
if (secretKey == null && privateKey == null) {
System.err.println(
"Either --secret-key or --private-key needs to be passed for signing a token");
System.exit(1);
} else if (secretKey != null && privateKey != null) {
System.err.println(
"Only one of --secret-key and --private-key needs to be passed for signing a token");
System.exit(1);
}
Key signingKey;
if (privateKey != null) {
byte[] encodedKey = AuthTokenUtils.readKeyFromUrl(privateKey);
signingKey = AuthTokenUtils.decodePrivateKey(encodedKey, algorithm);
} else {
byte[] encodedKey = AuthTokenUtils.readKeyFromUrl(secretKey);
signingKey = AuthTokenUtils.decodeSecretKey(encodedKey);
}
Optional<Date> optExpiryTime = Optional.empty();
if (expiryTime != null) {
long relativeTimeMillis = TimeUnit.SECONDS
.toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(expiryTime));
optExpiryTime = Optional.of(new Date(System.currentTimeMillis() + relativeTimeMillis));
}
String token = AuthTokenUtils.createToken(signingKey, subject, optExpiryTime);
System.out.println(token);
}
}
@Parameters(commandDescription = "Show the content of token")
public static class CommandShowToken {
@Parameter(description = "The token string", arity = 1)
private java.util.List<String> args;
@Parameter(names = { "-i",
"--stdin" }, description = "Read token from standard input")
private Boolean stdin = false;
@Parameter(names = { "-f",
"--token-file" }, description = "Read token from a file")
private String tokenFile;
public void run() throws Exception {
String token;
if (args != null) {
token = args.get(0);
} else if (stdin) {
@Cleanup
BufferedReader r = new BufferedReader(new InputStreamReader(System.in));
token = r.readLine();
} else if (tokenFile != null) {
token = new String(Files.readAllBytes(Paths.get(tokenFile)), Charsets.UTF_8);
} else if (System.getenv("TOKEN") != null) {
token = System.getenv("TOKEN");
} else {
System.err.println(
"Token needs to be either passed as an argument or through `--stdin`, `--token-file` or by the `TOKEN` environment variable");
System.exit(1);
return;
}
String[] parts = token.split("\\.");
System.out.println(new String(Decoders.BASE64URL.decode(parts[0])));
System.out.println("---");
System.out.println(new String(Decoders.BASE64URL.decode(parts[1])));
}
}
@Parameters(commandDescription = "Validate a token against a key")
public static class CommandValidateToken {
@Parameter(names = { "-a",
"--signature-algorithm" }, description = "The signature algorithm for the key pair if using public key.")
SignatureAlgorithm algorithm = SignatureAlgorithm.RS256;
@Parameter(description = "The token string", arity = 1)
private java.util.List<String> args;
@Parameter(names = { "-i",
"--stdin" }, description = "Read token from standard input")
private Boolean stdin = false;
@Parameter(names = { "-f",
"--token-file" }, description = "Read token from a file")
private String tokenFile;
@Parameter(names = { "-sk",
"--secret-key" }, description = "Pass the secret key for validating the token. This can either be: data:, file:, etc..")
private String secretKey;
@Parameter(names = { "-pk",
"--public-key" }, description = "Pass the public key for validating the token. This can either be: data:, file:, etc..")
private String publicKey;
public void run() throws Exception {
if (secretKey == null && publicKey == null) {
System.err.println(
"Either --secret-key or --public-key needs to be passed for signing a token");
System.exit(1);
} else if (secretKey != null && publicKey != null) {
System.err.println(
"Only one of --secret-key and --public-key needs to be passed for signing a token");
System.exit(1);
}
String token;
if (args != null) {
token = args.get(0);
} else if (stdin) {
@Cleanup
BufferedReader r = new BufferedReader(new InputStreamReader(System.in));
token = r.readLine();
} else if (tokenFile != null) {
token = new String(Files.readAllBytes(Paths.get(tokenFile)), Charsets.UTF_8);
} else if (System.getenv("TOKEN") != null) {
token = System.getenv("TOKEN");
} else {
System.err.println(
"Token needs to be either passed as an argument or through `--stdin`, `--token-file` or by the `TOKEN` environment variable");
System.exit(1);
return;
}
Key validationKey;
if (publicKey != null) {
byte[] encodedKey = AuthTokenUtils.readKeyFromUrl(publicKey);
validationKey = AuthTokenUtils.decodePublicKey(encodedKey, algorithm);
} else {
byte[] encodedKey = AuthTokenUtils.readKeyFromUrl(secretKey);
validationKey = AuthTokenUtils.decodeSecretKey(encodedKey);
}
// Validate the token
@SuppressWarnings("unchecked")
Jwt<?, Claims> jwt = Jwts.parser()
.setSigningKey(validationKey)
.parse(token);
System.out.println(jwt.getBody());
}
}
public static void main(String[] args) throws Exception {
Arguments arguments = new Arguments();
JCommander jcommander = new JCommander(arguments);
CommandCreateSecretKey commandCreateSecretKey = new CommandCreateSecretKey();
jcommander.addCommand("create-secret-key", commandCreateSecretKey);
CommandCreateKeyPair commandCreateKeyPair = new CommandCreateKeyPair();
jcommander.addCommand("create-key-pair", commandCreateKeyPair);
CommandCreateToken commandCreateToken = new CommandCreateToken();
jcommander.addCommand("create", commandCreateToken);
CommandShowToken commandShowToken = new CommandShowToken();
jcommander.addCommand("show", commandShowToken);
CommandValidateToken commandValidateToken = new CommandValidateToken();
jcommander.addCommand("validate", commandValidateToken);
try {
jcommander.parse(args);
if (arguments.help || jcommander.getParsedCommand() == null) {
jcommander.usage();
System.exit(1);
}
} catch (Exception e) {
jcommander.usage();
System.err.println(e);
System.exit(1);
}
String cmd = jcommander.getParsedCommand();
if (cmd.equals("create-secret-key")) {
commandCreateSecretKey.run();
} else if (cmd.equals("create-key-pair")) {
commandCreateKeyPair.run();
} else if (cmd.equals("create")) {
commandCreateToken.run();
} else if (cmd.equals("show")) {
commandShowToken.run();
} else if (cmd.equals("validate")) {
commandValidateToken.run();
} else {
System.err.println("Invalid command: " + cmd);
System.exit(1);
}
}
}
| [
"\"TOKEN\"",
"\"TOKEN\"",
"\"TOKEN\"",
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | java | 1 | 0 | |
main.go | /*
* Copyright 2013-2016 Fabian Groffen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bufio"
"expvar"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
_ "net/http/pprof"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"time"
cfg "github.com/alyu/configparser"
"github.com/dgryski/carbonzipper/mlog"
"github.com/dgryski/httputil"
whisper "github.com/grobian/go-whisper"
"github.com/peterbourgon/g2g"
)
var config = struct {
WhisperData string
GraphiteHost string
}{
WhisperData: "/var/lib/carbon/whisper",
}
// grouped expvars for /debug/vars and graphite
var Metrics = struct {
MetricsReceived *expvar.Int
}{
MetricsReceived: expvar.NewInt("metrics_received"),
}
var BuildVersion = "(development build)"
var logger mlog.Level
func handleConnection(conn net.Conn, schemas []*StorageSchema, aggrs []*StorageAggregation, whiteRegexps []*regexp.Regexp, blackRegexps []*regexp.Regexp) {
bufconn := bufio.NewReader(conn)
defer func() {
conn.Close()
if r := recover(); r != nil {
logger.Logf("recovering from whisper panic:", r)
}
}()
for {
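// Each incoming line is expected in the Graphite plaintext format: "<metric.path> <value> <unix-timestamp>\n".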
line, err := bufconn.ReadString('\n')
if err != nil {
if err != io.EOF {
logger.Logf("read failed: %s", err.Error())
}
return
}
elems := strings.Split(line, " ")
if len(elems) != 3 {
logger.Logf("invalid line: %s", line)
continue
}
metric := elems[0]
if metric == "" {
logger.Logf("invalid line: %s", line)
continue
}
if !metricNameIsValid([]byte(metric), whiteRegexps, blackRegexps) {
continue
}
value, err := strconv.ParseFloat(elems[1], 64)
if err != nil {
logger.Logf("invalue value '%s': %s", elems[1], err.Error())
continue
}
elems[2] = strings.TrimRight(elems[2], "\n")
tsf, err := strconv.ParseFloat(elems[2], 64)
if err != nil {
logger.Logf("invalid timestamp '%s': %s", elems[2], err.Error())
continue
}
ts := int(tsf)
if ts == 0 {
logger.Logf("invalid timestamp (0): %s", line)
continue
}
logger.Debugf("metric: %s, value: %f, ts: %d", metric, value, ts)
// do what we want to do
path := config.WhisperData + "/" + strings.Replace(metric, ".", "/", -1) + ".wsp"
w, err := whisper.Open(path)
if err != nil && os.IsNotExist(err) {
w = createMetric([]byte(metric), path, schemas, aggrs)
if w == nil {
continue
}
} else if err != nil {
// some other error
logger.Logf("failed to open whisper file %s: %v", path, err)
continue
}
err = w.Update(value, int(ts))
if err != nil {
logger.Logf("failed to update whisper file %s: %v", path, err)
}
w.Close()
Metrics.MetricsReceived.Add(1)
}
}
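// createMetric creates a new whisper file for the metric, using the first storage schema and
// aggregation rule whose pattern matches (first match wins, mirroring carbon's behaviour).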
func createMetric(metric []byte, path string, schemas []*StorageSchema, aggrs []*StorageAggregation) *whisper.Whisper {
var schema *StorageSchema
for _, s := range schemas {
if s.pattern.Match(metric) {
schema = s
break
}
}
if schema == nil {
logger.Logf("no storage schema defined for %s", metric)
return nil
}
logger.Debugf("%s: found schema: %s", metric, schema.name)
var aggr *StorageAggregation
for _, a := range aggrs {
if a.pattern.Match(metric) {
aggr = a
break
}
}
// http://graphite.readthedocs.org/en/latest/config-carbon.html#storage-aggregation-conf
aggrName := "(default)"
aggrStr := "average"
aggrType := whisper.Average
xfilesf := float32(0.5)
if aggr != nil {
aggrName = aggr.name
aggrStr = aggr.aggregationMethodStr
aggrType = aggr.aggregationMethod
xfilesf = float32(aggr.xFilesFactor)
}
logger.Logf("creating %s: %s, retention: %s (section %s), aggregationMethod: %s, xFilesFactor: %f (section %s)",
metric, path, schema.retentionStr, schema.name,
aggrStr, xfilesf, aggrName)
// whisper.Create doesn't mkdir, so let's do it ourself
lastslash := strings.LastIndex(path, "/")
if lastslash != -1 {
dir := path[0:lastslash]
err := os.MkdirAll(dir, os.ModeDir|os.ModePerm)
if err != nil {
logger.Logf("error during mkdir(%q): %v\n", dir, err)
return nil
}
}
w, err := whisper.Create(path, schema.retentions, aggrType, xfilesf)
if err != nil {
logger.Logf("failed to create new whisper file %s: %v", path, err)
return nil
}
return w
}
func listenAndServe(listen string, schemas []*StorageSchema, aggrs []*StorageAggregation, whiteRegexps []*regexp.Regexp, blackRegexps []*regexp.Regexp) {
l, err := net.Listen("tcp", listen)
if err != nil {
logger.Logf("failed to listen on %s: %s", listen, err.Error())
os.Exit(1)
}
defer l.Close()
for {
conn, err := l.Accept()
if err != nil {
logger.Logf("failed to accept connection: %s", err.Error())
continue
}
go handleConnection(conn, schemas, aggrs, whiteRegexps, blackRegexps)
}
}
type StorageSchema struct {
name string
pattern *regexp.Regexp
retentionStr string
retentions whisper.Retentions
}
func readStorageSchemas(file string) ([]*StorageSchema, error) {
config, err := cfg.Read(file)
if err != nil {
return nil, err
}
sections, err := config.AllSections()
if err != nil {
return nil, err
}
var ret []*StorageSchema
for _, s := range sections {
var sschema StorageSchema
// this is mildly stupid, but I don't feel like forking
// configparser just for this
sschema.name =
strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []")
if sschema.name == "" {
continue
}
sschema.pattern, err = regexp.Compile(s.ValueOf("pattern"))
if err != nil {
logger.Logf("failed to parse pattern '%s'for [%s]: %s",
s.ValueOf("pattern"), sschema.name, err.Error())
continue
}
sschema.retentionStr = s.ValueOf("retentions")
sschema.retentions, err = whisper.ParseRetentionDefs(sschema.retentionStr)
logger.Debugf("adding schema [%s] pattern = %s retentions = %s",
sschema.name, s.ValueOf("pattern"), sschema.retentionStr)
ret = append(ret, &sschema)
}
return ret, nil
}
type StorageAggregation struct {
name string
pattern *regexp.Regexp
xFilesFactor float64
aggregationMethodStr string
aggregationMethod whisper.AggregationMethod
}
func readStorageAggregations(file string) ([]*StorageAggregation, error) {
config, err := cfg.Read(file)
if err != nil {
return nil, err
}
sections, err := config.AllSections()
if err != nil {
return nil, err
}
var ret []*StorageAggregation
for _, s := range sections {
var saggr StorageAggregation
// this is mildly stupid, but I don't feel like forking
// configparser just for this
saggr.name =
strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []")
if saggr.name == "" {
continue
}
saggr.pattern, err = regexp.Compile(s.ValueOf("pattern"))
if err != nil {
logger.Logf("failed to parse pattern '%s'for [%s]: %s",
s.ValueOf("pattern"), saggr.name, err.Error())
continue
}
saggr.xFilesFactor, err = strconv.ParseFloat(s.ValueOf("xFilesFactor"), 64)
if err != nil {
logger.Logf("failed to parse xFilesFactor '%s' in %s: %s",
s.ValueOf("xFilesFactor"), saggr.name, err.Error())
continue
}
saggr.aggregationMethodStr = s.ValueOf("aggregationMethod")
switch saggr.aggregationMethodStr {
case "average", "avg":
saggr.aggregationMethod = whisper.Average
case "sum":
saggr.aggregationMethod = whisper.Sum
case "last":
saggr.aggregationMethod = whisper.Last
case "max":
saggr.aggregationMethod = whisper.Max
case "min":
saggr.aggregationMethod = whisper.Min
default:
logger.Logf("unknown aggregation method '%s'",
s.ValueOf("aggregationMethod"))
continue
}
logger.Debugf("adding aggregation [%s] pattern = %s aggregationMethod = %s xFilesFactor = %f",
saggr.name, s.ValueOf("pattern"),
saggr.aggregationMethodStr, saggr.xFilesFactor)
ret = append(ret, &saggr)
}
return ret, nil
}
func readRegexpList(file string) []*regexp.Regexp {
f, err := os.Open(file)
if err != nil {
logger.Logf("Warning: %s", string(err.Error()))
return nil
}
defer f.Close()
regexps := make([]*regexp.Regexp, 0)
reader := bufio.NewReader(f)
for str := ""; err == nil; str, err = reader.ReadString('\n') {
// Ignore empty lines in file
if str == "" || str == "\n" {
continue
}
regexps = append(regexps, regexp.MustCompile(strings.Replace(str, "\n", "", -1)))
}
return regexps
}
func metricNameIsValid(metric []byte, whiteRegexps []*regexp.Regexp, blackRegexps []*regexp.Regexp) bool {
// We need to check if our metric matches compiled regexp from whitelist file
metricIsWhitelisted := false
for _, reg := range whiteRegexps {
if reg.Match(metric) {
metricIsWhitelisted = true
break
}
}
if len(whiteRegexps) != 0 && !metricIsWhitelisted {
logger.Logf("metric '%s' does not match whitelist regexp", metric)
return false
}
for _, reg := range blackRegexps {
if reg.Match(metric) {
logger.Logf("metric '%s' matches blacklist regexp '%s'", metric, reg)
return false
}
}
return true
}
func main() {
port := flag.Int("p", 2003, "port to bind to")
reportport := flag.Int("reportport", 8080, "port to bind http report interface to")
verbose := flag.Bool("v", false, "enable verbose logging")
debug := flag.Bool("vv", false, "enable more verbose (debug) logging")
whisperdata := flag.String("w", config.WhisperData, "location where whisper files are stored")
maxprocs := flag.Int("maxprocs", runtime.NumCPU()*80/100, "GOMAXPROCS")
logdir := flag.String("logdir", "/var/log/carbonwriter/", "logging directory")
schemafile := flag.String("schemafile", "/etc/carbon/storage-schemas.conf", "storage-schemas.conf location")
aggrfile := flag.String("aggrfile", "/etc/carbon/storage-aggregation.conf", "storage-aggregation.conf location")
whitelistfile := flag.String("whitefile", "/etc/carbon/whitelist.conf", "whitelist.conf location")
blacklistfile := flag.String("blackfile", "/etc/carbon/blacklist.conf", "blacklist.conf location")
logtostdout := flag.Bool("stdout", false, "log also to stdout")
flag.Parse()
mlog.SetOutput(*logdir, "carbonwriter", *logtostdout)
expvar.NewString("BuildVersion").Set(BuildVersion)
log.Println("starting carbonwriter", BuildVersion)
loglevel := mlog.Normal
if *verbose {
loglevel = mlog.Debug
}
if *debug {
loglevel = mlog.Trace
}
logger = mlog.Level(loglevel)
schemas, err := readStorageSchemas(*schemafile)
if err != nil {
logger.Logf("failed to read %s: %s", *schemafile, err.Error())
os.Exit(1)
}
aggrs, err := readStorageAggregations(*aggrfile)
if err != nil {
logger.Logf("failed to read %s: %s", *aggrfile, err.Error())
os.Exit(1)
}
whiteRegexps := readRegexpList(*whitelistfile)
blackRegexps := readRegexpList(*blacklistfile)
config.WhisperData = strings.TrimRight(*whisperdata, "/")
logger.Logf("writing whisper files to: %s", config.WhisperData)
logger.Logf("reading storage schemas from: %s", *schemafile)
logger.Logf("reading aggregation rules from: %s", *aggrfile)
runtime.GOMAXPROCS(*maxprocs)
logger.Logf("set GOMAXPROCS=%d", *maxprocs)
httputil.PublishTrackedConnections("httptrack")
// nothing in the config? check the environment
if config.GraphiteHost == "" {
if host := os.Getenv("GRAPHITEHOST") + ":" + os.Getenv("GRAPHITEPORT"); host != ":" {
config.GraphiteHost = host
}
}
// only register g2g if we have a graphite host
if config.GraphiteHost != "" {
logger.Logf("Using graphite host %v", config.GraphiteHost)
// register our metrics with graphite
graphite := g2g.NewGraphite(config.GraphiteHost, 60*time.Second, 10*time.Second)
hostname, _ := os.Hostname()
hostname = strings.Replace(hostname, ".", "_", -1)
graphite.Register(fmt.Sprintf("carbon.writer.%s.metricsReceived", hostname), Metrics.MetricsReceived)
}
listen := fmt.Sprintf(":%d", *port)
httplisten := fmt.Sprintf(":%d", *reportport)
logger.Logf("listening on %s, statistics via %s", listen, httplisten)
go listenAndServe(listen, schemas, aggrs, whiteRegexps, blackRegexps)
err = http.ListenAndServe(httplisten, nil)
if err != nil {
log.Fatalf("%s", err)
}
logger.Logf("stopped")
}
| [
"\"GRAPHITEHOST\"",
"\"GRAPHITEPORT\""
]
| []
| [
"GRAPHITEHOST",
"GRAPHITEPORT"
]
| [] | ["GRAPHITEHOST", "GRAPHITEPORT"] | go | 2 | 0 | |
app.go | package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
)
func handler(w http.ResponseWriter, r *http.Request) {
log.Print("Simple app running...")
msg := os.Getenv("SIMPLE_MSG")
if msg == "" {
msg = ":( SIMPLE_MSG variable not defined"
}
fmt.Fprintf(w, "<h1>KNative Simple</h1><h2>%s</h2>", msg)
}
func main() {
flag.Parse()
log.Print("Simple app server started...")
http.HandleFunc("/", handler)
http.ListenAndServe(":8080", nil)
}
| [
"\"SIMPLE_MSG\""
]
| []
| [
"SIMPLE_MSG"
]
| [] | ["SIMPLE_MSG"] | go | 1 | 0 | |
python-build/python-libs/ase/android.py | # Copyright (C) 2017 shimoda [email protected]
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import print_function
__author__ = 'Damon Kohler <[email protected]>'
import sys
import time
import collections
import json
import os
import socket
from logging import warning as warn
PORT = os.environ.get('AP_PORT')
HOST = os.environ.get('AP_HOST')
HANDSHAKE = os.environ.get('AP_HANDSHAKE')
Result = collections.namedtuple('Result', 'id,result,error')
class Android(object):
def __init__(self, addr=None):
if addr is None:
addr = HOST, PORT
if True:
try:
self.conn = socket.create_connection(addr)
except:
self.conn = self.launchSL4A(addr)
if sys.version_info[0] == 2:
self.client = self.conn.makefile('rw')
else:
self.client = self.conn.makefile('rw', encoding='utf-8')
self.id = 0
if HANDSHAKE is not None:
self._authenticate(HANDSHAKE)
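# Send a JSON-RPC request over the SL4A socket and block until the matching response line arrives.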
def _rpc(self, method, *args):
data = {'id': self.id,
'method': method,
'params': args}
request = json.dumps(data)
self.client.write(request+'\n')
self.client.flush()
response = self.client.readline()
self.id += 1
result = json.loads(response)
if result['error'] is not None:
print(result['error'])
# namedtuple doesn't work with unicode keys.
return Result(id=result['id'], result=result['result'],
error=result['error'], )
def __getattr__(self, name):
def rpc_call(*args):
return self._rpc(name, *args)
return rpc_call
if True:
def launchSL4A(self, addr):
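# Fall back to launching the SL4A RPC server via an "am start" intent on the requested port,
# then retry the socket connection after a short delay.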
if addr[0] is None:
addr = ("127.0.0.1", addr[1])
if addr[1] is None:
addr = (addr[0], "8888")
sl4a = 'com.googlecode.android_scripting'
cmd = ('am start -a %s.action.LAUNCH_SERVER '
'--ei %s.extra.USE_SERVICE_PORT %s '
'%s/.activity.ScriptingLayerServiceLauncher '
% (sl4a, sl4a, addr[1], sl4a))
warn("launch SL4A with %s" % str(addr))
os.system(cmd)
time.sleep(2)
return socket.create_connection(addr)
# vi: et:ts=4:nowrap
| []
| []
| [
"AP_HOST",
"AP_PORT",
"AP_HANDSHAKE"
]
| [] | ["AP_HOST", "AP_PORT", "AP_HANDSHAKE"] | python | 3 | 0 | |
python/lax_sod_data.py | import os
import numpy as np
def get_lax_sod_network():
return [12, 12, 10, 12, 10, 12, 10, 10, 12,1]
def get_lax_sod_data_inner():
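# Load the Sobol/QMC parameter points and the matching averaged functionals from the
# lax_sod_tube repository (located via LAX_SOD_REPO_PATH).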
data_path = os.environ.get("LAX_SOD_REPO_PATH", "../lax_sod_tube")
qmc_points = np.loadtxt(os.path.join(data_path, "parameters/parameters_sobol_X.txt"))
forces = np.loadtxt(os.path.join(data_path, "functionals/average_functionals_sobol_2048.txt"))
data_per_func = {}
force_names = [*[f'q{k+1}' for k in range(3)],
*[f'EK{k+1}' for k in range(3)]]
for n, force_name in enumerate(force_names):
data_per_func[force_name] = forces[:, n]
return qmc_points, data_per_func
def get_lax_sod_data():
qmc_points, qmc_values = get_lax_sod_data_inner()
mc_params, mc_values = get_lax_sod_data_mc_inner()
return qmc_points, qmc_values, mc_params, mc_values
def get_lax_sod_data_mc_inner():
data_path = os.environ.get("LAX_SOD_REPO_PATH", "../lax_sod_tube")
mc_points = np.loadtxt(os.path.join(data_path, "parameters/parameters_mc_X.txt"))
forces = np.loadtxt(os.path.join(data_path, "functionals/average_functionals_mc_2048.txt"))
data_per_func = {}
force_names = [*[f'q{k+1}' for k in range(3)],
*[f'EK{k+1}' for k in range(3)]]
for n, force_name in enumerate(force_names):
data_per_func[force_name] = forces[:, n]
return mc_points, data_per_func
def get_lax_sod_data_mc():
mc_params, mc_values = get_lax_sod_data_mc_inner()
qmc_params, qmc_values = get_lax_sod_data_inner()
return mc_params, mc_values, qmc_params, qmc_values
def make_folders():
folders = ['img', 'img_tikz', 'tables', 'results']
for folder in folders:
if not os.path.exists(folder):
os.mkdir(folder)
| []
| []
| [
"LAX_SOD_REPO_PATH"
]
| [] | ["LAX_SOD_REPO_PATH"] | python | 1 | 0 | |
model/security.go | // Copyright 2015 mokey Authors. All rights reserved.
// Use of this source code is governed by a BSD style
// license that can be found in the LICENSE file.
package model
import (
log "github.com/Sirupsen/logrus"
"github.com/jmoiron/sqlx"
"golang.org/x/crypto/bcrypt"
)
type SecurityAnswer struct {
UserName string `db:"user_name"`
QuestionId int `db:"question_id"`
Question string `db:"question"`
Answer string `db:"answer"`
}
type SecurityQuestion struct {
Id int `db:"id"`
Question string `db:"question"`
}
// Verify security answer
func (a *SecurityAnswer) Verify(ans string) bool {
	return bcrypt.CompareHashAndPassword([]byte(a.Answer), []byte(ans)) == nil
}
func FetchAnswer(db *sqlx.DB, uid string) (*SecurityAnswer, error) {
answer := SecurityAnswer{}
err := db.Get(&answer, "select a.user_name,a.question_id,q.question,a.answer from security_answer a join security_question q on a.question_id = q.id where a.user_name = ?", uid)
if err != nil {
return nil, err
}
return &answer, nil
}
func StoreAnswer(db *sqlx.DB, user, ans string, qid int) error {
hash, err := bcrypt.GenerateFromPassword([]byte(ans), bcrypt.DefaultCost)
if err != nil {
log.WithFields(log.Fields{
"uid": user,
"error": err.Error(),
}).Error("failed to generate bcrypt hash of answer")
return err
}
sa := &SecurityAnswer{
UserName: user,
QuestionId: qid,
Answer: string(hash)}
_, err = db.NamedExec("replace into security_answer (user_name,question_id,answer,created_at) values (:user_name, :question_id, :answer, now())", sa)
if err != nil {
return err
}
return nil
}
func FetchQuestions(db *sqlx.DB) ([]*SecurityQuestion, error) {
questions := []*SecurityQuestion{}
err := db.Select(&questions, "select id,question from security_question")
if err != nil {
return nil, err
}
return questions, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gramproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tensorflow/python/keras/backend.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tfdev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import moving_averages
from tensorflow.python.training.tracking import util as tracking_util
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
py_any = any
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# _DUMMY_EAGER_GRAPH.key is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
# _DummyEagerGraph inherits from threading.local to make its `key` attribute
# thread local. This is needed to make set_learning_phase affect only the
# current thread during eager execution (see b/123096885 for more details).
class _DummyEagerGraph(threading.local):
"""_DummyEagerGraph provides a thread local `key` attribute.
We can't use threading.local directly, i.e. without subclassing, because
gevent monkey patches threading.local and its version does not support
weak references.
"""
class _WeakReferencableClass(object):
"""This dummy class is needed for two reasons.
- We need something that supports weak references. Basic types like string
and ints don't.
- We need something whose hash and equality are based on object identity
to make sure they are treated as different keys to _GRAPH_LEARNING_PHASES.
An empty Python class satisfies both of these requirements.
"""
pass
def __init__(self):
# Constructors for classes subclassing threading.local run once
# per thread accessing something in the class. Thus, each thread will
# get a different key.
super(_DummyEagerGraph, self).__init__()
self.key = _DummyEagerGraph._WeakReferencableClass()
_DUMMY_EAGER_GRAPH = _DummyEagerGraph()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array or TensorFlow tensor.
Returns:
The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor
if `x` was a tensor), cast to its new type.
Example:
>>> tf.keras.backend.floatx()
'float32'
>>> arr = np.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = cast_to_floatx(arr)
>>> new_arr
array([1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
"""
if isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
return math_ops.cast(x, dtype=floatx())
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
>>> get_uid('dense')
1
>>> get_uid('dense')
2
"""
graph = get_graph()
if graph not in PER_GRAPH_OBJECT_NAME_UIDS:
PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
PER_GRAPH_OBJECT_NAME_UIDS.clear()
@keras_export('keras.backend.clear_session')
def clear_session():
"""Resets all state generated by Keras.
Keras manages a global state, which it uses to implement the Functional
model-building API and to uniquify autogenerated layer names.
If you are creating many models in a loop, this global state will consume
an increasing amount of memory over time, and you may want to clear it.
Calling `clear_session()` releases the global state: this helps avoid clutter
from old models and layers, especially when memory is limited.
Example 1: calling `clear_session()` when creating models in a loop
```python
for _ in range(100):
# Without `clear_session()`, each iteration of this loop will
# slightly increase the size of the global state managed by Keras
model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
for _ in range(100):
# With `clear_session()` called at the beginning,
# Keras starts with a blank state at each iteration
# and memory consumption is constant over time.
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
```
Example 2: resetting the layer name generation counter
>>> import tensorflow as tf
>>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense_10
>>> tf.keras.backend.set_learning_phase(1)
>>> print(tf.keras.backend.learning_phase())
1
>>> tf.keras.backend.clear_session()
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
global _GRAPH
global _FREEZABLE_VARS
_GRAPH = None
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
_GRAPH_LEARNING_PHASES.clear()
# Create the learning phase placeholder in graph using the default factory.
_GRAPH_LEARNING_PHASES.setdefault(graph)
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
_FREEZABLE_VARS.pop(graph, None)
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.compat.v1.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
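  Example (illustrative sketch, assuming eager execution and no phase set
  explicitly):
  phase = tf.keras.backend.learning_phase()
  # `phase` is 0 (inference) until `set_learning_phase` or a
  # `learning_phase_scope` changes it.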
"""
graph = ops.get_default_graph()
if graph is _GRAPH:
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
learning_phase = symbolic_learning_phase()
else:
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
learning_phase = _GRAPH_LEARNING_PHASES[None]
_mark_func_graph_as_unsaveable(graph, learning_phase)
return learning_phase
def global_learning_phase_is_set():
return _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES
def _mark_func_graph_as_unsaveable(graph, learning_phase):
"""Mark func graph as unsaveable due to use of symbolic keras learning phase.
Functions that capture the symbolic learning phase cannot be exported to
SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised
if it is exported.
Args:
graph: Graph or FuncGraph object.
learning_phase: Learning phase placeholder or int defined in the graph.
"""
if graph.building_function and is_placeholder(learning_phase):
graph.mark_as_unsaveable(
'The keras learning phase placeholder was used inside a function. '
'Exporting placeholders is not supported when saving out a SavedModel. '
'Please call `tf.keras.backend.set_learning_phase(0)` in the function '
'to set the learning phase to a constant value.')
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
return _GRAPH_LEARNING_PHASES[graph]
def _default_learning_phase():
if context.executing_eagerly():
return 0
else:
with name_scope(''):
return array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
The backend learning phase affects any code that calls
`backend.learning_phase()`
In particular, all Keras built-in layers use the learning phase as the default
for the `training` arg to `Layer.__call__`.
User-written layers and models can achieve the same behavior with code that
looks like:
```python
def call(self, inputs, training=None):
if training is None:
training = backend.learning_phase()
```
Arguments:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
# In an eager context, the learning phase values applies to both the eager
# context and the internal Keras graph.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
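  Example (illustrative sketch; `model` and `x` stand for an existing Keras
  model and a matching input batch):
  with tf.keras.backend.learning_phase_scope(1):
      y = model(x)  # layers such as Dropout use their training behavior
  # The previous learning phase value is restored on exit.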
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH.key, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
try:
set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value
elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager / tf.function only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert ops.executing_eagerly_outside_functions()
global_learning_phase_was_set = global_learning_phase_is_set()
if global_learning_phase_was_set:
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value
yield
finally:
# Restore learning phase to initial value or unset.
if global_learning_phase_was_set:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_value
else:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
raise RuntimeError('Cannot get session inside Tensorflow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
If no global Keras session exists at this point:
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
    op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
# Inject the get_session function to tracking_util to avoid the backward
# dependency from TF to Keras.
tracking_util.register_session_provider(get_session)
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
      a scratch graph will either be retrieved or created.
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
_CURRENT_SCRATCH_GRAPH is not graph):
raise ValueError('Multiple scratch graphs specified.')
if _CURRENT_SCRATCH_GRAPH:
yield _CURRENT_SCRATCH_GRAPH
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH = None
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if os.environ.get('OMP_NUM_THREADS'):
logging.warning(
'OMP_NUM_THREADS is no longer used by the default Keras config. '
'To configure the number of threads, use tf.config.threading APIs.')
config = context.context().config
config.allow_soft_placement = True
return config
def get_default_graph_uid_map():
graph = ops.get_default_graph()
name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections.defaultdict(int)
PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map
return name_uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if tfdev.is_device_spec(device):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return tfdev.DeviceSpec.from_string(op.device)
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [d.name for d in config.list_logical_devices('GPU')]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW on CPU. Therefore we check if we are not
explicitly put on
CPU, and have GPUs available. In this case there will be soft-placing on the
GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor_v2(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
>>> a = tf.keras.backend.placeholder((2, 2), sparse=False)
>>> print(tf.keras.backend.is_sparse(a))
False
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
>>> c = tf.keras.backend.to_dense(b)
>>> print(tf.keras.backend.is_sparse(c))
False
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
@keras_export('keras.backend.name_scope', v1=[])
def name_scope(name):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
def my_op(a):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
# Define some computation that uses `a`.
return foo_op(..., name=scope)
When executed, the Tensor `a` will have the name `MyOp/a`.
Args:
name: The prefix to use on all names created within the name scope.
Returns:
Name scope context manager.
"""
return ops.name_scope_v2(name)
# Export V1 version.
keras_export(v1=['keras.backend.name_scope'])(ops.name_scope_v1)
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val, dtype='float64',
... name='example_var')
>>> tf.keras.backend.dtype(kvar)
'float64'
>>> print(kvar)
<tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=
array([[1., 2.],
[3., 4.]])>
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = variables_module.Variable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
optimizers = _GRAPH_TF_OPTIMIZERS[None]
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
_GRAPH_VARIABLES[graph].add(v)
def unique_object_name(name,
name_uid_map=None,
avoid_names=None,
namespace='',
zero_based=False):
"""Makes a object name (or arbitrary string) unique within a TensorFlow graph.
Arguments:
name: String name to make unique.
name_uid_map: An optional defaultdict(int) to use when creating unique
names. If None (default), uses a per-Graph dictionary.
avoid_names: An optional set or dict with names which should not be used. If
None (default) does not avoid any names.
namespace: Gets a name which is unique within the (graph, namespace). Layers
which are not Networks use a blank namespace and so get graph-global
names.
zero_based: If True, name sequences start with no suffix (e.g. "dense",
"dense_1"). If False, naming is one-based ("dense_1", "dense_2").
Returns:
Unique string name.
Example:
unique_object_name('dense') # dense_1
unique_object_name('dense') # dense_2
"""
if name_uid_map is None:
name_uid_map = get_default_graph_uid_map()
if avoid_names is None:
avoid_names = set()
proposed_name = None
while proposed_name is None or proposed_name in avoid_names:
name_key = (namespace, name)
if zero_based:
number = name_uid_map[name_key]
if number:
proposed_name = name + '_' + str(number)
else:
proposed_name = name
name_uid_map[name_key] += 1
else:
name_uid_map[name_key] += 1
proposed_name = name + '_' + str(name_uid_map[name_key])
return proposed_name
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES[graph]
for opt in _GRAPH_TF_OPTIMIZERS[graph]:
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
# TODO(kathywu): Some metric variables loaded from SavedModel are never
# actually used, and do not have an initializer.
should_be_initialized = [
(not is_initialized[n]) and v.initializer is not None
for n, v in enumerate(candidate_vars)]
uninitialized_vars = []
for flag, v in zip(should_be_initialized, candidate_vars):
if flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
@keras_export('keras.backend.is_keras_tensor')
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
>>> np_var = np.array([1, 2])
>>> # A numpy array is not a symbolic tensor.
>>> tf.keras.backend.is_keras_tensor(np_var)
Traceback (most recent call last):
...
ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`.
Expected a symbolic tensor instance.
>>> keras_var = tf.keras.backend.variable(np_var)
>>> # A variable created with the keras backend is not a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_var)
False
>>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> # A placeholder is not a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_placeholder)
False
>>> keras_input = tf.keras.layers.Input([10])
>>> # An Input is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_input)
True
>>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input)
>>> # Any Keras layer output is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_layer_output)
True
"""
if not isinstance(x,
(ops.Tensor, variables_module.Variable,
sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
ragged=False):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
ragged: Boolean, whether the placeholder should have a ragged type.
In this case, values of 'None' in the 'shape' argument represent
ragged dimensions. For more information about RaggedTensors, see this
[guide](https://www.tensorflow.org/guide/ragged_tensors).
Raises:
ValueError: If called with eager execution
ValueError: If called with sparse = True and ragged = True.
Returns:
Tensor instance (with Keras metadata included).
Examples:
>>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_...' shape=(2, 4, 5) dtype=float32>
"""
if sparse and ragged:
raise ValueError(
'Cannot set both sparse and ragged to True when creating a placeholder.'
)
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = (None,) * ndim
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
elif ragged:
ragged_rank = 0
for i in range(1, len(shape)):
if shape[i] is None:
ragged_rank = i
type_spec = ragged_tensor.RaggedTensorSpec(
shape=shape, dtype=dtype, ragged_rank=ragged_rank)
def tensor_spec_to_placeholder(tensorspec):
return array_ops.placeholder(tensorspec.dtype, tensorspec.shape)
x = nest.map_structure(tensor_spec_to_placeholder, type_spec,
expand_composites=True)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
if isinstance(x, composite_tensor.CompositeTensor):
flat_components = nest.flatten(x, expand_composites=True)
return py_any(is_placeholder(c) for c in flat_components)
else:
return x.op.type == 'Placeholder'
except AttributeError:
return False
def freezable_variable(value, shape=None, name=None):
"""A tensor-like object whose value can be updated only up until execution.
After creating the freezable variable, you can update its value by calling
`var.update_value(new_value)` (similar to a regular variable).
Unlike an actual variable, the value used during execution is the current
value at the time the execution function (`backend.function()`) was created.
This is an internal API, expected to be temporary. It is used to implement a
mutable `trainable` property for `BatchNormalization` layers, with a frozen
value after model compilation.
We don't use a plain variable in this case because we need the value used
in a specific model to be frozen after `compile` has been called
(e.g. GAN use case).
Arguments:
value: The initial value for the tensor-like object.
shape: The shape for the tensor-like object (cannot be changed).
name: The name for the tensor-like object.
Returns:
A tensor-like object with a static value that can be updated via
`x.update_value(new_value)`, up until creating an execution function
(afterwards the value is fixed).
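  Example (illustrative sketch of this internal API; the variable name is
  arbitrary):
  trainable_flag = freezable_variable(True, shape=(), name='trainable_flag')
  trainable_flag.update_value(False)
  trainable_flag.get_value()  # -> False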
"""
graph = get_graph()
with graph.as_default():
x = array_ops.placeholder_with_default(
value, shape=shape, name=name)
x._initial_value = value
x._current_value = value
def update_value(new_value):
x._current_value = new_value
def get_value():
return x._current_value
x.update_value = update_value
x.get_value = get_value
global _FREEZABLE_VARS
_FREEZABLE_VARS[graph].add(x)
return x
@keras_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.shape(kvar)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.shape(input)
<tf.Tensor 'Shape_...' shape=(3,) dtype=int32>
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.int_shape(kvar)
(2, 2)
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.ndim(input)
3
>>> tf.keras.backend.ndim(kvar)
2
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5)))
'float32'
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
... dtype='float32'))
'float32'
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
... dtype='float64'))
'float64'
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]))
>>> tf.keras.backend.dtype(kvar)
'float32'
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
... dtype='float32')
>>> tf.keras.backend.dtype(kvar)
'float32'
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
... dtype='float32')
>>> tf.keras.backend.eval(kvar)
array([[1., 2.],
[3., 4.]], dtype=float32)
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple or list of integers, shape of returned Keras variable
dtype: data type of returned Keras variable
name: name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
>>> kvar = tf.keras.backend.zeros((3,4))
>>> tf.keras.backend.eval(kvar)
array([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
>>> A = tf.constant([1,2,3])
>>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]
>>> tf.keras.backend.eval(kvar2)
array([0., 0., 0.], dtype=float32)
>>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)
>>> tf.keras.backend.eval(kvar3)
array([0, 0, 0], dtype=int32)
>>> kvar4 = tf.keras.backend.zeros([2,3])
>>> tf.keras.backend.eval(kvar4)
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
>>> kvar = tf.keras.backend.ones((3,4))
>>> tf.keras.backend.eval(kvar)
array([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]], dtype=float32)
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
>>> kvar = tf.keras.backend.eye(3)
>>> tf.keras.backend.eval(kvar)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=float32)
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: dtype of returned Keras variable.
`None` uses the dtype of `x`.
name: name for the variable to create.
Returns:
A Keras variable with the shape of `x` filled with zeros.
Example:
from tensorflow.keras import backend as K
kvar = K.variable(np.random.random((2,3)))
kvar_zeros = K.zeros_like(kvar)
K.eval(kvar_zeros)
# array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
>>> kvar = tf.keras.backend.variable(np.random.random((2,3)))
>>> kvar_ones = tf.keras.backend.ones_like(kvar)
>>> tf.keras.backend.eval(kvar_ones)
array([[1., 1., 1.],
[1., 1., 1.]], dtype=float32)
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
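  Example (illustrative sketch, using the backend module directly):
  x = constant([1.0, 2.0])
  y = identity(x, name='x_copy')  # same shape, dtype and values as `x`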
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
>>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),
... low=0.0, high=1.0)
>>> kvar
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
>>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),
... mean=0.0, scale=1.0)
>>> kvar
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
>>> kvar = tf.keras.backend.zeros((2,3))
>>> tf.keras.backend.count_params(kvar)
6
>>> tf.keras.backend.eval(kvar)
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Examples:
Cast a float32 variable to a float64 tensor
>>> input = tf.keras.backend.ones(shape=(1,3))
>>> print(input)
<tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
numpy=array([[1., 1., 1.]], dtype=float32)>
>>> cast_input = tf.keras.backend.cast(input, dtype='float64')
>>> print(cast_input)
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
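  """Update the value of `x` to `new_x`.
  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.
  Returns:
      The variable `x` updated.
  """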
return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
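  Example (illustrative sketch, eager execution):
  v = tf.keras.backend.variable([1.0, 2.0])
  tf.keras.backend.update_add(v, [0.5, 0.5])
  # `v` now holds [1.5, 2.5].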
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
      value: A tensor with the same shape as `x`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
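  Example (illustrative sketch, TF2 eager execution):
  running_mean = tf.keras.backend.variable(0.0)
  tf.keras.backend.moving_average_update(running_mean, 1.0, momentum=0.9)
  # `running_mean` becomes 0.9 * 0.0 + 0.1 * 1.0 = 0.1.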
"""
zero_debias = not tf2.enabled()
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=zero_debias)
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a tensor.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
>>> x = tf.keras.backend.placeholder(shape=(2, 3))
>>> y = tf.keras.backend.placeholder(shape=(3, 4))
>>> xy = tf.keras.backend.dot(x, y)
>>> xy
<tf.Tensor ... shape=(2, 4) dtype=float32>
>>> x = tf.keras.backend.placeholder(shape=(32, 28, 3))
>>> y = tf.keras.backend.placeholder(shape=(3, 4))
>>> xy = tf.keras.backend.dot(x, y)
>>> xy
<tf.Tensor ... shape=(32, 28, 4) dtype=float32>
>>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = tf.keras.backend.ones((4, 3, 5))
>>> xy = tf.keras.backend.dot(x, y)
>>> tf.keras.backend.int_shape(xy)
(2, 4, 5)
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
`batch_dot` results in a tensor or variable with less dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: Tuple or list of integers with target dimensions, or single integer.
The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
>>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
>>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
>>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
>>> tf.keras.backend.int_shape(xy_batch_dot)
(32, 1, 30)
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
`output_shape` = `(100, 30)`
"""
x_shape = int_shape(x)
y_shape = int_shape(y)
x_ndim = len(x_shape)
y_ndim = len(y_shape)
if x_ndim < 2 or y_ndim < 2:
raise ValueError('Cannot do batch_dot on inputs '
'with rank < 2. '
'Received inputs with shapes ' +
str(x_shape) + ' and ' +
str(y_shape) + '.')
x_batch_size = x_shape[0]
y_batch_size = y_shape[0]
if x_batch_size is not None and y_batch_size is not None:
if x_batch_size != y_batch_size:
raise ValueError('Cannot do batch_dot on inputs '
'with different batch sizes. '
'Received inputs with shapes ' +
str(x_shape) + ' and ' +
str(y_shape) + '.')
if isinstance(axes, int):
axes = [axes, axes]
if axes is None:
if y_ndim == 2:
axes = [x_ndim - 1, y_ndim - 1]
else:
axes = [x_ndim - 1, y_ndim - 2]
if py_any(isinstance(a, (list, tuple)) for a in axes):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
# if tuple, convert to list.
axes = list(axes)
# convert negative indices.
if axes[0] < 0:
axes[0] += x_ndim
if axes[1] < 0:
axes[1] += y_ndim
# sanity checks
if 0 in axes:
raise ValueError('Cannot perform batch_dot over axis 0. '
'If your inputs are not batched, '
'add a dummy batch dimension to your '
'inputs using K.expand_dims(x, 0)')
a0, a1 = axes
d1 = x_shape[a0]
d2 = y_shape[a1]
if d1 is not None and d2 is not None and d1 != d2:
raise ValueError('Cannot do batch_dot on inputs with shapes ' +
str(x_shape) + ' and ' + str(y_shape) +
' with axes=' + str(axes) + '. x.shape[%d] != '
'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))
# backup ndims. Need them later.
orig_x_ndim = x_ndim
orig_y_ndim = y_ndim
# if rank is 2, expand to 3.
if x_ndim == 2:
x = array_ops.expand_dims(x, 1)
a0 += 1
x_ndim += 1
if y_ndim == 2:
y = array_ops.expand_dims(y, 2)
y_ndim += 1
# bring x's dimension to be reduced to last axis.
if a0 != x_ndim - 1:
pattern = list(range(x_ndim))
for i in range(a0, x_ndim - 1):
pattern[i] = pattern[i + 1]
pattern[-1] = a0
x = array_ops.transpose(x, pattern)
# bring y's dimension to be reduced to axis 1.
if a1 != 1:
pattern = list(range(y_ndim))
for i in range(a1, 1, -1):
pattern[i] = pattern[i - 1]
pattern[1] = a1
y = array_ops.transpose(y, pattern)
# normalize both inputs to rank 3.
if x_ndim > 3:
# squash middle dimensions of x.
x_shape = shape(x)
x_mid_dims = x_shape[1:-1]
x_squashed_shape = array_ops.stack(
[x_shape[0], -1, x_shape[-1]])
x = array_ops.reshape(x, x_squashed_shape)
x_squashed = True
else:
x_squashed = False
if y_ndim > 3:
# squash trailing dimensions of y.
y_shape = shape(y)
y_trail_dims = y_shape[2:]
y_squashed_shape = array_ops.stack(
[y_shape[0], y_shape[1], -1])
y = array_ops.reshape(y, y_squashed_shape)
y_squashed = True
else:
y_squashed = False
result = math_ops.matmul(x, y)
# if inputs were squashed, we have to reshape the matmul output.
output_shape = array_ops.shape(result)
do_reshape = False
if x_squashed:
output_shape = array_ops.concat(
[output_shape[:1],
x_mid_dims,
output_shape[-1:]], 0)
do_reshape = True
if y_squashed:
output_shape = array_ops.concat([output_shape[:-1], y_trail_dims], 0)
do_reshape = True
if do_reshape:
result = array_ops.reshape(result, output_shape)
# if the inputs were originally rank 2, we remove the added 1 dim.
if orig_x_ndim == 2:
result = array_ops.squeeze(result, 1)
elif orig_y_ndim == 2:
result = array_ops.squeeze(result, -1)
return result
@keras_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
>>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(var)
array([[1., 2., 3.],
[4., 5., 6.]], dtype=float32)
>>> var_transposed = tf.keras.backend.transpose(var)
>>> tf.keras.backend.eval(var_transposed)
array([[1., 4.],
[2., 5.],
[3., 6.]], dtype=float32)
>>> input = tf.keras.backend.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_...' shape=(2, 3) dtype=float32>
>>> input_transposed = tf.keras.backend.transpose(input)
>>> input_transposed
<tf.Tensor 'Transpose_...' shape=(3, 2) dtype=float32>
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
Examples:
>>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(var)
array([[1., 2., 3.],
[4., 5., 6.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [0])
>>> tf.keras.backend.eval(var_gathered)
array([[1., 2., 3.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [1])
>>> tf.keras.backend.eval(var_gathered)
array([[4., 5., 6.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [0,1,0])
>>> tf.keras.backend.eval(var_gathered)
array([[1., 2., 3.],
[4., 5., 6.],
[1., 2., 3.]], dtype=float32)
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
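  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([[1, 2], [3, 4]])
  >>> tf.keras.backend.max(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=4>
  >>> tf.keras.backend.max(x, axis=1)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 4], dtype=int32)>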
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
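  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([[1, 2], [3, 4]])
  >>> tf.keras.backend.sum(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=10>
  >>> tf.keras.backend.sum(x, axis=1)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([3, 7], dtype=int32)>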
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
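  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([1, 2, 3])
  >>> tf.keras.backend.cumsum(x)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 6], dtype=int32)>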
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
It is an alias to `tf.math.reduce_std`.
Arguments:
x: A tensor or variable. It should have numerical dtypes. Boolean type
inputs will be converted to float.
axis: An integer, the axis to compute the standard deviation. If `None`
(the default), reduces all dimensions. Must be in the range
`[-rank(x), rank(x))`.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is retained with
length 1.
Returns:
A tensor with the standard deviation of elements of `x` with same dtype.
Boolean type input will be converted to float.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
      axis: An integer or a list of integers. The axis or axes along which to
          compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
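  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.keras.backend.mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.5>
  >>> tf.keras.backend.mean(x, axis=0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 3.], dtype=float32)>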
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
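  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([[1, 7, 2], [9, 3, 8]])
  >>> tf.keras.backend.argmax(x, axis=-1)
  <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 0])>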
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float, integer, or tensor.
max_value: Python float, integer, or tensor.
Returns:
A tensor.
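  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([-2., 0.5, 3.])
  >>> tf.keras.backend.clip(x, min_value=0., max_value=1.)
  <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0. , 0.5, 1. ], dtype=float32)>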
"""
if (isinstance(min_value, (int, float)) and
isinstance(max_value, (int, float))):
if max_value < min_value:
max_value = min_value
if min_value is None:
min_value = -np.inf
if max_value is None:
max_value = np.inf
return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor with the element wise maximum value(s) of `x` and `y`.
Examples:
>>> x = tf.Variable([[1, 2], [3, 4]])
>>> y = tf.Variable([[2, 1], [0, -1]])
>>> m = tf.keras.backend.maximum(x, y)
>>> m
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[2, 2],
[3, 4]], dtype=int32)>
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
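  Example (a minimal sketch, assuming TF 2.x eager execution and a 4D
  channels-last input):
  >>> x = tf.random.normal((2, 3, 4, 5))
  >>> normed, mean, variance = tf.keras.backend.normalize_batch_in_training(
  ...     x, gamma=tf.ones((5,)), beta=tf.zeros((5,)), reduction_axes=[0, 1, 2])
  >>> tf.keras.backend.int_shape(mean)
  (5,)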
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
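  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> y = tf.keras.backend.batch_normalization(
  ...     x, mean=tf.constant([2., 3.]), var=tf.constant([1., 1.]),
  ...     beta=tf.zeros((2,)), gamma=tf.ones((2,)))
  >>> tf.keras.backend.int_shape(y)
  (2, 2)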
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
>>> tf.keras.backend.concatenate((a, b), axis=-1)
<tf.Tensor: shape=(3, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 10, 20, 30],
[ 4, 5, 6, 40, 50, 60],
[ 7, 8, 9, 70, 80, 90]], dtype=int32)>
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
elif py_all(isinstance(x, ragged_tensor.RaggedTensor) for x in tensors):
return ragged_concat_ops.concat(tensors, axis)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
>>> a
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> tf.keras.backend.reshape(a, shape=(2, 6))
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12]], dtype=int32)>
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
>>> a
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[ 1, 4, 7, 10],
[ 2, 5, 8, 11],
[ 3, 6, 9, 12]], dtype=int32)>
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
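  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.keras.backend.ones((1, 2, 2, 3))
  >>> y = tf.keras.backend.resize_images(
  ...     x, height_factor=2, width_factor=2, data_format='channels_last')
  >>> tf.keras.backend.int_shape(y)
  (1, 4, 4, 3)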
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_images_v2(
x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)
elif interpolation == 'bilinear':
x = image_ops.resize_images_v2(x, new_shape,
method=image_ops.ResizeMethod.BILINEAR)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
Example:
>>> b = tf.constant([1, 2, 3])
>>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
@keras_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
Example:
>>> b = tf.constant([[1, 2], [3, 4]])
>>> b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4]], dtype=int32)>
>>> tf.keras.backend.repeat(b, n=2)
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[1, 2],
[1, 2]],
[[3, 4],
[3, 4]]], dtype=int32)>
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
Example:
>>> tf.keras.backend.arange(start=0, stop=10, step=1.5)
<tf.Tensor: shape=(7,), dtype=float32,
numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@keras_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
      n: A list of integers (or a single integer). Its length must be the same
          as the number of dimensions in `x`.
Returns:
A tiled tensor.
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
Example:
>>> b = tf.constant([[1, 2], [3, 4]])
>>> b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4]], dtype=int32)>
>>> tf.keras.backend.flatten(b)
<tf.Tensor: shape=(4,), dtype=int32,
numpy=array([1, 2, 3, 4], dtype=int32)>
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
Examples:
Flattening a 3D tensor to 2D by collapsing the last dimension.
>>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))
>>> x_batch_flatten = batch_flatten(x_batch)
>>> tf.keras.backend.int_shape(x_batch_flatten)
(2, 60)
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
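  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([1, 2])
  >>> tf.keras.backend.expand_dims(x, axis=0)
  <tf.Tensor: shape=(1, 2), dtype=int32, numpy=array([[1, 2]], dtype=int32)>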
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
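  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.keras.backend.ones((2, 3, 4))
  >>> y = tf.keras.backend.temporal_padding(x, padding=(1, 1))
  >>> tf.keras.backend.int_shape(y)
  (2, 5, 4)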
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2],[3, 4]])
>>> b = tf.constant([[10, 20],[30, 40]])
>>> tf.keras.backend.stack((a, b))
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[ 1, 2],
[ 3, 4]],
[[10, 20],
[30, 40]]], dtype=int32)>
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
      (n + 1)D one-hot representation of the input,
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
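  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> tf.keras.backend.one_hot(tf.constant([0, 2]), num_classes=3)
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
  array([[1., 0., 0.],
         [0., 0., 1.]], dtype=float32)>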
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
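  Example (a minimal sketch, assuming TF 2.x eager execution):
  >>> x = tf.constant([1, 2, 3])
  >>> tf.keras.backend.reverse(x, axes=0)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([3, 2, 1], dtype=int32)>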
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
_VALUE_SET_CODE_STRING = """
>>> K = tf.keras.backend # Common keras convention
>>> v = K.variable(1.)
>>> # reassign
>>> K.set_value(v, 2.)
>>> print(K.get_value(v))
2.0
>>> # increment
>>> K.set_value(v, K.get_value(v) + 1)
>>> print(K.get_value(v))
3.0
Variable semantics in TensorFlow 2 are eager execution friendly. The above
code is roughly equivalent to:
>>> v = tf.Variable(1.)
>>> v.assign(2.)
>>> print(v.numpy())
2.0
>>> v.assign_add(1.)
>>> print(v.numpy())
3.0"""[3:] # Prune first newline and indent to match the docstring template.
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
  `backend.get_value` is the complement of `backend.set_value`, and provides
a generic interface for reading from variables while abstracting away the
differences between TensorFlow 1.x and 2.x semantics.
{snippet}
Arguments:
x: input variable.
Returns:
A Numpy array.
"""
if not tensor_util.is_tensor(x):
return x
if context.executing_eagerly() or isinstance(x, ops.EagerTensor):
return x.numpy()
if not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
if ops.executing_eagerly_outside_functions():
# This method of evaluating works inside the Keras FuncGraph.
return function([], x)(x)
with x.graph.as_default():
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
  `backend.set_value` is the complement of `backend.get_value`, and provides
a generic interface for assigning to variables while abstracting away the
differences between TensorFlow 1.x and 2.x semantics.
{snippet}
Arguments:
x: Variable to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
# In order to support assigning weights to resizable variables in
# Keras, we make a placeholder with the correct number of dimensions
# but with None in each dimension. This way, we can assign weights
# of any size (as long as they have the correct dimensionality).
placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
assign_placeholder = array_ops.placeholder(
tf_dtype, shape=placeholder_shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
# In order to support assigning weights to resizable variables in
# Keras, we make a placeholder with the correct number of dimensions
# but with None in each dimension. This way, we can assign weights
# of any size (as long as they have the correct dimensionality).
placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
assign_placeholder = array_ops.placeholder(
tf_dtype, shape=placeholder_shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
get_value.__doc__ = get_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
set_value.__doc__ = set_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
>>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> tf.keras.backend.print_tensor(x)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[1., 2.],
[3., 4.]], dtype=float32)>
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):
with get_graph().as_default():
op = logging_ops.print_v2(message, x, output_stream=sys.stdout)
with ops.control_dependencies([op]):
return array_ops.identity(x)
else:
logging_ops.print_v2(message, x, output_stream=sys.stdout)
return x
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular, additional operations can be passed via the `fetches` argument
  and additional tensor substitutions via the `feed_dict` argument. Note that
  the given substitutions are merged with substitutions from `inputs`. Even
  though `feed_dict` is passed once in the constructor (called in
  `model.compile()`), we can modify the values in the dictionary. Through this
  `feed_dict` we can provide additional substitutions besides Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self._inputs_structure = inputs
self.inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(
nest.flatten(outputs, expand_composites=True))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
    # to run custom updates.
    # This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def _eval_if_composite(self, tensor):
"""Helper method which evaluates any CompositeTensors passed to it."""
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
if isinstance(tensor, composite_tensor.CompositeTensor):
return self._session.run(tensor)
else:
return tensor
def __call__(self, inputs):
inputs = nest.flatten(inputs, expand_composites=True)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
output_structure = nest.pack_sequence_as(
self._outputs_structure,
fetched[:len(self.outputs)],
expand_composites=True)
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
return nest.map_structure(self._eval_if_composite, output_structure)
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
self.name = name
self._inputs_structure = inputs
inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
outputs = nest.flatten(outputs, expand_composites=True)
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
if updates and not outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
graphs = {
i.graph
for i in nest.flatten([inputs, outputs, updates])
if hasattr(i, 'graph')
}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
global_graph = get_graph()
updates_ops = []
legacy_update_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op). Otherwise it
# is assumed to already be an op -- we cannot control its execution
# order.
if isinstance(update, tuple):
legacy_update_ops.append(update)
else:
if hasattr(update, 'op'):
update = update.op
if update is not None:
# `update.op` may have been None in certain cases.
updates_ops.append(update)
self._freezable_vars_to_feed = []
self._freezable_vars_values = []
freezable_vars_from_keras_graph = object_identity.ObjectIdentitySet(
_FREEZABLE_VARS.get(global_graph, {}))
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = (
outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
[p_new for [_, p_new] in legacy_update_ops
if isinstance(p_new, ops.Tensor)])
lifted_map = lift_to_graph.lift_to_graph(
tensors=init_tensors,
graph=exec_graph,
sources=inputs,
add_sources=True,
handle_captures=True,
base_graph=source_graph)
inputs = [lifted_map[i] for i in inputs]
outputs = [lifted_map[i] for i in outputs]
updates_ops = [lifted_map[i] for i in updates_ops]
legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
for p, p_new in legacy_update_ops]
# Keep track of the value to feed to any "freezable variables"
# created in this graph.
for old_op, new_op in lifted_map.items():
if old_op in freezable_vars_from_keras_graph:
frozen_var = old_op
if frozen_var._initial_value != frozen_var._current_value:
# We only feed a frozen_variable if its value has changed;
# otherwise it can rely on the default value of the
# underlying placeholder_with_default.
self._freezable_vars_to_feed.append(new_op)
self._freezable_vars_values.append(frozen_var._current_value)
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
with ops.control_dependencies(outputs):
for p, p_new in legacy_update_ops:
updates_ops.append(state_ops.assign(p, p_new))
self.inputs, self.outputs = inputs, outputs
self._input_references = self.inputs + self._freezable_vars_to_feed
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
exec_graph.inputs = self._input_references + exec_graph.internal_captures
exec_graph.outputs = self.outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = len(self._input_references)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with exec_graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[ops.tensor_id(
x)] = tensor_util.constant_value(x.op.inputs[0])
def __call__(self, inputs):
input_values = nest.flatten(inputs, expand_composites=True)
if self._freezable_vars_values:
input_values = input_values + self._freezable_vars_values
converted_inputs = []
for tensor, value in zip(self._input_references, input_values):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(
ops.tensor_id(tensor), None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
if not isinstance(value, ops.Tensor):
value = ops.convert_to_tensor_v2(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
# EagerTensor.numpy() will often make a copy to ensure memory safety.
# However in this case `outputs` is not directly returned, so it is always
# safe to reuse the underlying buffer without checking. In such a case the
# private numpy conversion method is preferred to guarantee performance.
return nest.pack_sequence_as(
self._outputs_structure,
[x._numpy() for x in outputs], # pylint: disable=protected-access
expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
  Returns:
      A callable that, when called on a list of input values, computes the
      corresponding output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
raise ValueError('Session keyword arguments are not support during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(
inputs, outputs, updates=updates, name=name, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
  Returns:
      A list of gradient tensors, one per variable in `variables`.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
return map(array_ops.stop_gradient, variables)
return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: An integer or a 1-D Tensor, depending on whether
the time dimension is fixed-length or not. In case of variable length
input, it is used for masking in case there's no mask specified.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
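  Example (a minimal sketch, assuming TF 2.x eager execution; the hypothetical
  step function below simply accumulates its inputs over time):
  >>> inputs = tf.ones((4, 3, 2))  # (samples, time, features)
  >>> initial_states = [tf.zeros((4, 2))]
  >>> def step_function(inp, states):
  ...   new_state = states[0] + inp
  ...   return new_state, [new_state]
  >>> last_output, outputs, new_states = tf.keras.backend.rnn(
  ...     step_function, inputs, initial_states)
  >>> tf.keras.backend.int_shape(outputs)
  (4, 3, 2)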
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
if nest.is_sequence(mask_t):
raise ValueError('mask_t is expected to be tensor, but got %s' % mask_t)
if nest.is_sequence(input_t):
raise ValueError('input_t is expected to be tensor, but got %s' % input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
    # Process the input tensors. The input tensors need to be split on the
    # time_step dim and reversed if go_backwards is True. In the case of nested
    # input, the input is flattened and then transformed individually.
    # The result of this will be a tuple of lists; each item in the tuple is a
    # list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where_v2(tiled_mask_t, output, prev_output)
flat_states = nest.flatten(states)
flat_new_states = nest.flatten(new_states)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_states)
flat_final_states = tuple(
array_ops.where_v2(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_states, flat_states))
states = nest.pack_sequence_as(states, flat_final_states)
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where_v2(
_expand_mask(mask_list[-1], last_output), last_output,
zeros_like(last_output))
outputs = array_ops.where_v2(
_expand_mask(mask, outputs, fixed_dim=2), outputs,
zeros_like(outputs))
else: # mask is None
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else: # Unroll == False
states = tuple(initial_states)
# Create the input TensorArrays. If the input is a nested structure of
# tensors, it is flattened first and one TensorArray is created per
# flattened tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
# Get the time(0) input and compute the output for it; that output is used
# to determine the dtype of the output TensorArrays. Don't read from
# input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# the value is discarded.
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants))
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
element_shape=out.shape,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
# We only specify 'maximum_iterations' when building for XLA, since
# specifying it causes slowdowns on GPU in TF.
if (not context.executing_eagerly() and
control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())):
max_iterations = math_ops.reduce_max(input_length)
else:
max_iterations = None
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': max_iterations,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
def masking_fn(time):
return mask_ta.read(time)
def compute_masked_output(mask_t, flat_out, flat_mask):
tiled_mask_t = tuple(
_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))
for o in flat_out)
return tuple(
array_ops.where_v2(m, o, fm)
for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask))
elif isinstance(input_length, ops.Tensor):
if go_backwards:
max_len = math_ops.reduce_max(input_length, axis=0)
rev_input_length = math_ops.subtract(max_len - 1, input_length)
def masking_fn(time):
return math_ops.less(rev_input_length, time)
else:
def masking_fn(time):
return math_ops.greater(input_length, time)
def compute_masked_output(mask_t, flat_out, flat_mask):
return tuple(
array_ops.where(mask_t, o, zo)
for (o, zo) in zip(flat_out, flat_mask))
else:
masking_fn = None
if masking_fn is not None:
# The mask for the output at time T is based on the output at time T - 1.
# In the case T = 0, a zero-filled tensor is used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = masking_fn(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
flat_new_output = compute_masked_output(mask_t, flat_output,
flat_mask_output)
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_final_state = compute_masked_output(mask_t, flat_new_state,
flat_state)
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if isinstance(output_, ops.Tensor):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
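# Illustrative usage sketch for `rnn` (hypothetical values, assumes eager
# execution and batch-major data): a step function that accumulates a running
# sum over the time dimension.
#
#   >>> inputs = tf.keras.backend.ones((2, 3, 4))     # (samples, time, features)
#   >>> init_states = [tf.keras.backend.zeros((2, 4))]
#   >>> step = lambda inp, states: (inp + states[0], [inp + states[0]])
#   >>> last, outputs, states = tf.keras.backend.rnn(step, inputs, init_states)
#   >>> # last has shape (2, 4) and holds the cumulative sum at t = 2;
#   >>> # outputs has shape (2, 3, 4), one accumulated value per timestep.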
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where_v2(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where_v2(condition, then_expression, else_expression)
return x
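# Illustrative usage sketch for `switch` (hypothetical values): with a scalar
# condition, callable branches are evaluated lazily via `tf.cond`.
#
#   >>> cond = tf.keras.backend.greater(tf.keras.backend.constant(1.), 0.)
#   >>> val = tf.keras.backend.switch(cond,
#   ...                               lambda: tf.keras.backend.ones((2, 2)),
#   ...                               lambda: tf.keras.backend.zeros((2, 2)))
#   >>> # val is a (2, 2) tensor of ones, since the condition is True.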
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
The `training` flag defaults to `K.learning_phase()`.
"""
from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top
if training is None:
training = base_layer_utils.call_context().training
if training is None:
training = learning_phase()
# TODO(b/138862903): Handle the case when training is tensor.
if not tensor_util.is_tensor(training):
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
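# Illustrative usage sketch for `in_train_phase` (hypothetical values):
# passing `training` explicitly avoids relying on the global learning phase.
#
#   >>> x = tf.keras.backend.constant([1.])
#   >>> alt = tf.keras.backend.constant([0.])
#   >>> tf.keras.backend.in_train_phase(x, alt, training=True)    # -> [1.]
#   >>> tf.keras.backend.in_train_phase(x, alt, training=False)   # -> [0.]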
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
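# Illustrative usage sketch for `relu` (hypothetical values) showing the
# piecewise behaviour documented above:
#
#   >>> t = tf.keras.backend.constant([-2., 1., 4., 8.])
#   >>> tf.keras.backend.relu(t, alpha=0.1, max_value=6., threshold=0.5)
#   >>> # approximately [-0.25, 1., 4., 6.]: slope 0.1 below the threshold,
#   >>> # identity in between, saturation at max_value.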
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where_v2(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
def _backtrack_identity(tensor):
while tensor.op.type == 'Identity':
tensor = tensor.op.inputs[0]
return tensor
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
Example:
>>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
>>> print(a)
tf.Tensor(
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]], shape=(3, 3), dtype=float32)
>>> b = tf.constant([.9, .05, .05, .5, .89, .6, .05, .01, .94], shape=[3,3])
>>> print(b)
tf.Tensor(
[[0.9 0.05 0.05]
[0.5 0.89 0.6 ]
[0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
>>> loss = tf.keras.backend.categorical_crossentropy(a, b)
>>> print(np.around(loss, 5))
[0.10536 0.80467 0.06188]
>>> loss = tf.keras.backend.categorical_crossentropy(a, a)
>>> print(np.around(loss, 5))
[0. 0. 0.]
"""
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
return nn.softmax_cross_entropy_with_logits_v2(
labels=target, logits=output, axis=axis)
if not isinstance(output, (ops.EagerTensor, variables_module.Variable)):
output = _backtrack_identity(output)
if output.op.type == 'Softmax':
# When the softmax activation function is used for the output operation,
# we use the logits from the softmax op directly to compute the loss, in
# order to avoid the log of probabilities that have collapsed to zero
# during training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(
labels=target, logits=output, axis=axis)
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits and not isinstance(
output, (ops.EagerTensor, variables_module.Variable)):
output = _backtrack_identity(output)
if output.op.type == 'Softmax':
# When the softmax activation function is used for the output operation,
# we use the logits from the softmax op directly to compute the loss, in
# order to avoid the log of probabilities that have collapsed to zero
# during training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
from_logits = True
if not from_logits:
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
if isinstance(output.shape, (tuple, list)):
output_rank = len(output.shape)
else:
output_rank = output.shape.ndims
if output_rank is not None:
axis %= output_rank
if axis != output_rank - 1:
permutation = list(
itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
output = array_ops.transpose(output, perm=permutation)
elif axis != -1:
raise ValueError(
'Cannot compute sparse categorical crossentropy with `axis={}` on an '
'output tensor with unknown rank'.format(axis))
target = cast(target, 'int64')
# Try to adjust the shape so that rank of labels = rank of logits - 1.
output_shape = array_ops.shape_v2(output)
target_rank = target.shape.ndims
update_shape = (
target_rank is not None and output_rank is not None and
target_rank != output_rank - 1)
if update_shape:
target = flatten(target)
output = array_ops.reshape(output, [-1, output_shape[-1]])
if py_any(_is_symbolic_tensor(v) for v in [target, output]):
with get_graph().as_default():
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
else:
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
if update_shape and output_rank >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, output_shape[:-1])
else:
return res
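# Illustrative usage sketch for `sparse_categorical_crossentropy`
# (hypothetical values): integer class indices are used directly, so no
# one-hot encoding of the targets is needed.
#
#   >>> labels = tf.keras.backend.constant([0, 2])
#   >>> probs = tf.keras.backend.constant([[.9, .05, .05], [.1, .1, .8]])
#   >>> tf.keras.backend.sparse_categorical_crossentropy(labels, probs)
#   >>> # roughly [0.105, 0.223], i.e. -log(probs[i, labels[i]]) per sample.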
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
if from_logits:
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
if not isinstance(output, (ops.EagerTensor, variables_module.Variable)):
output = _backtrack_identity(output)
if output.op.type == 'Sigmoid':
# When the sigmoid activation function is used for the output operation,
# we use the logits from the sigmoid op directly to compute the loss, in
# order to avoid the log of probabilities that have collapsed to zero
# during training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
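# Illustrative usage sketch for `binary_crossentropy` (hypothetical values):
#
#   >>> y_true = tf.keras.backend.constant([1., 0.])
#   >>> y_pred = tf.keras.backend.constant([.9, .2])
#   >>> tf.keras.backend.binary_crossentropy(y_true, y_pred)
#   >>> # roughly [0.105, 0.223], i.e. -log(0.9) and -log(1 - 0.2).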
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.mul(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
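# Illustrative usage sketch for `hard_sigmoid` (hypothetical values):
#
#   >>> t = tf.keras.backend.constant([-3., 0., 1., 3.])
#   >>> tf.keras.backend.hard_sigmoid(t)
#   >>> # [0., 0.5, 0.7, 1.]: the line 0.2 * x + 0.5 clipped to [0, 1].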
@keras_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same', 'valid'.
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
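# Illustrative usage sketch for `conv1d` with causal padding (hypothetical
# shapes): the input is left-padded by dilation_rate * (kernel_size - 1)
# steps, so the output at step t depends only on inputs up to step t.
#
#   >>> x = tf.keras.backend.ones((1, 10, 3))    # (batch, steps, channels)
#   >>> k = tf.keras.backend.ones((4, 3, 8))     # (kernel_size, in, out)
#   >>> y = tf.keras.backend.conv1d(x, k, padding='causal', dilation_rate=2,
#   ...                             data_format='channels_last')
#   >>> # y has shape (1, 10, 8); the left pad added 2 * (4 - 1) = 6 steps.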
@keras_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (shape(x)[0],) + tuple(output_shape[1:])
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.depthwise_conv2d')
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` or
`"channels_first"`.
ValueError: if `pool_size` is not a tuple of 2 integers.
ValueError: if `strides` is not a tuple of 2 integers.
ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
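# Illustrative usage sketch for `pool2d` (hypothetical shapes): 2x2 max
# pooling with stride 2 and 'valid' padding halves the spatial dimensions.
#
#   >>> imgs = tf.keras.backend.ones((2, 8, 8, 3))   # channels_last input
#   >>> pooled = tf.keras.backend.pool2d(imgs, (2, 2), strides=(2, 2),
#   ...                                  data_format='channels_last')
#   >>> # pooled has shape (2, 4, 4, 3).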
@keras_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` or
`"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend(
slice(position[d] * strides[d], position[d] * strides[d] +
kernel_size[d]) for d in spatial_dimensions)
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
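# Illustrative shape sketch for the module-level `local_conv` helper
# (hypothetical shapes): with a 6x6 `channels_last` input, a 3x3 window and
# stride 1 there are 4 * 4 = 16 output positions, and the kernel holds one
# (3 * 3 * channels_in, channels_out) weight slice per position.
#
#   >>> inputs = tf.keras.backend.ones((2, 6, 6, 5))
#   >>> kernel = tf.keras.backend.ones((16, 3 * 3 * 5, 7))
#   >>> out = local_conv(inputs, kernel, (3, 3), (1, 1), (4, 4),
#   ...                  data_format='channels_last')
#   >>> # out has shape (2, 4, 4, 7).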
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
A 3d tensor with shape:
(batch_size, filters, output_length)
if data_format='channels_first'
or 3D tensor with shape:
(batch_size, output_length, filters)
if data_format='channels_last'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
The bias should be either a vector or
a tensor with ndim(x) - 1 dimensions.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
(len(bias_shape), ndim(x)))
if len(bias_shape) == 1:
if data_format == 'channels_first':
return nn.bias_add(x, bias, data_format='NCHW')
return nn.bias_add(x, bias, data_format='NHWC')
if ndim(x) in (3, 4, 5):
if data_format == 'channels_first':
bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
return x + reshape(bias, bias_reshape_axis)
return x + reshape(bias, (1,) + bias_shape)
return nn.bias_add(x, bias)
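# Illustrative usage sketch for `bias_add` (hypothetical values): a 1-D bias
# is broadcast over the channel dimension selected by `data_format`.
#
#   >>> feats = tf.keras.backend.zeros((2, 4, 4, 3))
#   >>> bias = tf.keras.backend.constant([1., 2., 3.])
#   >>> out = tf.keras.backend.bias_add(feats, bias, data_format='channels_last')
#   >>> # every spatial position of out equals [1., 2., 3.].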
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
It is an alias to `tf.random.normal`.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, the mean value of the normal distribution to draw samples.
Default to 0.0.
stddev: A float, the standard deviation of the normal distribution
to draw samples. Default to 1.0.
dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras
backend dtype which is float32.
seed: Integer, random seed. Will use a random numpy integer when not
specified.
Returns:
A tensor with normal distribution of values.
Example:
>>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3),
... mean=0.0, stddev=1.0)
>>> random_normal_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
Example:
>>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),
... minval=0.0, maxval=1.0)
>>> random_uniform_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@deprecated(None, 'Use `tf.keras.backend.random_bernoulli` instead.')
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
DEPRECATED, use `tf.keras.backend.random_bernoulli` instead.
The binomial distribution with parameters `n` and `p` is the probability
distribution of the number of successes in `n` independent Bernoulli trials,
each with success probability `p`. Only `n` = 1 is supported for now.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
Example:
>>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3),
... p=0.5)
>>> random_binomial_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where_v2(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.random_bernoulli')
def random_bernoulli(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random bernoulli distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of bernoulli distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
return random_binomial(shape, p, dtype, seed)
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wrap up the beam search code that is
# in TensorFlow's CTC implementation.
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(old_input, current_input):
return array_ops.expand_dims(
math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
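# Illustrative shape sketch for `ctc_batch_cost` (hypothetical values): labels
# are dense and padded, and the per-sample lengths tell the loss how much of
# each row is real.
#
#   >>> y_pred = tf.keras.backend.softmax(
#   ...     tf.keras.backend.random_normal((2, 30, 11)))   # 10 classes + blank
#   >>> y_true = tf.keras.backend.constant(
#   ...     [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]], dtype='int32')
#   >>> input_len = tf.keras.backend.constant([[30], [30]], dtype='int32')
#   >>> label_len = tf.keras.backend.constant([[3], [2]], dtype='int32')
#   >>> loss = tf.keras.backend.ctc_batch_cost(y_true, y_pred, input_len, label_len)
#   >>> # loss has shape (2, 1): one CTC loss value per batch element.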
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
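# Illustrative usage sketch for `foldl` / `foldr` (hypothetical values): the
# accumulator is threaded through the elements in order.
#
#   >>> elems = tf.keras.backend.constant([1., 2., 3., 4.])
#   >>> tf.keras.backend.foldl(lambda acc, x: acc + x, elems)    # -> 10.0
#   >>> # With no initializer, the fold starts from elems[0] (elems[-1] for foldr).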
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
with open(_config_path) as fh:
_config = json.load(fh)
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
# Ignore permission-denied errors and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Ignore permission-denied errors.
pass
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if distribution_strategy.extended._in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return (strategy is not None and
strategy.__class__.__name__.startswith('TPUStrategy'))
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
def _is_symbolic_tensor(x):
return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
def convert_inputs_if_ragged(inputs):
"""Converts any ragged tensors to dense."""
def _convert_ragged_input(inputs):
if isinstance(inputs, ragged_tensor.RaggedTensor):
return inputs.to_tensor()
return inputs
flat_inputs = nest.flatten(inputs)
contains_ragged = py_any(
isinstance(i, ragged_tensor.RaggedTensor) for i in flat_inputs)
if not contains_ragged:
return inputs, None
inputs = nest.map_structure(_convert_ragged_input, inputs)
  # Multiple masks are not yet supported, so one mask is used on all inputs.
# We approach this similarly when using row lengths to ignore steps.
nested_row_lengths = math_ops.cast(flat_inputs[0].nested_row_lengths()[0],
'int32')
return inputs, nested_row_lengths
def maybe_convert_to_ragged(is_ragged_input, output, nested_row_lengths):
"""Converts any ragged input back to its initial structure."""
if not is_ragged_input:
return output
return ragged_tensor.RaggedTensor.from_tensor(output, nested_row_lengths)
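# Illustrative round trip for the two helpers above (an assumption-laden
# sketch, not original source): a ragged batch is densified for layers that
# need uniform shapes, then restored from the recorded row lengths.
#
#   ragged = tf.ragged.constant([[1, 2, 3], [4]])
#   dense, row_lengths = convert_inputs_if_ragged(ragged)
#   # dense is zero-padded to shape [2, 3]; row_lengths == [3, 1]
#   restored = maybe_convert_to_ragged(True, dense, row_lengths)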
class ContextValueCache(weakref.WeakKeyDictionary):
"""Container that caches (possibly tensor) values based on the context.
This class is similar to defaultdict, where values may be produced by the
default factory specified during initialization. This class also has a default
  value for the key (when key is `None`) -- the key is set to the current
graph or eager context. The default factories for key and value are only used
in `__getitem__` and `setdefault`. The `.get()` behavior remains the same.
This object will return the value of the current graph or closest parent graph
if the current graph is a function. This is to reflect the fact that if a
tensor is created in eager/graph, child functions may capture that tensor.
The default factory method may accept keyword arguments (unlike defaultdict,
which only accepts callables with 0 arguments). To pass keyword arguments to
`default_factory`, use the `setdefault` method instead of `__getitem__`.
An example of how this class can be used in different contexts:
```
cache = ContextValueCache(int)
# Eager mode
cache[None] += 2
cache[None] += 4
assert cache[None] == 6
# Graph mode
with tf.Graph().as_default() as g:
cache[None] += 5
cache[g] += 3
assert cache[g] == 8
```
Example of a default factory with arguments:
```
cache = ContextValueCache(lambda x: x + 1)
g = tf.get_default_graph()
# Example with keyword argument.
value = cache.setdefault(key=g, kwargs={'x': 3})
assert cache[g] == 4
```
"""
def __init__(self, default_factory):
self.default_factory = default_factory
weakref.WeakKeyDictionary.__init__(self)
def _key(self):
if context.executing_eagerly():
return _DUMMY_EAGER_GRAPH.key
else:
return ops.get_default_graph()
def _get_parent_graph(self, graph):
"""Returns the parent graph or dummy eager object."""
# TODO(b/149317164): Currently FuncGraphs use ops.get_default_graph() as the
# outer graph. This results in outer_graph always being a Graph,
# even in eager mode (get_default_graph will create a new Graph if there
# isn't a default graph). Because of this bug, we have to specially set the
# key when eager execution is enabled.
parent_graph = graph.outer_graph
if (not isinstance(parent_graph, func_graph.FuncGraph) and
ops.executing_eagerly_outside_functions()):
return _DUMMY_EAGER_GRAPH.key
return parent_graph
def _get_recursive(self, key):
"""Gets the value at key or the closest parent graph."""
value = self.get(key)
if value is not None:
return value
# Since FuncGraphs are able to capture tensors and variables from their
# parent graphs, recursively search to see if there is a value stored for
# one of the parent graphs.
if isinstance(key, func_graph.FuncGraph):
return self._get_recursive(self._get_parent_graph(key))
return None
def __getitem__(self, key):
"""Gets the value at key (or current context), or sets default value.
Args:
      key: May be `None` or a `Graph` object. When `None`, the key is set to the
current context.
Returns:
Either the cached or default value.
"""
if key is None:
key = self._key()
value = self._get_recursive(key)
if value is None:
value = self[key] = self.default_factory() # pylint:disable=not-callable
return value
def setdefault(self, key=None, default=None, kwargs=None):
"""Sets the default value if key is not in dict, and returns the value."""
if key is None:
key = self._key()
kwargs = kwargs or {}
if default is None and key not in self:
default = self.default_factory(**kwargs)
return weakref.WeakKeyDictionary.setdefault(self, key, default)
# This dictionary holds a mapping {graph: learning_phase}. In eager mode, a
# dummy object is used.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = ContextValueCache(_default_learning_phase)
# This dictionary holds a mapping {graph: set_of_freezable_variables}.
# Each set tracks objects created via `freezable_variable` in the graph.
_FREEZABLE_VARS = ContextValueCache(object_identity.ObjectIdentityWeakSet)
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = ContextValueCache(object_identity.ObjectIdentityWeakSet)
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = ContextValueCache(object_identity.ObjectIdentityWeakSet)
| []
| []
| [
"OMP_NUM_THREADS",
"KERAS_HOME"
]
| [] | ["OMP_NUM_THREADS", "KERAS_HOME"] | python | 2 | 0 | |
game.py | from flask import Flask, request
from twilio.twiml.messaging_response import MessagingResponse
from gameIdGenerator import createNewGameId
from models import Game, Player, Player_Answers, Question
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import dbManager
import logging
import gameManager
import messageSender
import os
from os.path import join, dirname
from dotenv import load_dotenv
app = Flask(__name__)
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
PRODUCTION_DATABASE_URL = os.environ.get("PRODUCTION_DATABASE_URL")
app.config['SQLALCHEMY_DATABASE_URI'] = PRODUCTION_DATABASE_URL
#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/twibbage_db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
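# The app reads its secrets from a .env file next to this module. An
# illustrative example with placeholder values (the variable names below are
# the ones read in this file; the values themselves are assumptions):
#
#   PRODUCTION_DATABASE_URL=postgresql://user:password@localhost/twibbage_db
#   TWILIO_ACCOUNT_SID=ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   TWILIO_AUTH_TOKEN=your_auth_token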
@app.route("/", methods=['GET', 'POST'])
def twibbage_game():
#INITIALIZE
from_number = request.values.get('From', None)
msg_body = request.values.get('Body', None)
lcase_msg_body = ''
if from_number is not None and msg_body is not None:
lcase_msg_body = unicode.encode(msg_body.lower())
lcase_msg_body = lcase_msg_body.strip()
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID")
AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN")
#Gamestate Variables for testing
already_in_game = True
current_question_id = 1
max_questions = 4
max_players = 2
game_state = "fakeanswertime"
response_string = ""
points_for_correct_guess = 200
points_for_fakeout = 100
someone_just_jerked_around = False
resp = MessagingResponse()
#resp.message("something is wrong")
if lcase_msg_body.startswith("newgame"):
#checkGameState(from_number) or checkInGame()
# Check if from number is already in a game.
if dbManager.checkIfMdnInGame(from_number) == 2:
response_string = "You're already in a game. To exit that game, respond with \"exitgame\""
else:
#lets parse the message and get the max_players and max questions
game_settings = msg_body.split()
player_alias = str(from_number)
try:
max_players = int(game_settings[1])
print("{} requested a max number of players of {}".format(from_number, max_players))
except IndexError:
max_players = 3
print("{} Did not request a maximum number of questions, defaulting to {}".format(from_number, max_players))
try:
max_questions = int(game_settings[2])
print("{} requested a max number of questions of {}".format(from_number, max_questions))
except IndexError:
max_questions = 3
print("{} Did not request a maximum number of questions, defaulting to {}".format(from_number, max_questions))
try:
player_alias = game_settings[3]
print("{} requested a new name of {}".format(from_number, player_alias))
except IndexError:
player_alias = str(from_number)
print("{} Did not request an alias... defaulting to {}".format(from_number, from_number))
#max_questions = msg_body[9:9]
#max_players = msg_body[11:]
#createGame(from_number, num_questions)
new_game = gameManager.createGame(from_number, max_questions, max_players, player_alias)
# creates a new game object, returns
#gameId = "A1B2"
response_string = "\r Starting a new game... \r - Game ID: {} \r - {} Questions \r - {} Players. " \
"\r Tell your so-called friends to text {} to this number to join. Text rules for... rules.".format(new_game, max_questions, max_players, new_game)
#send rules to host.
#gameManager.sendRules(from_number)
elif lcase_msg_body.startswith("exitgame"):
print("********** {} requested to exit the game. Removing player from game.".format(from_number))
#call exitGame(from_number) which should remove the person from a game
player_id = gameManager.removePlayerFromGame(from_number)
#now lets double check to make sure that this person Wasn't the game host.
#dbManager.updateGameState()
if player_id != 0:
#Check to see if the player ID is a host of an active game
if dbManager.isActiveHost(player_id):
print("********** {} was game host. Fully ending the game.".format(from_number))
ended_game = gameManager.endGameByPlayer(player_id)
response_string = "You have been removed. You were host and ended game too."
else:
response_string = "You have been removed from your current game. Bye!"
else:
response_string = "You asked to be removed, but we couldn't find you!"
elif (lcase_msg_body.startswith("rules") or lcase_msg_body.startswith("info")):
#send rules to host.
gameManager.sendRules(from_number)
else:
# So it's not a new game, which means this can be one of 4 things
#1. First we should check to see if the person is in a game
usr_status = dbManager.checkIfMdnInGame(from_number)
#if the user is either not found, or found, but not in a game,
#lets see what they've written
if usr_status == 0 or usr_status ==1:
#we assume the person is joining a game, so lets get the first 5 bytes
game_settings = lcase_msg_body.split()
try:
player_alias = game_settings[1]
print("{} requested a max number of players of {}".format(from_number, max_players))
except IndexError:
#if we're here that means they only entered 1 thing, the game ID
player_alias = from_number
print("{} Did not request a maximum number of questions, defaulting to {}".format(from_number, max_players))
response_string = gameManager.handleGameJoin(lcase_msg_body[:5].upper(),usr_status,from_number,player_alias)
#gameManager.sendRules(from_number)
#ITS AT THIS POINT WELL WANT TO CHECK TO SEE HOW MANY PLAYERS ARE NOW IN ONCE IVE Joined
my_game = dbManager.getActiveGameByPlayerNumber(from_number)
max_players = my_game.max_players
my_game_token = my_game.game_id
my_game_id = my_game.id
player_diff = max_players - dbManager.getPlayerCount(my_game_token)
if player_diff == 0 :
#holy shit it is timeee
resp.message(response_string)
response_string = "OHHH YEAH We're READY TO START THE GAME"
gameManager.startGame(my_game_id)
#if we've joined, and we're now the last player, then lets start the game
else:
#lets get this person's game object.
my_game = dbManager.getActiveGameByPlayerNumber(from_number)
max_players = my_game.max_players
my_game_token = my_game.game_id
my_player = dbManager.getPlayerByMdn(from_number)
#if we're here, then there are 3 possibilities for game state
#1. In The Lobby
if my_game.game_state == "lobby" :
                # Still waiting for people to join
player_diff = max_players - dbManager.getPlayerCount(my_game_token)
response_string = "\r Still waiting for {} player(s). Text rules for... rules".format(player_diff)
# Store off their fake answer in a DB with Question #, Game ID, from_number, realAnswer ==false
elif my_game.game_state == "fakeanswers":
#if it is fake answer time, we should be expecting questions here. So we'll want to store off people's answers
# 0. First lets make sure that I haven't already answered this question
print("Player About to Answer - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
if dbManager.checkIfPlayerAlreadyAnswered(my_game.id, my_game.current_question_sequence_number,my_player.id):
print("Player Already Answered - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
response_string = "You already answered!"
else:
#print("Not Yet Answered - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
#Check if person faked the right answer like a jerkface
if gameManager.fakeAnswerIsRealAnswer(my_game.current_question_id, lcase_msg_body):
response_string = "Well done hotshot... You selected the correct answer. Please reply with a FAKE answer..."
print("{} tried faking the correct answer...".format(from_number))
else:
print("")
# 1. Store off fake answer
dbManager.addPlayerAnswer(my_game.id, my_game.current_question_sequence_number,my_player.id,lcase_msg_body)
response_string = ""
messageSender.sendMessage(from_number, "Thanks for your fake answer! Waiting for other Players to enter theirs...")
#2. Check if I'm the last to answer
answer_count = dbManager.checkNumberPlayerAnswers(my_game.id,my_game.current_question_sequence_number)
player_count = dbManager.getPlayerCount(my_game_token)
answers_missing = player_count - answer_count
print("answers missing: " + str(answers_missing))
# If I'm last to answer,
if answers_missing == 0:
gameManager.moveToGuessTime(my_game.id)
elif my_game.game_state == "guesstime" :
#Get a person's Guess and store a person's guess
player_guess = lcase_msg_body
#check if the person already answered
if dbManager.checkIfPlayerAlreadyGuessed(my_game.id, my_game.current_question_sequence_number,my_player.id):
print("Player Already Guessed - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
response_string = "\r You already Guessed!"
else:
#So this person hasn't submitted a valid guess yet...
#0. Lets get my curent player answer
my_player_answer = dbManager.getPlayerAnswer(my_game.id, my_game.current_question_sequence_number,my_player.id)
#If no, give the person Whos response was selected, a point
guessed_player_answer = dbManager.getPlayerAnswerByGuessId(my_game.id, my_game.current_question_sequence_number, player_guess[:1])
#is this person being an ass?
if lcase_msg_body == my_player_answer.fake_answer_guess_id:
response_string = "Come on now, you can't guess your own answer. Please sumbit another answer."
#is this an invalid answer?
elif lcase_msg_body.isdigit() == False:
response_string = "You just need to enter the NUMBER of the guess you wish to make. Try again. Like 1, or maybe 2!"
else:
#1. Finally... we can Store off guess
dbManager.updatePlayerAnswerGuess(my_player_answer.id, player_guess)
#Is this person's guess the right answer?
if dbManager.checkIfGuessRightAnswer(my_game.current_question_id, player_guess):
dbManager.updatePlayerScore(my_player.id, points_for_correct_guess)
messageSender.sendMessage(from_number, "\r Yay you got it correct! +{} points!".format(str(points_for_correct_guess)))
#Is this not even a valid response number?
elif guessed_player_answer is None:
#well shit, we already allowed him to save off his shit. we should undo thats
dbManager.updatePlayerAnswerGuess(my_player_answer.id, None)
someone_just_jerked_around = True
else:
dbManager.updatePlayerScore(guessed_player_answer.player_id, points_for_fakeout)
#message guesser saying "WRONG"
messageSender.sendMessage(from_number, "\r WRONG! You guessed someone else's fake answer!")
guessed_player_answer_mdn = dbManager.getPlayerMdnById(guessed_player_answer.player_id)
guessed_player_alias = dbManager.getPlayerById(guessed_player_answer.player_id)
#message faker saying someone guessed your shit! +x Points
#messageSender.sendMessage(guessed_player_answer_mdn, "HAHAHAHA. {} guessed your answer! +{} for fakeout!".format(from_number,points_for_fakeout))
messageSender.sendMessage(guessed_player_answer_mdn, "HAHAHAHA. {} guessed your answer! +{} for fakeout!".format(guessed_player_alias.player_name,points_for_fakeout))
if someone_just_jerked_around:
response_string = "You selected an invalid answer. Sry Bro"
else:
#now lets check whether i was the last to answer, then send scoreboard, and shift Gamestate
num_guesses = dbManager.getTotalGuesses(my_game.id,my_game.current_question_sequence_number)
total_players = dbManager.getPlayerCount(my_game_token)
if num_guesses == total_players:
#its time to change game state and send out results of the round
gameManager.sendResults(my_game.id)
game_continuing = gameManager.nextRound(my_game.id)
if not game_continuing:
response_string = "GAME OVER"
else:
response_string = ""
else:
#do nothing really - we're still waiting on other people
response_string = "Waiting for others to guess..."
else:
response_string = ""
return("<h1>Welcome to Twibbage</h1><br/><p>To play, text newgame q p to the number, whwere q is the number of quesitons, and p is the number of players you want in a game.</p>")
#finally, respond.
resp.message(response_string)
return str(resp)
if __name__ == "__main__":
app.run(debug=True)
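# Quick reference for the inbound SMS commands handled in twibbage_game()
# above (derived from the parsing logic; bracketed arguments are optional):
#
#   newgame [max_players] [max_questions] [alias]  -> host a new game
#   <GAMEID> [alias]                               -> join a game by its 5-character id
#   exitgame                                       -> leave the current game (ends it if you are the host)
#   rules / info                                   -> have the rules texted back to you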
| []
| []
| [
"PRODUCTION_DATABASE_URL",
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["PRODUCTION_DATABASE_URL", "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | python | 3 | 0 | |
cryptoxlib/WebsocketMgr.py | import websockets
import json
import logging
import asyncio
import ssl
import aiohttp
from abc import ABC, abstractmethod
from typing import List, Callable, Any, Optional, Union
from cryptoxlib.version_conversions import async_create_task
from cryptoxlib.exceptions import CryptoXLibException, WebsocketReconnectionException, WebsocketClosed, WebsocketError
LOG = logging.getLogger(__name__)
CallbacksType = List[Callable[..., Any]]
class Websocket(ABC):
def __init__(self):
pass
@abstractmethod
async def connect(self):
pass
@abstractmethod
async def close(self):
pass
@abstractmethod
async def receive(self):
pass
@abstractmethod
async def send(self, message: str):
pass
class FullWebsocket(Websocket):
def __init__(self, websocket_uri: str, builtin_ping_interval: Optional[float] = 20,
max_message_size: int = 2**20, ssl_context: ssl.SSLContext = None):
super().__init__()
self.websocket_uri = websocket_uri
self.builtin_ping_interval = builtin_ping_interval
self.max_message_size = max_message_size
self.ssl_context = ssl_context
self.ws = None
async def connect(self):
if self.ws is not None:
raise CryptoXLibException("Websocket reattempted to make connection while previous one is still active.")
LOG.debug(f"Connecting to websocket {self.websocket_uri}")
self.ws = await websockets.connect(self.websocket_uri,
ping_interval = self.builtin_ping_interval,
max_size = self.max_message_size,
ssl = self.ssl_context)
async def close(self):
if self.ws is None:
raise CryptoXLibException("Websocket attempted to close connection while connection not open.")
await self.ws.close()
self.ws = None
async def receive(self):
if self.ws is None:
raise CryptoXLibException("Websocket attempted to read data while connection not open.")
return await self.ws.recv()
async def send(self, message: str):
if self.ws is None:
raise CryptoXLibException("Websocket attempted to send data while connection not open.")
return await self.ws.send(message)
class AiohttpWebsocket(Websocket):
def __init__(self, websocket_uri: str, builtin_ping_interval: Optional[float] = 20,
max_message_size: int = 2 ** 20, ssl_context: ssl.SSLContext = None):
super().__init__()
self.websocket_uri = websocket_uri
self.builtin_ping_interval = builtin_ping_interval
self.max_message_size = max_message_size
self.ssl_context = ssl_context
self.ws = None
async def connect(self):
if self.ws is not None:
raise CryptoXLibException("Websocket reattempted to make connection while previous one is still active.")
self.session = aiohttp.ClientSession()
self.ws = await self.session.ws_connect(url = self.websocket_uri,
max_msg_size = self.max_message_size,
autoping = True,
heartbeat = self.builtin_ping_interval,
ssl = self.ssl_context)
async def close(self):
if self.ws is None:
raise CryptoXLibException("Websocket attempted to close connection while connection not open.")
await self.ws.close()
await self.session.close()
self.ws = None
self.session = None
async def receive(self):
if self.ws is None:
raise CryptoXLibException("Websocket attempted to read data while connection not open.")
message = await self.ws.receive()
if message.type == aiohttp.WSMsgType.TEXT:
if message.data == 'close cmd':
raise WebsocketClosed(f'Websocket was closed: {message.data}')
else:
return message.data
elif message.type == aiohttp.WSMsgType.CLOSED:
raise WebsocketClosed(f'Websocket was closed: {message.data}')
elif message.type == aiohttp.WSMsgType.ERROR:
raise WebsocketError(f'Websocket error: {message.data}')
async def send(self, message: str):
if self.ws is None:
raise CryptoXLibException("Websocket attempted to send data while connection not open.")
return await self.ws.send_str(message)
class WebsocketOutboundMessage(ABC):
@abstractmethod
def to_json(self):
pass
class ClientWebsocketHandle(object):
def __init__(self, websocket: Websocket):
self.websocket = websocket
async def send(self, message: Union[str, dict, WebsocketOutboundMessage]):
if isinstance(message, str):
pass
elif isinstance(message, dict):
message = json.dumps(message)
elif isinstance(message, WebsocketOutboundMessage):
message = json.dumps(message.to_json())
else:
raise CryptoXLibException("Only string or JSON serializable objects can be sent over the websocket.")
LOG.debug(f"> {message}")
return await self.websocket.send(message)
async def receive(self):
return await self.websocket.receive()
class WebsocketMessage(object):
def __init__(self, subscription_id: Any, message: dict, websocket: ClientWebsocketHandle = None) -> None:
self.subscription_id = subscription_id
self.message = message
self.websocket = websocket
class Subscription(ABC):
def __init__(self, callbacks: CallbacksType = None):
self.callbacks = callbacks
self.subscription_id = None
@abstractmethod
def construct_subscription_id(self) -> Any:
pass
@abstractmethod
def get_subscription_message(self, **kwargs) -> dict:
pass
def get_subscription_id(self) -> Any:
if self.subscription_id is None:
self.subscription_id = self.construct_subscription_id()
LOG.debug(f"New subscription id constructed: {self.subscription_id}")
return self.subscription_id
async def initialize(self, **kwargs) -> None:
pass
async def process_message(self, message: WebsocketMessage) -> None:
await self.process_callbacks(message)
async def process_callbacks(self, message: WebsocketMessage) -> None:
if self.callbacks is not None:
tasks = []
for cb in self.callbacks:
# If message contains a websocket, then the websocket handle will be passed to the callbacks.
# This is useful for duplex websockets
if message.websocket is not None:
tasks.append(async_create_task(cb(message.message, message.websocket)))
else:
tasks.append(async_create_task(cb(message.message)))
await asyncio.gather(*tasks)
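# Minimal illustrative Subscription subclass (a sketch only, not part of the
# library; the channel name and message shape are assumptions that a real
# exchange integration would replace with its own protocol):
#
#   class TickerSubscription(Subscription):
#       def __init__(self, pair: str, callbacks: CallbacksType = None):
#           super().__init__(callbacks)
#           self.pair = pair
#
#       def construct_subscription_id(self) -> Any:
#           return f"ticker/{self.pair}"
#
#       def get_subscription_message(self, **kwargs) -> dict:
#           return {"channel": "ticker", "pair": self.pair}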
class WebsocketMgr(ABC):
def __init__(self, websocket_uri: str, subscriptions: List[Subscription], builtin_ping_interval: Optional[float] = 20,
max_message_size: int = 2**20, periodic_timeout_sec: int = None, ssl_context = None,
auto_reconnect: bool = False, startup_delay_ms: int = 0) -> None:
self.websocket_uri = websocket_uri
self.subscriptions = subscriptions
self.builtin_ping_interval = builtin_ping_interval
self.max_message_size = max_message_size
self.periodic_timeout_sec = periodic_timeout_sec
self.ssl_context = ssl_context
self.auto_reconnect = auto_reconnect
self.startup_delay_ms = startup_delay_ms
@abstractmethod
async def _process_message(self, websocket: Websocket, response: str) -> None:
pass
async def _process_periodic(self, websocket: Websocket) -> None:
pass
def get_websocket_uri_variable_part(self):
return ""
def get_websocket(self) -> Websocket:
return self.get_full_websocket()
def get_full_websocket(self) -> Websocket:
return FullWebsocket(websocket_uri = self.websocket_uri + self.get_websocket_uri_variable_part(),
builtin_ping_interval = self.builtin_ping_interval,
max_message_size = self.max_message_size,
ssl_context = self.ssl_context)
def get_aiohttp_websocket(self) -> Websocket:
return AiohttpWebsocket(websocket_uri = self.websocket_uri + self.get_websocket_uri_variable_part(),
builtin_ping_interval = self.builtin_ping_interval,
max_message_size = self.max_message_size,
ssl_context = self.ssl_context)
async def validate_subscriptions(self) -> None:
pass
async def initialize_subscriptions(self) -> None:
for subscription in self.subscriptions:
await subscription.initialize()
async def _authenticate(self, websocket: Websocket):
pass
async def _subscribe(self, websocket: Websocket):
subscription_messages = []
for subscription in self.subscriptions:
subscription_messages.append(subscription.get_subscription_message())
LOG.debug(f"> {subscription_messages}")
await websocket.send(json.dumps(subscription_messages))
async def main_loop(self, websocket: Websocket):
await self._authenticate(websocket)
await self._subscribe(websocket)
# start processing incoming messages
while True:
message = await websocket.receive()
LOG.debug(f"< {message}")
await self._process_message(websocket, message)
async def periodic_loop(self, websocket: Websocket):
if self.periodic_timeout_sec is not None:
while True:
await self._process_periodic(websocket)
await asyncio.sleep(self.periodic_timeout_sec)
async def run(self) -> None:
await self.validate_subscriptions()
await self.initialize_subscriptions()
try:
# main loop ensuring proper reconnection if required
while True:
LOG.debug(f"Initiating websocket connection.")
websocket = None
try:
# sleep for the requested period before initiating the connection. This is useful when client
# opens many connections at the same time and server cannot handle the load
await asyncio.sleep(self.startup_delay_ms / 1000.0)
LOG.debug(f"Websocket initiation delayed by {self.startup_delay_ms}ms.")
websocket = self.get_websocket()
await websocket.connect()
done, pending = await asyncio.wait(
[async_create_task(self.main_loop(websocket)),
async_create_task(self.periodic_loop(websocket))],
return_when = asyncio.FIRST_EXCEPTION
)
for task in done:
try:
task.result()
except Exception as e:
LOG.debug(f"Websocket processing has led to an exception, all pending tasks will be cancelled.")
for task in pending:
if not task.cancelled():
task.cancel()
if len(pending) > 0:
try:
await asyncio.wait(pending, return_when = asyncio.ALL_COMPLETED)
except asyncio.CancelledError:
await asyncio.wait(pending, return_when = asyncio.ALL_COMPLETED)
raise
finally:
LOG.debug("All pending tasks cancelled successfully.")
raise
except (websockets.ConnectionClosedError,
websockets.ConnectionClosedOK,
websockets.InvalidStatusCode,
ConnectionResetError,
WebsocketClosed,
WebsocketError,
WebsocketReconnectionException) as e:
if self.auto_reconnect:
LOG.info("A recoverable exception has occurred, the websocket will be restarted automatically.")
self._print_subscriptions()
LOG.info(f"Exception: {e}")
else:
raise
finally:
if websocket is not None:
LOG.debug("Closing websocket connection.")
await websocket.close()
except asyncio.CancelledError:
LOG.warning(f"The websocket was requested to be shutdown.")
except Exception:
LOG.error(f"An exception occurred. The websocket manager will be closed.")
self._print_subscriptions()
raise
async def publish_message(self, message: WebsocketMessage) -> None:
for subscription in self.subscriptions:
if subscription.get_subscription_id() == message.subscription_id:
await subscription.process_message(message)
return
LOG.warning(f"Websocket message with subscription id {message.subscription_id} did not identify any subscription!")
def _print_subscriptions(self):
subscription_messages = []
for subscription in self.subscriptions:
subscription_messages.append(subscription.get_subscription_id())
LOG.info(f"Subscriptions: {subscription_messages}") | []
| []
| []
| [] | [] | python | null | null | null |
tests/util/pilot_server.go | // Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io/ioutil"
"net"
"net/http"
"os"
"strconv"
"time"
"istio.io/pkg/log"
"istio.io/istio/pilot/pkg/bootstrap"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/keepalive"
"istio.io/istio/pkg/test/env"
"k8s.io/apimachinery/pkg/util/wait"
)
var (
// MockPilotGrpcAddr is the address to be used for grpc connections.
MockPilotGrpcAddr string
// MockPilotSecureAddr is the address to be used for secure grpc connections.
MockPilotSecureAddr string
// MockPilotHTTPPort is the dynamic port for pilot http
MockPilotHTTPPort int
// MockPilotGrpcPort is the dynamic port for pilot grpc
MockPilotGrpcPort int
)
// TearDownFunc is to be called to tear down a test server.
type TearDownFunc func()
// EnsureTestServer will ensure a pilot server is running in process and initializes
// the MockPilotUrl and MockPilotGrpcAddr to allow connections to the test pilot.
func EnsureTestServer(args ...func(*bootstrap.PilotArgs)) (*bootstrap.Server, TearDownFunc) {
server, tearDown, err := setup(args...)
if err != nil {
log.Errora("Failed to start in-process server: ", err)
panic(err)
}
return server, tearDown
}
func setup(additionalArgs ...func(*bootstrap.PilotArgs)) (*bootstrap.Server, TearDownFunc, error) {
// TODO: point to test data directory
// Setting FileDir (--configDir) disables k8s client initialization, including for registries,
// and uses a 100ms scan. Must be used with the mock registry (or one of the others)
// This limits the options -
// When debugging a test or running locally it helps having a static port for /debug
// "0" is used on shared environment (it's not actually clear if such thing exists since
// we run the tests in isolated VMs)
pilotHTTP := os.Getenv("PILOT_HTTP")
if len(pilotHTTP) == 0 {
pilotHTTP = "0"
}
httpAddr := ":" + pilotHTTP
meshConfig := mesh.DefaultMeshConfig()
// Create a test pilot discovery service configured to watch the tempDir.
args := bootstrap.PilotArgs{
Namespace: "testing",
DiscoveryOptions: bootstrap.DiscoveryServiceOptions{
HTTPAddr: httpAddr,
GrpcAddr: ":0",
SecureGrpcAddr: ":0",
EnableProfiling: true,
},
//TODO: start mixer first, get its address
Mesh: bootstrap.MeshArgs{
MixerAddress: "istio-mixer.istio-system:9091",
},
Config: bootstrap.ConfigArgs{
KubeConfig: env.IstioSrc + "/tests/util/kubeconfig",
},
Service: bootstrap.ServiceArgs{
// Using the Mock service registry, which provides the hello and world services.
Registries: []string{
string(serviceregistry.MockRegistry)},
},
MeshConfig: &meshConfig,
MCPMaxMessageSize: 1024 * 1024 * 4,
KeepaliveOptions: keepalive.DefaultOption(),
ForceStop: true,
// TODO: add the plugins, so local tests are closer to reality and test full generation
// Plugins: bootstrap.DefaultPlugins,
}
// Static testdata, should include all configs we want to test.
args.Config.FileDir = env.IstioSrc + "/tests/testdata/config"
bootstrap.PilotCertDir = env.IstioSrc + "/tests/testdata/certs/pilot"
for _, apply := range additionalArgs {
apply(&args)
}
// Create and setup the controller.
s, err := bootstrap.NewServer(args)
if err != nil {
return nil, nil, err
}
stop := make(chan struct{})
// Start the server.
if err := s.Start(stop); err != nil {
return nil, nil, err
}
// Extract the port from the network address.
_, port, err := net.SplitHostPort(s.HTTPListeningAddr.String())
if err != nil {
return nil, nil, err
}
httpURL := "http://localhost:" + port
MockPilotHTTPPort, _ = strconv.Atoi(port)
_, port, err = net.SplitHostPort(s.GRPCListeningAddr.String())
if err != nil {
return nil, nil, err
}
MockPilotGrpcAddr = "localhost:" + port
MockPilotGrpcPort, _ = strconv.Atoi(port)
_, port, err = net.SplitHostPort(s.SecureGRPCListeningAddr.String())
if err != nil {
return nil, nil, err
}
MockPilotSecureAddr = "localhost:" + port
// Wait a bit for the server to come up.
err = wait.Poll(500*time.Millisecond, 5*time.Second, func() (bool, error) {
client := &http.Client{Timeout: 1 * time.Second}
resp, err := client.Get(httpURL + "/ready")
if err != nil {
return false, nil
}
defer func() { _ = resp.Body.Close() }()
_, _ = ioutil.ReadAll(resp.Body)
if resp.StatusCode == http.StatusOK {
return true, nil
}
return false, nil
})
return s, func() {
close(stop)
}, err
}
| [
"\"PILOT_HTTP\""
]
| []
| [
"PILOT_HTTP"
]
| [] | ["PILOT_HTTP"] | go | 1 | 0 | |
test/integration/013_context_var_tests/test_context_vars.py | from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
import os
class TestContextVars(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
os.environ["DBT_TEST_013_ENV_VAR"] = "1"
os.environ["DBT_TEST_013_USER"] = "root"
os.environ["DBT_TEST_013_PASS"] = "password"
self.fields = [
'this',
'this.name',
'this.schema',
'this.table',
'target.dbname',
'target.host',
'target.name',
'target.port',
'target.schema',
'target.threads',
'target.type',
'target.user',
'target.pass',
'run_started_at',
'invocation_id',
'env_var'
]
@property
def schema(self):
return "context_vars_013"
@property
def models(self):
return "test/integration/013_context_var_tests/models"
@property
def profile_config(self):
return {
'test': {
'outputs': {
# don't use env_var's here so the integration tests can run
# seed sql statements and the like. default target is used
'dev': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': "root",
'pass': "password",
'dbname': 'dbt',
'schema': self.unique_schema()
},
'prod': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
# root/password
'user': "{{ env_var('DBT_TEST_013_USER') }}",
'pass': "{{ env_var('DBT_TEST_013_PASS') }}",
'dbname': 'dbt',
'schema': self.unique_schema()
}
},
'target': 'dev'
}
}
def get_ctx_vars(self):
field_list = ", ".join(['"{}"'.format(f) for f in self.fields])
query = 'select {field_list} from {schema}.context'.format(
field_list=field_list,
schema=self.unique_schema())
vals = self.run_sql(query, fetch='all')
ctx = dict([(k, v) for (k, v) in zip(self.fields, vals[0])])
return ctx
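    # The `context` model itself lives under the models directory returned by
    # self.models and is not reproduced here. A hypothetical sketch of its SQL
    # (illustrative only; one quoted column per entry in self.fields):
    #
    #   select
    #       '{{ this }}' as "this",
    #       '{{ this.name }}' as "this.name",
    #       '{{ target.name }}' as "target.name",
    #       '{{ env_var("DBT_TEST_013_ENV_VAR") }}' as "env_var"
    #       -- ...and so on for the remaining fields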
@attr(type='postgres')
def test_env_vars_dev(self):
results = self.run_dbt(['run'])
self.assertEqual(len(results), 1)
ctx = self.get_ctx_vars()
self.assertEqual(
ctx['this'],
'"{}"."context__dbt_tmp"'.format(self.unique_schema()))
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], self.unique_schema())
self.assertEqual(ctx['this.table'], 'context__dbt_tmp')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'dev')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], self.unique_schema())
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
self.assertEqual(ctx['env_var'], '1')
@attr(type='postgres')
def test_env_vars_prod(self):
results = self.run_dbt(['run', '--target', 'prod'])
self.assertEqual(len(results), 1)
ctx = self.get_ctx_vars()
self.assertEqual(
ctx['this'],
'"{}"."context__dbt_tmp"'.format(self.unique_schema()))
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], self.unique_schema())
self.assertEqual(ctx['this.table'], 'context__dbt_tmp')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'prod')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], self.unique_schema())
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
self.assertEqual(ctx['env_var'], '1')
| []
| []
| [
"DBT_TEST_013_USER",
"DBT_TEST_013_PASS",
"DBT_TEST_013_ENV_VAR"
]
| [] | ["DBT_TEST_013_USER", "DBT_TEST_013_PASS", "DBT_TEST_013_ENV_VAR"] | python | 3 | 0 | |
tasks/src/main/java/com/example/task/CreateHttpTaskWithToken.java | /*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.task;
// [START cloud_tasks_create_http_task_with_token]
import com.google.cloud.tasks.v2beta3.CloudTasksClient;
import com.google.cloud.tasks.v2beta3.HttpMethod;
import com.google.cloud.tasks.v2beta3.HttpRequest;
import com.google.cloud.tasks.v2beta3.OidcToken;
import com.google.cloud.tasks.v2beta3.QueueName;
import com.google.cloud.tasks.v2beta3.Task;
import com.google.protobuf.ByteString;
import java.nio.charset.Charset;
public class CreateHttpTaskWithToken {
public static void main(String[] args) throws Exception {
String projectId = System.getenv("PROJECT_ID");
String queueName = System.getenv("QUEUE_ID");
String location = System.getenv("LOCATION_ID");
String url = System.getenv("URL");
// Instantiates a client.
try (CloudTasksClient client = CloudTasksClient.create()) {
      // Values are read from environment variables above, for example:
// projectId = "my-project-id";
// queueName = "my-appengine-queue";
// location = "us-central1";
// url = "https://example.com/tasks/create";
String payload = "hello";
// Construct the fully qualified queue name.
String queuePath = QueueName.of(projectId, location, queueName).toString();
// Add your service account email to construct the OIDC token.
// in order to add an authentication header to the request.
OidcToken.Builder oidcTokenBuilder =
OidcToken.newBuilder().setServiceAccountEmail("<SERVICE_ACCOUNT_EMAIL>");
// Construct the task body.
Task.Builder taskBuilder =
Task.newBuilder()
.setHttpRequest(
HttpRequest.newBuilder()
.setBody(ByteString.copyFrom(payload, Charset.defaultCharset()))
.setHttpMethod(HttpMethod.POST)
.setUrl(url)
.setOidcToken(oidcTokenBuilder)
.build());
// Send create task request.
Task task = client.createTask(queuePath, taskBuilder.build());
System.out.println("Task created: " + task.getName());
}
}
}
// [END cloud_tasks_create_http_task_with_token]
| [
"\"PROJECT_ID\"",
"\"QUEUE_ID\"",
"\"LOCATION_ID\"",
"\"URL\""
]
| []
| [
"URL",
"PROJECT_ID",
"LOCATION_ID",
"QUEUE_ID"
]
| [] | ["URL", "PROJECT_ID", "LOCATION_ID", "QUEUE_ID"] | java | 4 | 0 |