filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
src/main/java/com/animalshelter/StartAnimalShelter.java
|
package com.animalshelter;
import java.sql.Connection;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import com.animalshelter.dao.DatabaseUserDAO;
import com.animalshelter.exception.UserNotFoundException;
import com.animalshelter.model.User;
import com.animalshelter.util.JDBCUtility;
public class StartAnimalShelter {
public static void main(String[] args) throws UserNotFoundException {
System.out.println("Welcome to Animal Shelter Web API");
User user1 = new User();
user1.setFirstName("Andrew");
user1.setLastName("Capp");
System.out.println(user1.toString());
try (Connection connection = JDBCUtility.getConnection()) {
System.out.println("Connection to DB made!");
} catch (SQLException e) {
System.out.println(e.toString());
}
System.out.println(new DatabaseUserDAO().getAllUsers());
// Logger is currently saving logs to Animal Shelter project when run as a Java app
// Logger is saving logs to /Applications/SpringToolSuite4.app/Contents/MacOS when server is running
Logger logger = Logger.getLogger(StartAnimalShelter.class);
logger.debug("Animal Shelter started " + StartAnimalShelter.class);
System.out.println(System.getenv("HOME"));
System.out.println(System.getProperty("user.dir"));
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
java
| 1 | 0 | |
core/config/etcd/config_test.go
|
// Copyright 2020
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcd
import (
"encoding/json"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
clientv3 "go.etcd.io/etcd/client/v3"
)
func TestEtcdConfigerProvider_Parse(t *testing.T) {
provider := &EtcdConfigerProvider{}
cfger, err := provider.Parse(readEtcdConfig())
assert.Nil(t, err)
assert.NotNil(t, cfger)
}
func TestEtcdConfiger(t *testing.T) {
provider := &EtcdConfigerProvider{}
cfger, _ := provider.Parse(readEtcdConfig())
subCfger, err := cfger.Sub("sub.")
assert.Nil(t, err)
assert.NotNil(t, subCfger)
subSubCfger, err := subCfger.Sub("sub.")
assert.NotNil(t, subSubCfger)
assert.Nil(t, err)
str, err := subSubCfger.String("key1")
assert.Nil(t, err)
assert.Equal(t, "sub.sub.key", str)
// we cannot test it
subSubCfger.OnChange("watch", func(value string) {
// do nothing
})
defStr := cfger.DefaultString("not_exit", "default value")
assert.Equal(t, "default value", defStr)
defInt64 := cfger.DefaultInt64("not_exit", -1)
assert.Equal(t, int64(-1), defInt64)
defInt := cfger.DefaultInt("not_exit", -2)
assert.Equal(t, -2, defInt)
defFlt := cfger.DefaultFloat("not_exit", 12.3)
assert.Equal(t, 12.3, defFlt)
defBl := cfger.DefaultBool("not_exit", true)
assert.True(t, defBl)
defStrs := cfger.DefaultStrings("not_exit", []string{"hello"})
assert.Equal(t, []string{"hello"}, defStrs)
fl, err := cfger.Float("current.float")
assert.Nil(t, err)
assert.Equal(t, 1.23, fl)
bl, err := cfger.Bool("current.bool")
assert.Nil(t, err)
assert.True(t, bl)
it, err := cfger.Int("current.int")
assert.Nil(t, err)
assert.Equal(t, 11, it)
str, err = cfger.String("current.string")
assert.Nil(t, err)
assert.Equal(t, "hello", str)
tn := &TestEntity{}
err = cfger.Unmarshaler("current.serialize.", tn)
assert.Nil(t, err)
assert.Equal(t, "test", tn.Name)
}
type TestEntity struct {
Name string `yaml:"name"`
Sub SubEntity `yaml:"sub"`
}
type SubEntity struct {
SubName string `yaml:"subName"`
}
func readEtcdConfig() string {
addr := os.Getenv("ETCD_ADDR")
if addr == "" {
addr = "localhost:2379"
}
obj := clientv3.Config{
Endpoints: []string{addr},
DialTimeout: 3 * time.Second,
}
cfg, _ := json.Marshal(obj)
return string(cfg)
}
|
[
"\"ETCD_ADDR\""
] |
[] |
[
"ETCD_ADDR"
] |
[]
|
["ETCD_ADDR"]
|
go
| 1 | 0 | |
renku/cli/_providers/zenodo.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zenodo API integration."""
import json
import os
import pathlib
import urllib
from urllib.parse import urlparse
import attr
import requests
from requests import HTTPError
from tqdm import tqdm
from renku._compat import Path
from renku.cli._providers.api import ExporterApi, ProviderApi
from renku.cli._providers.doi import DOIProvider
from renku.models.datasets import Dataset, DatasetFile
from renku.utils.doi import is_doi
ZENODO_BASE_URL = 'https://zenodo.org'
ZENODO_SANDBOX_URL = 'https://sandbox.zenodo.org/'
ZENODO_API_PATH = 'api'
ZENODO_DEPOSIT_PATH = 'deposit'
ZENODO_PUBLISH_PATH = 'record'
ZENODO_PUBLISH_ACTION_PATH = 'depositions/{0}/actions/publish'
ZENODO_METADATA_URL = 'depositions/{0}'
ZENODO_FILES_URL = 'depositions/{0}/files'
ZENODO_NEW_DEPOSIT_URL = 'depositions'
def make_records_url(record_id):
"""Create URL to access record by ID."""
return urllib.parse.urljoin(
ZENODO_BASE_URL,
pathlib.posixpath.join(ZENODO_API_PATH, 'records', record_id)
)
def check_or_raise(response):
"""Check for expected response status code."""
if response.status_code not in [200, 201, 202]:
if response.status_code == 401:
raise HTTPError('Access unauthorized - update access token.')
if response.status_code == 400:
err_response = response.json()
errors = [
'"{0}" failed with "{1}"'.format(err['field'], err['message'])
for err in err_response['errors']
]
raise HTTPError('\n' + '\n'.join(errors))
else:
raise HTTPError(response.content)
@attr.s
class ZenodoFileSerializer:
"""Zenodo record file."""
id = attr.ib(default=None, kw_only=True)
checksum = attr.ib(default=None, kw_only=True)
links = attr.ib(default=None, kw_only=True)
filename = attr.ib(default=None, kw_only=True)
filesize = attr.ib(default=None, kw_only=True)
@property
def remote_url(self):
"""Get remote URL as ``urllib.ParseResult``."""
return urllib.parse.urlparse(self.links['download'])
@property
def type(self):
"""Get file type."""
return self.filename.split('.')[-1]
@attr.s
class ZenodoMetadataSerializer:
"""Zenodo metadata."""
access_right = attr.ib(default=None, kw_only=True)
communities = attr.ib(default=None, kw_only=True)
contributors = attr.ib(default=None, kw_only=True)
creators = attr.ib(default=None, kw_only=True)
description = attr.ib(default=None, kw_only=True)
doi = attr.ib(default=None, kw_only=True)
grants = attr.ib(default=None, kw_only=True)
image_type = attr.ib(default=None, kw_only=True)
journal_issue = attr.ib(default=None, kw_only=True)
journal_pages = attr.ib(default=None, kw_only=True)
journal_title = attr.ib(default=None, kw_only=True)
journal_volume = attr.ib(default=None, kw_only=True)
keywords = attr.ib(default=None, kw_only=True)
language = attr.ib(default=None, kw_only=True)
license = attr.ib(default=None, kw_only=True)
notes = attr.ib(default=None, kw_only=True)
prereserve_doi = attr.ib(default=None, kw_only=True)
publication_date = attr.ib(default=None, kw_only=True)
publication_type = attr.ib(default=None, kw_only=True)
references = attr.ib(default=None, kw_only=True)
related_identifiers = attr.ib(default=None, kw_only=True)
title = attr.ib(default=None, kw_only=True)
upload_type = attr.ib(default=None, kw_only=True)
version = attr.ib(default=None, kw_only=True)
def _metadata_converter(data):
"""Convert dict to ZenodoMetadata instance."""
return ZenodoMetadataSerializer(**data)
@attr.s
class ZenodoRecordSerializer:
"""Zenodo record."""
_jsonld = attr.ib(default=None, init=False)
id = attr.ib(default=None, kw_only=True)
doi = attr.ib(default=None, kw_only=True)
doi_url = attr.ib(default=None, kw_only=True)
title = attr.ib(default=None, kw_only=True)
files = attr.ib(default=None, kw_only=True)
links = attr.ib(default=None, kw_only=True)
metadata = attr.ib(
default=None,
kw_only=True,
type=ZenodoMetadataSerializer,
converter=_metadata_converter
)
modified = attr.ib(default=None, kw_only=True)
owner = attr.ib(default=None, kw_only=True)
record_id = attr.ib(default=None, kw_only=True)
state = attr.ib(default=None, kw_only=True)
submitted = attr.ib(default=None, kw_only=True)
created = attr.ib(default=None, kw_only=True)
conceptrecid = attr.ib(default=None, kw_only=True)
conceptdoi = attr.ib(default=None, kw_only=True)
_zenodo = attr.ib(default=None, kw_only=True)
_uri = attr.ib(default=None, kw_only=True)
@property
def version(self):
"""Get record version."""
return self.metadata.version
def is_last_version(self, uri):
"""Check if record is at last possible version."""
if is_doi(uri):
return uri == self.metadata.prereserve_doi['doi']
record_id = self.metadata.prereserve_doi['recid']
return ZenodoProvider.record_id(uri) == record_id
def get_jsonld(self):
"""Get record metadata as jsonld."""
response = self._zenodo.accept_jsonld().make_request(self._uri)
self._jsonld = response.json()
return self._jsonld
def get_files(self):
"""Get Zenodo files metadata as ``ZenodoFile``."""
if len(self.files) == 0:
raise LookupError('no files have been found')
return [ZenodoFileSerializer(**file_) for file_ in self.files]
def as_dataset(self, client):
"""Deserialize `ZenodoRecordSerializer` to `Dataset`."""
files = self.get_files()
metadata = self.get_jsonld()
dataset = Dataset.from_jsonld(metadata, client=client)
serialized_files = []
for file_ in files:
remote_ = file_.remote_url
dataset_file = DatasetFile(
url=remote_,
id=file_.id,
checksum=file_.checksum,
filename=file_.filename,
filesize=file_.filesize,
filetype=file_.type,
dataset=dataset.name,
path='',
)
serialized_files.append(dataset_file)
dataset.files = serialized_files
if isinstance(dataset.url, dict) and '_id' in dataset.url:
dataset.url = urllib.parse.urlparse(dataset.url.pop('_id'))
dataset.url = dataset.url.geturl()
return dataset
@attr.s
class ZenodoDeposition:
"""Zenodo record for deposit."""
exporter = attr.ib()
id = attr.ib(default=None)
@property
def publish_url(self):
"""Returns publish URL."""
url = urllib.parse.urljoin(
self.exporter.zenodo_url,
pathlib.posixpath.join(
ZENODO_API_PATH, ZENODO_DEPOSIT_PATH,
ZENODO_PUBLISH_ACTION_PATH.format(self.id)
)
)
return url
@property
def attach_metadata_url(self):
"""Return URL for attaching metadata."""
url = urllib.parse.urljoin(
self.exporter.zenodo_url,
pathlib.posixpath.join(
ZENODO_API_PATH, ZENODO_DEPOSIT_PATH,
ZENODO_METADATA_URL.format(self.id)
)
)
return url
@property
def upload_file_url(self):
"""Return URL for uploading file."""
url = urllib.parse.urljoin(
self.exporter.zenodo_url,
pathlib.posixpath.join(
ZENODO_API_PATH, ZENODO_DEPOSIT_PATH,
ZENODO_FILES_URL.format(self.id)
)
)
return url
@property
def new_deposit_url(self):
"""Return URL for creating new deposit."""
url = urllib.parse.urljoin(
self.exporter.zenodo_url,
pathlib.posixpath.join(
ZENODO_API_PATH, ZENODO_DEPOSIT_PATH, ZENODO_NEW_DEPOSIT_URL
)
)
return url
@property
def published_at(self):
"""Return published at URL."""
url = urllib.parse.urljoin(
self.exporter.zenodo_url,
pathlib.posixpath.join(ZENODO_PUBLISH_PATH, str(self.id))
)
return url
@property
def deposit_at(self):
"""Return deposit at URL."""
url = urllib.parse.urljoin(
self.exporter.zenodo_url,
pathlib.posixpath.join(ZENODO_DEPOSIT_PATH, str(self.id))
)
return url
def new_deposition(self):
"""Create new deposition on Zenodo."""
response = requests.post(
url=self.new_deposit_url,
params=self.exporter.default_params,
json={},
headers=self.exporter.HEADERS
)
check_or_raise(response)
return response
def upload_file(self, filepath):
"""Upload and attach a file to existing deposition on Zenodo."""
request_payload = {'filename': Path(filepath).name}
file = {'file': open(str(filepath), 'rb')}
response = requests.post(
url=self.upload_file_url,
params=self.exporter.default_params,
data=request_payload,
files=file,
)
check_or_raise(response)
return response
def attach_metadata(self, dataset):
"""Attach metadata to deposition on Zenodo."""
request_payload = {
'metadata': {
'title': dataset.name,
'upload_type': 'dataset',
'description': dataset.description,
'creators': [{
'name': creator.name,
'affiliation': creator.affiliation
} for creator in dataset.creator]
}
}
response = requests.put(
url=self.attach_metadata_url,
params=self.exporter.default_params,
data=json.dumps(request_payload),
headers=self.exporter.HEADERS
)
check_or_raise(response)
return response
def publish_deposition(self, secret):
"""Publish existing deposition."""
response = requests.post(
url=self.publish_url, params=self.exporter.default_params
)
check_or_raise(response)
return response
def __attrs_post_init__(self):
"""Post-Init hook to set _id field."""
response = self.new_deposition()
self.id = response.json()['id']
@attr.s
class ZenodoExporter(ExporterApi):
"""Zenodo export manager."""
HEADERS = {'Content-Type': 'application/json'}
dataset = attr.ib()
access_token = attr.ib()
@property
def zenodo_url(self):
"""Returns correct Zenodo URL based on environment."""
if 'ZENODO_USE_SANDBOX' in os.environ:
return ZENODO_SANDBOX_URL
return ZENODO_BASE_URL
def set_access_token(self, access_token):
"""Set access token."""
self.access_token = access_token
def access_token_url(self):
"""Return endpoint for creation of access token."""
return urllib.parse.urlparse(
'https://zenodo.org/account/settings/applications/tokens/new/'
).geturl()
@property
def default_params(self):
"""Create request default params."""
return {'access_token': self.access_token}
def dataset_to_request(self):
"""Prepare dataset metadata for request."""
jsonld = self.dataset.asjsonld()
jsonld['upload_type'] = 'dataset'
return jsonld
def export(self, publish):
"""Execute entire export process."""
# Step 1. Create new deposition
deposition = ZenodoDeposition(exporter=self)
# Step 2. Upload all files to created deposition
with tqdm(total=len(self.dataset.files)) as progressbar:
for file_ in self.dataset.files:
deposition.upload_file(file_.full_path, )
progressbar.update(1)
# Step 3. Attach metadata to deposition
deposition.attach_metadata(self.dataset)
# Step 4. Publish newly created deposition
if publish:
deposition.publish_deposition(self.access_token)
return deposition.published_at
return deposition.deposit_at
@attr.s
class ZenodoProvider(ProviderApi):
"""zenodo.org registry API provider."""
is_doi = attr.ib(default=False)
_accept = attr.ib(default='application/json')
@staticmethod
def record_id(uri):
"""Extract record id from uri."""
return urlparse(uri).path.split('/')[-1]
def accept_json(self):
"""Receive response as json."""
self._accept = 'application/json'
return self
def accept_jsonld(self):
"""Receive response as jsonld."""
self._accept = 'application/ld+json'
return self
def make_request(self, uri):
"""Execute network request."""
record_id = ZenodoProvider.record_id(uri)
response = requests.get(
make_records_url(record_id), headers={'Accept': self._accept}
)
if response.status_code != 200:
raise LookupError('record not found')
return response
def find_record(self, uri):
"""Retrieves a record from Zenodo.
:raises: ``LookupError``
:param uri: DOI or URL
:return: ``ZenodoRecord``
"""
if self.is_doi:
return self.find_record_by_doi(uri)
return self.get_record(uri)
def find_record_by_doi(self, doi):
"""Resolve the DOI and make a record for the retrieved record id."""
doi = DOIProvider().find_record(doi)
return self.get_record(ZenodoProvider.record_id(doi.url))
def get_record(self, uri):
"""Retrieve record metadata and return ``ZenodoRecord``."""
response = self.make_request(uri)
return ZenodoRecordSerializer(**response.json(), zenodo=self, uri=uri)
def get_exporter(self, dataset, access_token):
"""Create export manager for given dataset."""
return ZenodoExporter(dataset=dataset, access_token=access_token)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/cosmos72/gomacro/base/util.go
|
/*
* gomacro - A Go interpreter with Lisp-like macros
*
* Copyright (C) 2017-2018 Massimiliano Ghilardi
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*
* util.go
*
* Created on: Feb 19, 2017
* Author: Massimiliano Ghilardi
*/
package base
import (
"fmt"
"go/build"
"os"
"path/filepath"
r "reflect"
"strings"
xr "github.com/cosmos72/gomacro/xreflect"
)
func PackValues(val0 r.Value, values []r.Value) []r.Value {
if len(values) == 0 && val0 != None {
values = []r.Value{val0}
}
return values
}
func PackTypes(typ0 xr.Type, types []xr.Type) []xr.Type {
if len(types) == 0 && typ0 != nil {
types = []xr.Type{typ0}
}
return types
}
func PackValuesAndTypes(val0 r.Value, values []r.Value, typ0 xr.Type, types []xr.Type) ([]r.Value, []xr.Type) {
return PackValues(val0, values), PackTypes(typ0, types)
}
func UnpackValues(vals []r.Value) (r.Value, []r.Value) {
val0 := None
if len(vals) > 0 {
val0 = vals[0]
}
return val0, vals
}
// ValueInterface() is a zero-value-safe version of reflect.Value.Interface()
func ValueInterface(v r.Value) interface{} {
if !v.IsValid() || !v.CanInterface() || v == None {
return nil
}
return v.Interface()
}
// ValueType() is a zero-value-safe version of reflect.Value.Type()
func ValueType(value r.Value) r.Type {
if !value.IsValid() || value == None {
return nil
}
return value.Type()
}
func IsNillableKind(k r.Kind) bool {
switch k {
case r.Invalid, // nil is nillable...
r.Chan, r.Func, r.Interface, r.Map, r.Ptr, r.Slice:
return true
default:
return false
}
}
// split 's' into a prefix and suffix separated by 'separator'.
// suffix is trimmed with strings.TrimSpace() before returning it
func Split2(s string, separator rune) (string, string) {
var prefix, suffix string
if space := strings.IndexByte(s, ' '); space > 0 {
prefix = s[:space]
suffix = strings.TrimSpace(s[space+1:])
} else {
prefix = s
}
return prefix, suffix
}
// always use forward slashes. they work also on Windows...
func unixpath(path string) string {
if os.PathSeparator != '/' && len(path) != 0 {
path = strings.Replace(path, string(os.PathSeparator), "/", -1)
}
return path
}
// find user's home directory, see https://stackoverflow.com/questions/2552416/how-can-i-find-the-users-home-dir-in-a-cross-platform-manner-using-c
// without importing "os/user" - which requires cgo to work thus makes cross-compile difficult, see https://github.com/golang/go/issues/11797
func UserHomeDir() string {
home := os.Getenv("HOME")
if len(home) == 0 {
home = os.Getenv("USERPROFILE")
if len(home) == 0 {
home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
}
return unixpath(home)
}
func Subdir(dirs ...string) string {
// should use string(os.PathSeparator) instead of "/", but:
// 1) package names use '/', not os.PathSeparator
// 2) it would complicate DirName()
return strings.Join(dirs, "/")
}
var (
GoPkg = filepath.FromSlash("github.com/gopherdata/gophernotes/vendor/github.com/cosmos72/gomacro")
GoSrcDir = Subdir(filepath.SplitList(build.Default.GOPATH)[0], "src")
GomacroDir = findGomacroDir(GoPkg)
)
func findGomacroDir(pkg string) string {
gopath := build.Default.GOPATH
for _, dir := range filepath.SplitList(gopath) {
path := filepath.Join(dir, "src", pkg)
if _, err := os.Stat(path); err == nil {
return path
}
}
defaultDir := Subdir(GoSrcDir, pkg)
fmt.Printf("// warning: could not find package %q in $GOPATH = %q, assuming package is located in %q\n", pkg, gopath, defaultDir)
return defaultDir
}
|
[
"\"HOME\"",
"\"USERPROFILE\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\""
] |
[] |
[
"HOME",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE"
] |
[]
|
["HOME", "HOMEPATH", "HOMEDRIVE", "USERPROFILE"]
|
go
| 4 | 0 | |
samples/snippets/create_data_labeling_job_image_segmentation_sample_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import pytest
import create_data_labeling_job_image_segmentation_sample
import helpers
API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT")
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
DATASET_ID = "5111009432972558336"
INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_segmentation_1.0.0.yaml"
DISPLAY_NAME = f"temp_create_data_labeling_job_image_segmentation_test_{uuid.uuid4()}"
INSTRUCTIONS_GCS_URI = (
"gs://ucaip-sample-resources/images/datalabeling_instructions.pdf"
)
ANNOTATION_SPEC = {"color": {"red": 1.0}, "displayName": "rose"}
ANNOTATION_SET_NAME = f"temp_image_segmentation_{uuid.uuid4()}"
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_data_labeling_job):
yield
# Creating a data labeling job for images
def test_create_data_labeling_job_image_segmentation_sample(capsys, shared_state):
dataset = f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}"
create_data_labeling_job_image_segmentation_sample.create_data_labeling_job_image_segmentation_sample(
project=PROJECT_ID,
display_name=DISPLAY_NAME,
dataset=dataset,
instruction_uri=INSTRUCTIONS_GCS_URI,
inputs_schema_uri=INPUTS_SCHEMA_URI,
annotation_spec=ANNOTATION_SPEC,
annotation_set_name=ANNOTATION_SET_NAME,
api_endpoint=API_ENDPOINT,
)
out, _ = capsys.readouterr()
# Save the resource name of the newly created data labeling job
shared_state["data_labeling_job_name"] = helpers.get_name(out)
|
[] |
[] |
[
"DATA_LABELING_API_ENDPOINT",
"BUILD_SPECIFIC_GCLOUD_PROJECT"
] |
[]
|
["DATA_LABELING_API_ENDPOINT", "BUILD_SPECIFIC_GCLOUD_PROJECT"]
|
python
| 2 | 0 | |
taskcluster/mozillavpn_taskgraph/parameters.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from taskgraph.parameters import extend_parameters_schema
from voluptuous import All, Any, Range, Required
def get_defaults(repo_root):
return {
"pull_request_number": None,
"version": "",
}
extend_parameters_schema(
{
Required("pull_request_number"): Any(All(int, Range(min=1)), None),
Required("version"): str,
},
defaults_fn=get_defaults
)
def get_decision_parameters(graph_config, parameters):
head_tag = parameters["head_tag"]
parameters["version"] = head_tag[1:] if head_tag else ""
pr_number = os.environ.get("MOZILLAVPN_PULL_REQUEST_NUMBER", None)
parameters["pull_request_number"] = None if pr_number is None else int(pr_number)
|
[] |
[] |
[
"MOZILLAVPN_PULL_REQUEST_NUMBER"
] |
[]
|
["MOZILLAVPN_PULL_REQUEST_NUMBER"]
|
python
| 1 | 0 | |
shell.go
|
package main
import (
"fmt"
"io"
"net"
"os"
"os/signal"
"path"
"strings"
"sync"
"syscall"
"time"
"github.com/jessevdk/go-flags"
"github.com/mitchellh/go-homedir"
log "github.com/tillberg/ansi-log"
"github.com/tillberg/autorestart"
"github.com/tillberg/bismuth"
)
var OptsCommon struct {
Verbose bool `short:"v" long:"verbose" description:"Show verbose debug information"`
Version bool `long:"version" description:"Print gut-sync version"`
NoColor bool `long:"no-color" description:"Disable ANSI colors"`
}
var OptsSync struct {
IdentityFile string `short:"i" long:"identity"`
Positional struct {
LocalPath string
} `positional-args:"yes" required:"yes"`
}
type FileEvent struct {
ctx *SyncContext
filepath string
}
const shutdownChanLen = 20
var shutdownChan = make(chan bool, shutdownChanLen)
func IsShuttingDown() bool {
select {
case <-shutdownChan:
return true
default:
return false
}
}
const commitDebounceDuration = 100 * time.Millisecond
const reconnectMinDelay = 2 * time.Second
func (ctx *SyncContext) StartReverseTunnel(srcAddr string, destAddr string) (reconnectChan chan bool, err error) {
isFirstTime := true
firstTimeChan := make(chan error)
go func() {
logger := ctx.Logger()
lastConnectStartTime := time.Now()
for {
listener, tunnelErrChan, err := ctx.ReverseTunnel(srcAddr, destAddr)
if isFirstTime {
firstTimeChan <- err
isFirstTime = false
} else {
reconnectChan <- true
}
if err == nil {
err = <-tunnelErrChan
}
if IsShuttingDown() {
logger.Printf("@(dim)Reverse tunnel exiting (shutting down).@(r)\n")
return
}
if err == io.EOF {
logger.Printf("@(error:Connection lost.)\n")
} else {
logger.Printf("@(error:Encountered error on reverse-tunnel: %v)\n", err)
}
if listener != nil {
listener.Close() // Ignore any errors; it might already be closed.
}
reconnectLogger := ctx.NewLogger("")
reconnectStart := time.Now()
elapsedSeconds := func() int {
return int(time.Since(reconnectStart).Seconds())
}
for {
reconnectLogger.Replacef("@(dim)Reconnecting (%ds)...@(r)", elapsedSeconds())
// Rate-limit calls to Connect. The delay should be zero on timeout errors, assuming that the
// network timeout in bismuth is greater than reconnectMinDelay.
time.Sleep(reconnectMinDelay - time.Since(lastConnectStartTime))
lastConnectStartTime = time.Now()
err = ctx.Connect()
if err != nil {
squelch := false
netErr, ok := err.(net.Error)
if ok && netErr.Timeout() {
squelch = true
}
errStr := err.Error()
if strings.Contains(errStr, "no route to host") {
squelch = true
}
if strings.Contains(errStr, "connection refused") {
squelch = true
}
if !squelch {
logger.Printf("@(dim:Error while reconnecting: %v)\n", err)
}
} else {
reconnectLogger.Replacef("@(dim:Connection re-established after %d seconds.)\n", elapsedSeconds())
break
}
}
reconnectLogger.Close()
}
}()
reconnectChan = make(chan bool)
err = <-firstTimeChan
return reconnectChan, err
}
const reconnectBufferLength = 2
const eventBufferLength = 100
const forceFullSyncCheckString = "**force full sync check**"
func Sync(local *SyncContext, remotes []*SyncContext) (err error) {
status := local.NewLogger("sync")
defer status.Close()
allContexts := append([]*SyncContext{local}, remotes...)
hostsStrs := []string{}
for _, ctx := range allContexts {
hostsStrs = append(hostsStrs, ctx.SyncPathAnsi())
}
hostsStr := JoinWithAndAndCommas(hostsStrs...)
status.Printf("@(dim:Starting gut-sync between) %s@(dim:.)\n", hostsStr)
for _, ctx := range allContexts {
_, err = EnsureBuild(local, ctx)
if err != nil {
status.Bail(err)
}
}
ports, err := FindOpenPorts(1, allContexts...)
if err != nil {
status.Bail(err)
}
// status.Printf("Using ports %v\n", ports)
gutdPort := ports[0]
gutdAddr := fmt.Sprintf("localhost:%d", gutdPort)
repoName := RandSeq(8) + local.getPidfileScope()
eventChan := make(chan FileEvent, eventBufferLength)
// Start up gut-daemon on the local host, and create a reverse tunnel from each of the remote hosts
// back to the local gut-daemon. All hosts can connect to gut-daemon at localhost:<gutdPort>, which
// makes configuration a little simpler.
ready := make(chan bool)
numTasks := 0
goTask := func(taskCtx *SyncContext, fn func(*SyncContext)) {
numTasks++
go func() {
fn(taskCtx)
ready <- true
}()
}
joinTasks := func() {
for numTasks > 0 {
<-ready
numTasks--
}
}
if len(remotes) > 0 {
goTask(local, func(taskCtx *SyncContext) {
err := taskCtx.GutDaemon(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
})
}
for _, ctx := range remotes {
if !ctx.IsLocal() {
goTask(ctx, func(taskCtx *SyncContext) {
reconnectChan, err := taskCtx.StartReverseTunnel(gutdAddr, gutdAddr)
if err != nil {
status.Bail(err)
}
go func() {
for {
<-reconnectChan
eventChan <- FileEvent{taskCtx, forceFullSyncCheckString}
}
}()
})
}
}
joinTasks()
// Fetch the tail hash for all contexts in parallel
for _, ctx := range allContexts {
goTask(ctx, func(taskCtx *SyncContext) {
taskCtx.UpdateTailHash()
})
}
joinTasks()
// Iterate over the contexts, finding the common tailHash, if any. Bail if there are conflicting tailHashes.
tailHash := ""
var tailHashFoundOn *SyncContext
localTailHash := local.GetTailHash()
if localTailHash != "" {
tailHash = localTailHash
tailHashFoundOn = local
}
contextsNeedInit := []*SyncContext{}
for _, ctx := range remotes {
myTailHash := ctx.GetTailHash()
if myTailHash == "" {
err = ctx.AssertSyncFolderIsEmpty()
if err != nil {
status.Bail(err)
}
contextsNeedInit = append(contextsNeedInit, ctx)
} else {
if tailHash == "" {
tailHash = myTailHash
tailHashFoundOn = ctx
} else {
if tailHash != myTailHash {
status.Printf("@(error:Found different gut repo base commits:)\n")
status.Printf("@(commit:%s) @(error:at) %s\n",
TrimCommit(tailHash), tailHashFoundOn.SyncPathAnsi())
status.Printf("@(commit:%s) @(error:at) %s\n",
TrimCommit(myTailHash), ctx.SyncPathAnsi())
Shutdown(status.Colorify("@(error:Cannot sync incompatible gut repos.)"), 1)
}
goTask(ctx, func(taskCtx *SyncContext) {
err := taskCtx.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
})
}
}
}
// If local needs to be initialized, do so, either from scratch or by bootstrapping from tailHashFoundOn.
if localTailHash == "" {
if tailHash == "" {
status.Printf("@(dim:No existing gut repo found. Initializing gut repo in %s.)\n", local.SyncPathAnsi())
err = local.GutInit()
if err != nil {
status.Bail(err)
}
err = local.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
err = local.GutEnsureInitialCommit()
if err != nil {
status.Bail(err)
}
local.UpdateTailHash()
tailHash = local.GetTailHash()
if tailHash == "" {
Shutdown(status.Colorify("Failed to initialize new gut repo."), 1)
}
tailHashFoundOn = local
} else {
err = local.GutInit()
if err != nil {
status.Bail(err)
}
err = local.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
joinTasks() // Wait for GutSetupOrigin on tailHashFoundOn to finish
err = tailHashFoundOn.GutPush()
if err != nil {
status.Bail(err)
}
err = local.GutCheckoutAsMaster(tailHashFoundOn.BranchName())
if err != nil {
status.Bail(err)
}
}
} else {
goTask(local, func(taskCtx *SyncContext) {
err := taskCtx.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
})
}
// Bootstrap any non-local contexts that need it:
for _, ctx := range contextsNeedInit {
goTask(ctx, func(taskCtx *SyncContext) {
err := taskCtx.GutInit()
if err != nil {
status.Bail(err)
}
err = taskCtx.GutSetupOrigin(repoName, gutdPort)
if err != nil {
status.Bail(err)
}
err = taskCtx.GutPull()
if err != nil {
status.Bail(err)
}
})
}
joinTasks()
commitScoped := func(src *SyncContext, changedPaths []string, updateUntracked bool) (changed bool, err error) {
prefix := CommonPathPrefix(changedPaths...)
if prefix != "" {
// git is annoying if you try to git-add git-ignored files (printing a message that is very helpful when there is a human
// attached to stdin/stderr), so let's always just target the last *folder* by lopping off everything after the last slash:
lastIndex := strings.LastIndex(prefix, "/")
if lastIndex == -1 {
prefix = ""
} else {
prefix = prefix[:lastIndex+1]
}
}
if prefix == "" {
prefix = "."
}
changed, err = src.GutCommit(prefix, updateUntracked)
if err != nil {
return false, err
}
return changed, nil
}
// Start up an instance of fswatch/inotifywait for each context to watch for file changes
for _, ctx := range allContexts {
goTask(ctx, func(taskCtx *SyncContext) {
taskCtx.WatchForChanges(func(filepath string) {
eventChan <- FileEvent{taskCtx, filepath}
})
})
}
joinTasks()
var haveChanges bool
var changedPaths map[*SyncContext]map[string]bool
var changedIgnore map[*SyncContext]bool
var forceSyncCheck bool
clearChanges := func() {
haveChanges = false
changedPaths = make(map[*SyncContext]map[string]bool)
changedIgnore = make(map[*SyncContext]bool)
forceSyncCheck = false
}
clearChanges()
flushChanges := func() {
// Flush all file changes, in three phases:
// - Commit on all nodes that have seen recent changes
// - Push and merge all changes to the local master
// - Pull changes back out to the remotes.
// First phase, Commit.
// (This is typically just one context, except at startup, when we create a pseudo-change event for each context.)
changedCtxChan := make(chan *SyncContext)
for ctx, pathMap := range changedPaths {
go func(taskCtx *SyncContext, taskPathMap map[string]bool) {
paths := []string{}
for path := range taskPathMap {
paths = append(paths, path)
}
_, changedThisIgnore := changedIgnore[taskCtx]
// log.Printf("Starting commitScoped on %s\n", taskCtx.NameAnsi())
changed, err := commitScoped(taskCtx, paths, changedThisIgnore)
// log.Printf("Finished commitScoped on %s\n", taskCtx.NameAnsi())
if err != nil {
status.Printf("@(error:Commit failed on) %s@(error:: %v)\n", taskCtx.NameAnsi(), err)
changedCtxChan <- nil
} else {
if changed {
changedCtxChan <- taskCtx
} else {
changedCtxChan <- nil
}
}
}(ctx, pathMap)
}
changedCtxs := []*SyncContext{}
for _ = range changedPaths {
ctx := <-changedCtxChan
if ctx != nil {
changedCtxs = append(changedCtxs, ctx)
}
}
if !forceSyncCheck && len(changedCtxs) == 0 {
clearChanges()
return
}
clearChanges()
// Second phase, Push to local.
// XXX if remote has a previous change (i.e. from when it was the local), we don't necessarily pick up that change here.
for _, ctx := range changedCtxs {
if ctx != local {
// log.Printf("Starting GutPush on %s\n", ctx.NameAnsi())
err = ctx.GutPush()
// log.Printf("Finished GutPush on %s\n", ctx.NameAnsi())
if err != nil {
status.Printf("@(error:Failed to push changes from) %s @(error:to local: %v)\n", ctx.NameAnsi(), err)
continue
}
// log.Printf("Starting GutMerge on %s\n", ctx.NameAnsi())
err = local.GutMerge(ctx.BranchName())
// log.Printf("Finished GutMerge on %s\n", ctx.NameAnsi())
if err != nil {
status.Printf("@(error:Failed to merge) %s @(error:into) master@(error:: %v)\n", ctx.BranchName(), err)
}
}
}
masterCommitChan := make(chan string, len(remotes))
go func() {
masterCommit, err := local.GutRevParseHead()
if err != nil {
status.Printf("@(error:Failed to rev-parse head on local: %v)\n", err)
masterCommit = ""
}
for i := 0; i < len(remotes); i++ {
masterCommitChan <- masterCommit
}
}()
// Third phase, Pull to remotes.
done := make(chan error)
for _, ctx := range remotes {
go func(taskCtx *SyncContext) {
if !taskCtx.IsConnected() {
status.Printf("@(dim:Skipping sync to disconnected remote) %s\n", taskCtx.NameAnsi())
done <- nil
return
}
// log.Printf("Starting GutRevParseHead on %s\n", taskCtx.NameAnsi())
myCommit, err := taskCtx.GutRevParseHead()
// log.Printf("Finished GutRevParseHead on %s\n", taskCtx.NameAnsi())
if err != nil {
done <- err
return
}
localMasterCommit := <-masterCommitChan
if localMasterCommit != "" && myCommit != localMasterCommit {
// log.Printf("Starting GutPull on %s\n", taskCtx.NameAnsi())
err = taskCtx.GutPull()
// log.Printf("Finished GutPull on %s\n", taskCtx.NameAnsi())
}
// log.Printf("Finished third phase on %s\n", taskCtx.NameAnsi())
done <- err
}(ctx)
}
for _, ctx := range remotes {
select {
case err = <-done:
if err == NeedsCommitError {
status.Printf("@(dim:Need to commit on) %s @(dim:before it can pull.)\n", ctx.NameAnsi())
go func() {
eventChan <- FileEvent{ctx, forceFullSyncCheckString}
}()
err = nil
}
if err != nil {
status.Printf("@(error:Failed to pull changes to) %s@(error:: %v)\n", ctx.NameAnsi(), err)
}
case <-time.After(60 * time.Second):
status.Printf("@(warn:Timed out while waiting for a remote to finish syncing.)\n")
}
}
}
go func() {
// Note: The filesystem watchers are not necessarily listening to all updates yet, so we could miss file changes that occur between
// the commit_and_update calls below and the time that the filesystem watches are attached.
for _, ctx := range allContexts {
// Queue up an event to force checking for changes.
eventChan <- FileEvent{ctx, forceFullSyncCheckString}
}
}()
// Process messages from eventChan forever. Read as many messages as possible before needing to wait at least
// commitDebounceDuration, at which point we flush all the events (and commit & sync changes, etc).
var event FileEvent
for {
if haveChanges {
select {
case event = <-eventChan:
break
case <-time.After(commitDebounceDuration):
flushChanges()
continue
}
} else {
event = <-eventChan
}
if event.filepath == forceFullSyncCheckString {
// Force an attempt to update all the remotes, even if there are no new commits.
forceSyncCheck = true
// And also force a full commit & update-untracked on this node
changedIgnore[event.ctx] = true
}
parts := strings.Split(event.filepath, "/")
skip := false
for _, part := range parts {
if part == ".gut" {
skip = true
} else if part == ".gutignore" {
changedIgnore[event.ctx] = true
}
}
if skip {
continue
}
// status.Printf("@(dim:[)%s@(dim:] changed on) %s\n", event.filepath, event.ctx.NameAnsi())
haveChanges = true
ctxChanged, ok := changedPaths[event.ctx]
if !ok {
ctxChanged = make(map[string]bool)
changedPaths[event.ctx] = ctxChanged
}
ctxChanged[event.filepath] = true
}
}
var shutdownLock sync.Mutex
func Shutdown(reason string, exitcode int) {
shutdownLock.Lock()
for i := 0; i < shutdownChanLen; i++ {
shutdownChan <- true
}
status := log.New(os.Stderr, "", 0)
if reason != "" {
status.Printf("%s ", reason)
}
status.Printf("Stopping all subprocesses...\n")
done := make(chan bool)
for _, _ctx := range AllSyncContexts {
go func(ctx *SyncContext) {
if ctx.IsConnected() {
ctx.KillAllSessions()
// This generally shouldn't *do* anything other than
// clean up the PID files, as the killing would have
// been done already in KillAllSessions.
ctx.KillAllViaPidfiles()
ctx.Close()
}
done <- true
}(_ctx)
}
for range AllSyncContexts {
select {
case <-done:
case <-time.After(3 * time.Second):
}
}
status.Printf("Exiting.")
os.Stderr.WriteString("\n")
os.Exit(exitcode)
}
func printUsageInfoAndExit() {
status := log.New(os.Stderr, "", 0)
status.Println("")
status.Println("Usage: gut sync [option]... path [{ [user@]host:path | path }]...")
status.Println("")
status.Println("Options:")
status.Println(" --no-color: Disable ANSI colors")
status.Println(" --verbose: Show all commands executed")
status.Println(" --build-deps: Build gut-commands from git source instead of downloading tarball")
status.Println("--build-parallel: Build gut-commands in parallel via make -j {num_cores}")
status.Println("")
status.Println("Examples:")
status.Println(" Sync folder with one remote: gut sync ~/stuff/ [email protected]:~/stuff/")
status.Println(" Sync folder with two remotes: gut sync stuff/ remotehost1.com:~/work/ [email protected]:/tmp/sync")
status.Println(" Sync folders locally: gut sync ~/mywork /mnt/backup/mywork/")
status.Println("Just track changes, no syncing: gut sync ~/mywork")
status.Println("")
os.Exit(0)
}
func main() {
log.EnableColorTemplate()
log.AddAnsiColorCode("error", 31)
log.AddAnsiColorCode("commit", 32)
log.AddAnsiColorCode("path", 36)
var args []string = os.Args[1:]
if len(args) == 0 {
fmt.Println("You must specify a gut-command, e.g. `gut sync ...`")
os.Exit(1)
}
var cmd = args[0]
if IsGitCommand(cmd) {
if IsDangerousGitCommand(cmd) {
if len(args) < 2 || args[1] != "--danger" {
status := log.New(os.Stderr, "", 0)
status.Printf("@(dim:In order to prevent damage caused by accidentally using `)gut %s ...@(dim:`)\n", cmd)
status.Printf("@(dim:in cases where `)git %s ...@(dim:` was intended, you must append `)--danger@(dim:`)\n", cmd)
status.Printf("@(dim:immediately after the command, i.e. `)gut %s --danger ...@(dim:`.)\n", cmd)
status.Printf("@(dim:Alternatively, you could invoke) gut @(dim:directly at) @(path:%s)@(dim:.)\n", GutExePath)
status.Printf("@(dim:The commands that require this flag are:) %s\n", strings.Join(DangerousGitCommands, " "))
os.Exit(1)
}
// Split the "--danger" flag out before handing off the args list to the gut-command:
if len(args) > 2 {
args = append(args[:1], args[2:]...)
} else {
args = args[:1]
}
}
homeDir, err := homedir.Dir()
if err != nil {
log.Bail(err)
}
var gutExe = path.Join(homeDir, GutExePath[2:])
syscall.Exec(gutExe, append([]string{gutExe}, args...), os.Environ())
fmt.Printf("Failed to exec %s", gutExe)
os.Exit(1)
}
autorestart.CleanUpChildZombiesQuietly()
go autorestart.RestartOnChange()
status := log.New(os.Stderr, "", 0)
args = args[1:]
parser := flags.NewParser(&OptsCommon, flags.IgnoreUnknown)
var argsRemaining, err = parser.ParseArgs(args)
if err != nil {
printUsageInfoAndExit()
}
if OptsCommon.NoColor {
log.DisableColor()
}
if OptsCommon.Version {
status.Printf("gut-sync %s\n", GutVersion)
os.Exit(0)
}
bismuth.SetVerbose(OptsCommon.Verbose)
go func() {
sigintChan := make(chan os.Signal, 1)
signal.Notify(sigintChan, os.Interrupt)
<-sigintChan
Shutdown("Received SIGINT.", 1)
}()
go func() {
sighupChan := autorestart.NotifyOnSighup()
<-sighupChan
Shutdown("Received SIGHUP.", 0)
}()
if cmd == "build" {
var local = NewSyncContext()
err := local.Connect()
if err != nil {
status.Bail(err)
}
err = local.CheckLocalDeps()
if err != nil {
status.Bail(err)
}
didSomething, err := EnsureBuild(local, local)
if err != nil {
status.Bail(err)
}
if !didSomething {
status.Printf("@(dim:gut) " + GitVersion + " @(dim:has already been built.)\n")
}
} else if cmd == "sync" {
var remoteArgs, err = flags.ParseArgs(&OptsSync, argsRemaining)
if err != nil {
printUsageInfoAndExit()
}
ready := make(chan error)
local := NewSyncContext()
err = local.ParseSyncPath(OptsSync.Positional.LocalPath)
if err != nil {
status.Bail(err)
}
if len(remoteArgs) > 0 && os.Getenv("SSH_AUTH_SOCK") == "" {
log.Printf("@(error:SSH_AUTH_SOCK is not set in environment. Start up an ssh agent first before running gut-sync.)\n")
Shutdown("", 1)
}
go func() {
err = local.Connect()
if err != nil {
status.Bail(err)
}
err = local.CheckLocalDeps()
if err != nil {
status.Bail(err)
}
local.KillAllViaPidfiles()
local.SaveDaemonPid("gut", os.Getpid())
ready <- nil
}()
remotes := []*SyncContext{}
for _, remotePath := range remoteArgs {
remote := NewSyncContext()
remotes = append(remotes, remote)
err = remote.ParseSyncPath(remotePath)
if err != nil {
status.Bail(err)
}
go func(_remote *SyncContext) {
err = _remote.Connect()
if err != nil {
status.Printf("@(error:Failed to connect to %s: %s)\n", remote.Hostname(), err)
Shutdown("", 1)
}
_remote.KillAllViaPidfiles()
err = _remote.CheckRemoteDeps()
if err != nil {
status.Bail(err)
}
ready <- nil
}(remote)
}
for i := 0; i < len(remotes)+1; i++ {
<-ready
}
err = Sync(local, remotes)
if err != nil {
status.Bail(err)
}
}
}
|
[
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_AUTH_SOCK"]
|
go
| 1 | 0 | |
samples/oauth/oauth_on_appengine/appengine_utilities/flash.py
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
import Cookie
import pickle
from time import strftime
from django.utils import simplejson
COOKIE_NAME = 'appengine-utilities-flash'
class Flash(object):
"""
Send messages to the user between pages.
When you instantiate the class, the attribute 'msg' will be set from the
cookie, and the cookie will be deleted. If there is no flash cookie, 'msg'
will default to None.
To set a flash message for the next page, simply set the 'msg' attribute.
Example pseudocode:
if new_entity.put():
flash = Flash()
flash.msg = 'Your new entity has been created!'
return redirect_to_entity_list()
Then in the template on the next page:
{% if flash.msg %}
<div class="flash-msg">{{ flash.msg }}</div>
{% endif %}
"""
def __init__(self, cookie=None):
"""
Load the flash message and clear the cookie.
"""
self.no_cache_headers()
# load cookie
if cookie is None:
browser_cookie = os.environ.get('HTTP_COOKIE', '')
self.cookie = Cookie.SimpleCookie()
self.cookie.load(browser_cookie)
else:
self.cookie = cookie
# check for flash data
if self.cookie.get(COOKIE_NAME):
# set 'msg' attribute
cookie_val = self.cookie[COOKIE_NAME].value
# we don't want to trigger __setattr__(), which creates a cookie
try:
self.__dict__['msg'] = simplejson.loads(cookie_val)
except:
# not able to load the json, so do not set message. This should
# catch for when the browser doesn't delete the cookie in time for
# the next request, and only blanks out the content.
pass
# clear the cookie
self.cookie[COOKIE_NAME] = ''
self.cookie[COOKIE_NAME]['path'] = '/'
self.cookie[COOKIE_NAME]['expires'] = 0
print self.cookie[COOKIE_NAME]
else:
# default 'msg' attribute to None
self.__dict__['msg'] = None
def __setattr__(self, name, value):
"""
Create a cookie when setting the 'msg' attribute.
"""
if name == 'cookie':
self.__dict__['cookie'] = value
elif name == 'msg':
self.__dict__['msg'] = value
self.__dict__['cookie'][COOKIE_NAME] = simplejson.dumps(value)
self.__dict__['cookie'][COOKIE_NAME]['path'] = '/'
print self.cookie
else:
raise ValueError('You can only set the "msg" attribute.')
def no_cache_headers(self):
"""
Adds headers, avoiding any page caching in the browser. Useful for highly
dynamic sites.
"""
print "Expires: Tue, 03 Jul 2001 06:00:00 GMT"
print strftime("Last-Modified: %a, %d %b %y %H:%M:%S %Z")
print "Cache-Control: no-store, no-cache, must-revalidate, max-age=0"
print "Cache-Control: post-check=0, pre-check=0"
print "Pragma: no-cache"
|
[] |
[] |
[
"HTTP_COOKIE"
] |
[]
|
["HTTP_COOKIE"]
|
python
| 1 | 0 | |
dist-test/tf-dist-bak.py
|
import datetime
import numpy as np
import os
import argparse
import tensorflow as tf
epoch = 10
def main(jtg, tig, is_chief):
# Create train data
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10
learning_rate = 0.01
start_training_time = datetime.datetime.now()
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Example: {"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"index": 0, "type": "worker"}}
#env = json.loads(os.environ.get("TF_CONFIG", "{}"))
#task_data = env.get("task", None)
cluster_spec = {'ps': ['128.135.24.250:1111','128.135.24.251:1111'], 'worker': ['128.135.24.252:1111', '128.135.24.253:1111']}
task_type = jtg
task_index = tig
cluster = tf.train.ClusterSpec(cluster_spec)
server = tf.train.Server(cluster,
job_name=task_type,
task_index=task_index)
if task_type == "ps":
server.join()
elif task_type == "worker":
with tf.device(tf.train.replica_device_setter(
worker_device="/job:{}/task:{}".format(task_type, task_index),
cluster=cluster)):
# Define the model
keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
keys = tf.identity(keys_placeholder)
X = tf.placeholder("float", shape=[None, 1])
Y = tf.placeholder("float", shape=[None, 1])
w = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")
global_step = tf.Variable(0, name="global_step", trainable=False)
loss = tf.reduce_sum(tf.square(Y - tf.multiply(X, w) - b))
train_op = optimizer.minimize(loss, global_step=global_step)
predict_op = tf.multiply(X, w) + b
#tf.summary.scalar("loss", loss)
#summary_op = tf.summary.merge_all()
#init_op = tf.global_variables_initializer()
#saver = tf.train.Saver()
#saver = tf.train.Saver(sharded=True)
with tf.Session(server.target) as sess:
print("Run training with epoch number: {}".format(epoch))
sess.run(tf.global_variables_initializer())
for i in range(epoch):
for (x, y) in zip(train_X, train_Y):
x = np.array([[x]])
y = np.array([[y]])
sess.run(train_op, feed_dict={X: x, Y: y})
end_training_time = datetime.datetime.now()
print("[{}] End of distributed training.".format(
end_training_time - start_training_time))
'''
sv = tf.train.Supervisor(is_chief=is_chief,
logdir='./checkpoint/',
init_op=init_op,
#summary_op=summary_op,
summary_op=None,
saver=saver,
global_step=global_step,
save_model_secs=60)
try:
with sv.managed_session(server.target) as sess:
#print("Save tensorboard files into: {}".format(FLAGS.output_path))
#writer = tf.summary.FileWriter(FLAGS.output_path, sess.graph)
print("Run training with epoch number: {}".format(epoch))
for i in range(epoch):
for (x, y) in zip(train_X, train_Y):
x = np.array([[x]])
y = np.array([[y]])
sess.run(train_op, feed_dict={X: x, Y: y})
end_training_time = datetime.datetime.now()
print("[{}] End of distributed training.".format(
end_training_time - start_training_time))
except Exception as e:
print(e)
'''
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-j', '--jobname', action='store', type=str, default='none', help='ps or worker')
parser.add_argument('-t', '--taskid', action='store', type=int, default=0)
parser.add_argument('-c', '--chief', action='store_true', default=False)
args = parser.parse_args()
job_type_global = args.jobname
task_idx_global = args.taskid
is_chief_global = args.chief
print()
main(job_type_global, task_idx_global, is_chief_global)
|
[] |
[] |
[
"TF_CONFIG"
] |
[]
|
["TF_CONFIG"]
|
python
| 1 | 0 | |
telegram/bot.go
|
package telegram
import (
"context"
"os"
"golang.org/x/xerrors"
)
// RunUntilCanceled is client callback which
// locks until client context is canceled.
func RunUntilCanceled(ctx context.Context, client *Client) error {
<-ctx.Done()
return ctx.Err()
}
// BotFromEnvironment creates a bot client using ClientFromEnvironment,
// connects to the server and authenticates it.
//
// Variables:
// BOT_TOKEN — token from BotFather.
func BotFromEnvironment(
ctx context.Context,
opts Options,
setup func(ctx context.Context, client *Client) error,
cb func(ctx context.Context, client *Client) error,
) error {
client, err := ClientFromEnvironment(opts)
if err != nil {
return xerrors.Errorf("create client: %w", err)
}
if err := setup(ctx, client); err != nil {
return xerrors.Errorf("setup: %w", err)
}
return client.Run(ctx, func(ctx context.Context) error {
status, err := client.Auth().Status(ctx)
if err != nil {
return xerrors.Errorf("auth status: %w", err)
}
if !status.Authorized {
if _, err := client.Auth().Bot(ctx, os.Getenv("BOT_TOKEN")); err != nil {
return xerrors.Errorf("login: %w", err)
}
}
return cb(ctx, client)
})
}
|
[
"\"BOT_TOKEN\""
] |
[] |
[
"BOT_TOKEN"
] |
[]
|
["BOT_TOKEN"]
|
go
| 1 | 0 | |
cmd/root.go
|
package cmd
import (
"fmt"
"os"
"github.com/docker/machine/commands/mcndirs"
"github.com/spf13/cobra"
)
// needs export=docker-machine-vm-base-path
var machinePath string
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "machine-utils",
Short: "provides few missing functions of docker-machine",
Long: `Provides few extra functionality from docker-machine.
This tool complementry tool to docker-machine
It uses same environment variables of docker-machine,
So without docker-machine it may not be usable`,
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
func init() {
cobra.OnInitialize(initConfig)
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
machinePath = os.Getenv("docker_machine_dir")
if machinePath == "" {
machinePath = mcndirs.GetBaseDir()
}
fmt.Println(machinePath)
}
|
[
"\"docker_machine_dir\""
] |
[] |
[
"docker_machine_dir"
] |
[]
|
["docker_machine_dir"]
|
go
| 1 | 0 | |
bus-health/src/main/java/org/aoju/bus/health/linux/software/LinuxOperatingSystem.java
|
/*********************************************************************************
* *
* The MIT License (MIT) *
* *
* Copyright (c) 2015-2020 aoju.org OSHI and other contributors. *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in *
* all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN *
* THE SOFTWARE. *
********************************************************************************/
package org.aoju.bus.health.linux.software;
import com.sun.jna.Native;
import com.sun.jna.platform.linux.LibC;
import com.sun.jna.platform.linux.LibC.Sysinfo;
import org.aoju.bus.core.annotation.ThreadSafe;
import org.aoju.bus.core.lang.Normal;
import org.aoju.bus.core.lang.RegEx;
import org.aoju.bus.core.lang.Symbol;
import org.aoju.bus.core.lang.tuple.Triple;
import org.aoju.bus.core.toolkit.FileKit;
import org.aoju.bus.health.Builder;
import org.aoju.bus.health.Executor;
import org.aoju.bus.health.builtin.software.*;
import org.aoju.bus.health.builtin.software.OSProcess.State;
import org.aoju.bus.health.linux.LinuxLibc;
import org.aoju.bus.health.linux.ProcPath;
import org.aoju.bus.health.linux.drivers.CpuStat;
import org.aoju.bus.health.linux.drivers.ProcessStat;
import org.aoju.bus.health.linux.drivers.UpTime;
import org.aoju.bus.health.linux.drivers.Who;
import org.aoju.bus.logger.Logger;
import java.io.File;
import java.util.*;
import static org.aoju.bus.health.builtin.software.OSService.State.RUNNING;
import static org.aoju.bus.health.builtin.software.OSService.State.STOPPED;
/**
* Linux is a family of open source Unix-like operating systems based on the
* Linux kernel, an operating system kernel first released on September 17,
* 1991, by Linus Torvalds. Linux is typically packaged in a Linux distribution.
*
* @author Kimi Liu
* @version 6.0.1
* @since JDK 1.8+
*/
@ThreadSafe
public class LinuxOperatingSystem extends AbstractOperatingSystem {
// Package private for access from LinuxOSProcess
static final long BOOTTIME;
private static final String OS_RELEASE_LOG = "os-release: {}";
private static final String LSB_RELEASE_A_LOG = "lsb_release -a: {}";
private static final String LSB_RELEASE_LOG = "lsb-release: {}";
private static final String RELEASE_DELIM = " release ";
private static final String DOUBLE_QUOTES = "^\"|\"$";
/**
* Jiffies per second, used for process time counters.
*/
private static final long USER_HZ = Builder.parseLongOrDefault(Executor.getFirstAnswer("getconf CLK_TCK"),
100L);
// PPID is 4th numeric value in proc pid stat; subtract 1 for 0-index
private static final int[] PPID_INDEX = {3};
static {
long tempBT = CpuStat.getBootTime();
// If above fails, current time minus uptime.
if (tempBT == 0) {
tempBT = System.currentTimeMillis() / 1000L - (long) UpTime.getSystemUptimeSeconds();
}
BOOTTIME = tempBT;
}
/**
* <p>
* Constructor for LinuxOperatingSystem.
* </p>
*/
public LinuxOperatingSystem() {
super.getVersionInfo();
}
private static int getParentPidFromProcFile(int pid) {
String stat = Builder.getStringFromFile(String.format("/proc/%d/stat", pid));
// A race condition may leave us with an empty string
if (stat.isEmpty()) {
return 0;
}
// Grab PPID
long[] statArray = Builder.parseStringToLongArray(stat, PPID_INDEX, ProcessStat.PROC_PID_STAT_LENGTH, ' ');
return (int) statArray[0];
}
private static Triple<String, String, String> queryFamilyVersionCodenameFromReleaseFiles() {
Triple<String, String, String> familyVersionCodename;
// There are two competing options for family/version information.
// Newer systems are adopting a standard /etc/os-release file:
// https://www.freedesktop.org/software/systemd/man/os-release.html
//
// Some systems are still using the lsb standard which parses a
// variety of /etc/*-release files and is most easily accessed via
// the commandline lsb_release -a, see here:
// http://linux.die.net/man/1/lsb_release
// In this case, the /etc/lsb-release file (if it exists) has
// optional overrides to the information in the /etc/distrib-release
// files, which show: "Distributor release x.x (Codename)"
// Attempt to read /etc/system-release which has more details than
        // os-release (on CentOS and Fedora)
if ((familyVersionCodename = readDistribRelease("/etc/system-release")) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return familyVersionCodename;
}
// Attempt to read /etc/os-release file.
if ((familyVersionCodename = readOsRelease()) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return familyVersionCodename;
}
// Attempt to execute the `lsb_release` command
if ((familyVersionCodename = execLsbRelease()) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return familyVersionCodename;
}
        // The above options should hopefully work on most
// distributions. If not, we keep having fun.
// Attempt to read /etc/lsb-release file
if ((familyVersionCodename = readLsbRelease()) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return familyVersionCodename;
}
// If we're still looking, we search for any /etc/*-release (or
// similar) filename, for which the first line should be of the
// "Distributor release x.x (Codename)" format or possibly a
// "Distributor VERSION x.x (Codename)" format
String etcDistribRelease = getReleaseFilename();
if ((familyVersionCodename = readDistribRelease(etcDistribRelease)) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return familyVersionCodename;
}
// If we've gotten this far with no match, use the distrib-release
// filename (defaults will eventually give "Unknown")
String family = filenameToFamily(etcDistribRelease.replace("/etc/", Normal.EMPTY).replace("release", Normal.EMPTY)
.replace("version", Normal.EMPTY).replace(Symbol.HYPHEN, Normal.EMPTY).replace(Symbol.UNDERLINE, Normal.EMPTY));
return Triple.of(family, Normal.UNKNOWN, Normal.UNKNOWN);
}
/**
* Attempts to read /etc/os-release
*
* @return a triplet with the parsed family, versionID and codeName if file
* successfully read and NAME= found, null otherwise
*/
private static Triple<String, String, String> readOsRelease() {
String family = null;
String versionId = Normal.UNKNOWN;
String codeName = Normal.UNKNOWN;
List<String> osRelease = FileKit.readLines("/etc/os-release");
// Search for NAME=
for (String line : osRelease) {
if (line.startsWith("VERSION=")) {
Logger.debug(OS_RELEASE_LOG, line);
// remove beginning and ending '"' characters, etc from
// VERSION="14.04.4 LTS, Trusty Tahr" (Ubuntu style)
// or VERSION="17 (Beefy Miracle)" (os-release doc style)
line = line.replace("VERSION=", Normal.EMPTY).replaceAll(DOUBLE_QUOTES, Normal.EMPTY).trim();
String[] split = line.split("[()]");
if (split.length <= 1) {
// If no parentheses, check for Ubuntu's comma format
split = line.split(", ");
}
if (split.length > 0) {
versionId = split[0].trim();
}
if (split.length > 1) {
codeName = split[1].trim();
}
} else if (line.startsWith("NAME=") && family == null) {
Logger.debug(OS_RELEASE_LOG, line);
// remove beginning and ending '"' characters, etc from
// NAME="Ubuntu"
family = line.replace("NAME=", Normal.EMPTY).replaceAll(DOUBLE_QUOTES, Normal.EMPTY).trim();
} else if (line.startsWith("VERSION_ID=") && versionId.equals(Normal.UNKNOWN)) {
Logger.debug(OS_RELEASE_LOG, line);
// remove beginning and ending '"' characters, etc from
// VERSION_ID="14.04"
versionId = line.replace("VERSION_ID=", Normal.EMPTY).replaceAll(DOUBLE_QUOTES, Normal.EMPTY).trim();
}
}
return family == null ? null : Triple.of(family, versionId, codeName);
}
/**
* Attempts to execute `lsb_release -a`
*
* @return a Triple with the parsed family, versionID and codeName if the
* command successfully executed and Distributor ID: or Description:
* found, null otherwise
*/
private static Triple<String, String, String> execLsbRelease() {
String family = null;
String versionId = Normal.UNKNOWN;
String codeName = Normal.UNKNOWN;
// If description is of the format Distrib release x.x (Codename)
// that is primary, otherwise use Distributor ID: which returns the
// distribution concatenated, e.g., RedHat instead of Red Hat
for (String line : Executor.runNative("lsb_release -a")) {
if (line.startsWith("Description:")) {
Logger.debug(LSB_RELEASE_A_LOG, line);
line = line.replace("Description:", Normal.EMPTY).trim();
if (line.contains(RELEASE_DELIM)) {
Triple<String, String, String> Triple = parseRelease(line, RELEASE_DELIM);
family = Triple.getLeft();
if (versionId.equals(Normal.UNKNOWN)) {
versionId = Triple.getMiddle();
}
if (codeName.equals(Normal.UNKNOWN)) {
codeName = Triple.getRight();
}
}
} else if (line.startsWith("Distributor ID:") && family == null) {
Logger.debug(LSB_RELEASE_A_LOG, line);
family = line.replace("Distributor ID:", "").trim();
} else if (line.startsWith("Release:") && versionId.equals(Normal.UNKNOWN)) {
Logger.debug(LSB_RELEASE_A_LOG, line);
versionId = line.replace("Release:", "").trim();
} else if (line.startsWith("Codename:") && codeName.equals(Normal.UNKNOWN)) {
Logger.debug(LSB_RELEASE_A_LOG, line);
codeName = line.replace("Codename:", "").trim();
}
}
return family == null ? null : Triple.of(family, versionId, codeName);
}
/**
* Attempts to read /etc/lsb-release
*
* @return a Triple with the parsed family, versionID and codeName if file
     * successfully read and DISTRIB_ID or DISTRIB_DESCRIPTION found, null
* otherwise
*/
private static Triple<String, String, String> readLsbRelease() {
String family = null;
String versionId = Normal.UNKNOWN;
String codeName = Normal.UNKNOWN;
List<String> osRelease = FileKit.readLines("/etc/lsb-release");
// Search for NAME=
for (String line : osRelease) {
if (line.startsWith("DISTRIB_DESCRIPTION=")) {
Logger.debug(LSB_RELEASE_LOG, line);
line = line.replace("DISTRIB_DESCRIPTION=", "").replaceAll(DOUBLE_QUOTES, "").trim();
if (line.contains(RELEASE_DELIM)) {
Triple<String, String, String> Triple = parseRelease(line, RELEASE_DELIM);
family = Triple.getLeft();
if (versionId.equals(Normal.UNKNOWN)) {
versionId = Triple.getMiddle();
}
if (codeName.equals(Normal.UNKNOWN)) {
codeName = Triple.getRight();
}
}
} else if (line.startsWith("DISTRIB_ID=") && family == null) {
Logger.debug(LSB_RELEASE_LOG, line);
family = line.replace("DISTRIB_ID=", "").replaceAll(DOUBLE_QUOTES, "").trim();
} else if (line.startsWith("DISTRIB_RELEASE=") && versionId.equals(Normal.UNKNOWN)) {
Logger.debug(LSB_RELEASE_LOG, line);
versionId = line.replace("DISTRIB_RELEASE=", "").replaceAll(DOUBLE_QUOTES, "").trim();
} else if (line.startsWith("DISTRIB_CODENAME=") && codeName.equals(Normal.UNKNOWN)) {
Logger.debug(LSB_RELEASE_LOG, line);
codeName = line.replace("DISTRIB_CODENAME=", "").replaceAll(DOUBLE_QUOTES, "").trim();
}
}
return family == null ? null : Triple.of(family, versionId, codeName);
}
/**
* Attempts to read /etc/distrib-release (for some value of distrib)
*
* @return a Triple with the parsed family, versionID and codeName if file
* successfully read and " release " or " VERSION " found, null
* otherwise
*/
private static Triple<String, String, String> readDistribRelease(String filename) {
if (new File(filename).exists()) {
List<String> osRelease = FileKit.readLines(filename);
// Search for Distrib release x.x (Codename)
for (String line : osRelease) {
Logger.debug("{}: {}", filename, line);
if (line.contains(RELEASE_DELIM)) {
// If this parses properly we're done
return parseRelease(line, RELEASE_DELIM);
} else if (line.contains(" VERSION ")) {
// If this parses properly we're done
return parseRelease(line, " VERSION ");
}
}
}
return null;
}
/**
* Helper method to parse version description line style
*
* @param line a String of the form "Distributor release x.x (Codename)"
* @param splitLine A regex to split on, e.g. " release "
* @return a Triple with the parsed family, versionID and codeName
*/
private static Triple<String, String, String> parseRelease(String line, String splitLine) {
String[] split = line.split(splitLine);
String family = split[0].trim();
String versionId = Normal.UNKNOWN;
String codeName = Normal.UNKNOWN;
if (split.length > 1) {
split = split[1].split("[()]");
if (split.length > 0) {
versionId = split[0].trim();
}
if (split.length > 1) {
codeName = split[1].trim();
}
}
return Triple.of(family, versionId, codeName);
}
/**
* Looks for a collection of possible distrib-release filenames
*
* @return The first valid matching filename
*/
protected static String getReleaseFilename() {
// Look for any /etc/*-release, *-version, and variants
File etc = new File("/etc");
        // Find any matching *-release / *-version files in that path
File[] matchingFiles = etc.listFiles(//
f -> (f.getName().endsWith("-release") || //
f.getName().endsWith("-version") || //
f.getName().endsWith("_release") || //
f.getName().endsWith("_version")) //
&& !(f.getName().endsWith("os-release") || //
f.getName().endsWith("lsb-release") || //
f.getName().endsWith("system-release")));
if (matchingFiles != null && matchingFiles.length > 0) {
return matchingFiles[0].getPath();
}
if (new File("/etc/release").exists()) {
return "/etc/release";
}
// If all else fails, try this
return "/etc/issue";
}
/**
* Converts a portion of a filename (e.g. the 'redhat' in /etc/redhat-release)
* to a mixed case string representing the family (e.g., Red Hat)
*
* @param name Stripped version of filename after removing /etc and -release
* @return Mixed case family
*/
private static String filenameToFamily(String name) {
switch (name.toLowerCase()) {
// Handle known special cases
case "":
return "Solaris";
case "blackcat":
return "Black Cat";
case "bluewhite64":
return "BlueWhite64";
case "e-smith":
return "SME Server";
case "eos":
return "FreeEOS";
case "hlfs":
return "HLFS";
case "lfs":
return "Linux-From-Scratch";
case "linuxppc":
return "Linux-PPC";
case "meego":
return "MeeGo";
case "mandakelinux":
return "Mandrake";
case "mklinux":
return "MkLinux";
case "nld":
return "Novell Linux Desktop";
case "novell":
case "SuSE":
return "SUSE Linux";
case "pld":
return "PLD";
case "redhat":
return "Red Hat Linux";
case "sles":
return "SUSE Linux ES9";
case "sun":
return "Sun JDS";
case "synoinfo":
return "Synology";
case "tinysofa":
return "Tiny Sofa";
case "turbolinux":
return "TurboLinux";
case "ultrapenguin":
return "UltraPenguin";
case "va":
return "VA-Linux";
case "vmware":
return "VMWareESX";
case "yellowdog":
return "Yellow Dog";
// /etc/issue will end up here:
case "issue":
return "Unknown";
// If not a special case just capitalize first letter
default:
return name.substring(0, 1).toUpperCase() + name.substring(1);
}
}
/**
* Gets Jiffies per second, useful for converting ticks to milliseconds and vice
* versa.
*
* @return Jiffies per second.
*/
public static long getHz() {
return USER_HZ;
}
@Override
public String queryManufacturer() {
return "GNU/Linux";
}
@Override
public FamilyVersionInfo queryFamilyVersionInfo() {
Triple<String, String, String> familyVersionCodename = queryFamilyVersionCodenameFromReleaseFiles();
String buildNumber = null;
List<String> procVersion = FileKit.readLines(ProcPath.VERSION);
if (!procVersion.isEmpty()) {
String[] split = RegEx.SPACES.split(procVersion.get(0));
for (String s : split) {
if (!"Linux".equals(s) && !"version".equals(s)) {
buildNumber = s;
break;
}
}
}
OSVersionInfo versionInfo = new OSVersionInfo(familyVersionCodename.getMiddle(), familyVersionCodename.getRight(),
buildNumber);
return new FamilyVersionInfo(familyVersionCodename.getLeft(), versionInfo);
}
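    // A 32-bit JVM on a 64-bit kernel still resolves to 64 here, because `uname -m`
    // (e.g. x86_64) reflects the kernel architecture rather than the JVM.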
@Override
protected int queryBitness(int jvmBitness) {
if (jvmBitness < 64 && Executor.getFirstAnswer("uname -m").indexOf("64") == -1) {
return jvmBitness;
}
return 64;
}
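    // Elevation detection assumes sudo: SUDO_COMMAND is only present when the process was
    // launched via sudo, so a shell running directly as root is not reported as elevated here.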
@Override
protected boolean queryElevated() {
return System.getenv("SUDO_COMMAND") != null;
}
@Override
public FileSystem getFileSystem() {
return new LinuxFileSystem();
}
@Override
public InternetProtocolStats getInternetProtocolStats() {
return new LinuxInternetProtocolStats();
}
@Override
public List<OSSession> getSessions() {
return Collections.unmodifiableList(USE_WHO_COMMAND ? super.getSessions() : Who.queryUtxent());
}
@Override
public List<OSProcess> getProcesses(int limit, ProcessSort sort) {
List<OSProcess> procs = new ArrayList<>();
File[] pids = ProcessStat.getPidFiles();
// now for each file (with digit name) get process info
for (File pidFile : pids) {
int pid = Builder.parseIntOrDefault(pidFile.getName(), 0);
OSProcess proc = new LinuxOSProcess(pid);
if (!proc.getState().equals(State.INVALID)) {
procs.add(proc);
}
}
// Sort
List<OSProcess> sorted = processSort(procs, limit, sort);
return Collections.unmodifiableList(sorted);
}
@Override
public OSProcess getProcess(int pid) {
OSProcess proc = new LinuxOSProcess(pid);
if (!proc.getState().equals(State.INVALID)) {
return proc;
}
return null;
}
@Override
public List<OSProcess> getChildProcesses(int parentPid, int limit, ProcessSort sort) {
List<OSProcess> procs = new ArrayList<>();
File[] procFiles = ProcessStat.getPidFiles();
// now for each file (with digit name) get process info
for (File procFile : procFiles) {
int pid = Builder.parseIntOrDefault(procFile.getName(), 0);
if (parentPid == getParentPidFromProcFile(pid)) {
OSProcess proc = new LinuxOSProcess(pid);
if (!proc.getState().equals(State.INVALID)) {
procs.add(proc);
}
}
}
List<OSProcess> sorted = processSort(procs, limit, sort);
return Collections.unmodifiableList(sorted);
}
@Override
public int getProcessId() {
return LinuxLibc.INSTANCE.getpid();
}
@Override
public int getProcessCount() {
return ProcessStat.getPidFiles().length;
}
@Override
public int getThreadCount() {
try {
Sysinfo info = new Sysinfo();
if (0 != LibC.INSTANCE.sysinfo(info)) {
Logger.error("Failed to get process thread count. Error code: {}", Native.getLastError());
return 0;
}
return info.procs;
} catch (UnsatisfiedLinkError | NoClassDefFoundError e) {
Logger.error("Failed to get procs from sysinfo. {}", e.getMessage());
}
return 0;
}
@Override
public long getSystemUptime() {
return (long) UpTime.getSystemUptimeSeconds();
}
@Override
public long getSystemBootTime() {
return BOOTTIME;
}
@Override
public NetworkParams getNetworkParams() {
return new LinuxNetworkParams();
}
@Override
public OSService[] getServices() {
// Get running services
List<OSService> services = new ArrayList<>();
Set<String> running = new HashSet<>();
for (OSProcess p : getChildProcesses(1, 0, ProcessSort.PID)) {
OSService s = new OSService(p.getName(), p.getProcessID(), RUNNING);
services.add(s);
running.add(p.getName());
}
boolean systemctlFound = false;
List<String> systemctl = Executor.runNative("systemctl list-unit-files");
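        // Each output line is "<unit-file> <state>"; enabled *.service units that are not
        // already running (matched by full or short name) are reported as STOPPED services.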
for (String str : systemctl) {
String[] split = RegEx.SPACES.split(str);
if (split.length == 2 && split[0].endsWith(".service") && "enabled".equals(split[1])) {
// remove .service extension
String name = split[0].substring(0, split[0].length() - 8);
int index = name.lastIndexOf(Symbol.C_DOT);
String shortName = (index < 0 || index > name.length() - 2) ? name : name.substring(index + 1);
if (!running.contains(name) && !running.contains(shortName)) {
OSService s = new OSService(name, 0, STOPPED);
services.add(s);
systemctlFound = true;
}
}
}
if (!systemctlFound) {
// Get Directories for stopped services
File dir = new File("/etc/init");
if (dir.exists() && dir.isDirectory()) {
for (File f : dir.listFiles((f, name) -> name.toLowerCase().endsWith(".conf"))) {
// remove .conf extension
String name = f.getName().substring(0, f.getName().length() - 5);
int index = name.lastIndexOf(Symbol.C_DOT);
String shortName = (index < 0 || index > name.length() - 2) ? name : name.substring(index + 1);
if (!running.contains(name) && !running.contains(shortName)) {
OSService s = new OSService(name, 0, STOPPED);
services.add(s);
}
}
} else {
Logger.error("Directory: /etc/init does not exist");
}
}
return services.toArray(new OSService[0]);
}
}
|
[
"\"SUDO_COMMAND\""
] |
[] |
[
"SUDO_COMMAND"
] |
[]
|
["SUDO_COMMAND"]
|
java
| 1 | 0 | |
sts/client_test.go
|
/*
Copyright (c) 2018 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sts
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/pem"
"log"
"net/url"
"os"
"testing"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/ssoadmin"
"github.com/vmware/govmomi/ssoadmin/types"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/soap"
)
// solutionUserCreate ensures that solution user "govmomi-test" exists for use with the tests that follow.
func solutionUserCreate(ctx context.Context, info *url.Userinfo, sts *Client) error {
s, err := sts.Issue(ctx, TokenRequest{Userinfo: info})
if err != nil {
return err
}
admin, err := ssoadmin.NewClient(ctx, &vim25.Client{Client: sts.Client})
if err != nil {
return err
}
header := soap.Header{Security: s}
if err = admin.Login(sts.WithHeader(ctx, header)); err != nil {
return err
}
defer admin.Logout(ctx)
id := types.PrincipalId{
Name: "govmomi-test",
Domain: admin.Domain,
}
user, err := admin.FindSolutionUser(ctx, id.Name)
if err != nil {
return err
}
if user == nil {
block, _ := pem.Decode([]byte(LocalhostCert))
details := types.AdminSolutionDetails{
Certificate: base64.StdEncoding.EncodeToString(block.Bytes),
Description: "govmomi test solution user",
}
if err = admin.CreateSolutionUser(ctx, id.Name, details); err != nil {
return err
}
}
if _, err = admin.GrantWSTrustRole(ctx, id, types.RoleActAsUser); err != nil {
return err
}
_, err = admin.SetRole(ctx, id, types.RoleAdministrator)
return err
}
func solutionUserCert() *tls.Certificate {
cert, err := tls.X509KeyPair(LocalhostCert, LocalhostKey)
if err != nil {
panic(err)
}
return &cert
}
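// The Issue tests below are integration tests: they need a live vCenter reachable via the
// GOVC_TEST_URL environment variable and call t.SkipNow when it is unset or the endpoint is not vCenter.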
func TestIssueHOK(t *testing.T) {
ctx := context.Background()
url := os.Getenv("GOVC_TEST_URL")
if url == "" {
t.SkipNow()
}
u, err := soap.ParseURL(url)
if err != nil {
t.Fatal(err)
}
c, err := vim25.NewClient(ctx, soap.NewClient(u, true))
if err != nil {
log.Fatal(err)
}
if !c.IsVC() {
t.SkipNow()
}
sts, err := NewClient(ctx, c)
if err != nil {
t.Fatal(err)
}
if err = solutionUserCreate(ctx, u.User, sts); err != nil {
t.Fatal(err)
}
req := TokenRequest{
Certificate: solutionUserCert(),
Delegatable: true,
}
s, err := sts.Issue(ctx, req)
if err != nil {
t.Fatal(err)
}
header := soap.Header{Security: s}
err = session.NewManager(c).LoginByToken(c.WithHeader(ctx, header))
if err != nil {
t.Fatal(err)
}
now, err := methods.GetCurrentTime(ctx, c)
if err != nil {
t.Fatal(err)
}
log.Printf("current time=%s", now)
}
func TestIssueBearer(t *testing.T) {
ctx := context.Background()
url := os.Getenv("GOVC_TEST_URL")
if url == "" {
t.SkipNow()
}
u, err := soap.ParseURL(url)
if err != nil {
t.Fatal(err)
}
c, err := vim25.NewClient(ctx, soap.NewClient(u, true))
if err != nil {
log.Fatal(err)
}
if !c.IsVC() {
t.SkipNow()
}
sts, err := NewClient(ctx, c)
if err != nil {
t.Fatal(err)
}
// Test that either Certificate or Userinfo is set.
_, err = sts.Issue(ctx, TokenRequest{})
if err == nil {
t.Error("expected error")
}
req := TokenRequest{
Userinfo: u.User,
}
s, err := sts.Issue(ctx, req)
if err != nil {
t.Fatal(err)
}
header := soap.Header{Security: s}
err = session.NewManager(c).LoginByToken(c.WithHeader(ctx, header))
if err != nil {
t.Fatal(err)
}
now, err := methods.GetCurrentTime(ctx, c)
if err != nil {
t.Fatal(err)
}
log.Printf("current time=%s", now)
}
func TestIssueActAs(t *testing.T) {
ctx := context.Background()
url := os.Getenv("GOVC_TEST_URL")
if url == "" {
t.SkipNow()
}
u, err := soap.ParseURL(url)
if err != nil {
t.Fatal(err)
}
c, err := vim25.NewClient(ctx, soap.NewClient(u, true))
if err != nil {
log.Fatal(err)
}
if !c.IsVC() {
t.SkipNow()
}
sts, err := NewClient(ctx, c)
if err != nil {
t.Fatal(err)
}
if err = solutionUserCreate(ctx, u.User, sts); err != nil {
t.Fatal(err)
}
req := TokenRequest{
Delegatable: true,
Userinfo: u.User,
}
s, err := sts.Issue(ctx, req)
if err != nil {
t.Fatal(err)
}
req = TokenRequest{
ActAs: s.Token,
Certificate: solutionUserCert(),
}
s, err = sts.Issue(ctx, req)
if err != nil {
t.Fatal(err)
}
header := soap.Header{Security: s}
err = session.NewManager(c).LoginByToken(c.WithHeader(ctx, header))
if err != nil {
t.Fatal(err)
}
now, err := methods.GetCurrentTime(ctx, c)
if err != nil {
t.Fatal(err)
}
log.Printf("current time=%s", now)
}
|
[
"\"GOVC_TEST_URL\"",
"\"GOVC_TEST_URL\"",
"\"GOVC_TEST_URL\""
] |
[] |
[
"GOVC_TEST_URL"
] |
[]
|
["GOVC_TEST_URL"]
|
go
| 1 | 0 | |
integration_tests/samples/basic_usage/emoji_reactions.py
|
import logging
logging.basicConfig(level=logging.DEBUG)
# export SLACK_API_TOKEN=xoxb-***
# python3 integration_tests/samples/basic_usage/emoji_reactions.py
import os
from slack_sdk.web import WebClient
client = WebClient(token=os.environ["SLACK_API_TOKEN"])
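# When run directly, post to #random and look up the first workspace member;
# otherwise fall back to the placeholder IDs below.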
if __name__ == "__main__":
channel_id = "#random"
user_id = client.users_list()["members"][0]["id"]
else:
channel_id = "C0XXXXXX"
user_id = "U0XXXXXXX"
response = client.chat_postMessage(channel=channel_id, text="Give me some reaction!")
# Ensure the channel_id is not a name
channel_id = response["channel"]
ts = response["message"]["ts"]
response = client.reactions_add(channel=channel_id, name="thumbsup", timestamp=ts)
response = client.reactions_remove(channel=channel_id, name="thumbsup", timestamp=ts)
|
[] |
[] |
[
"SLACK_API_TOKEN"
] |
[]
|
["SLACK_API_TOKEN"]
|
python
| 1 | 0 | |
vendor/github.com/peterh/liner/input.go
|
// Copyright 2018 The MATRIX Authors as well as Copyright 2014-2017 The go-ethereum Authors
// This file is part of the MATRIX library and of the go-ethereum library.
//
// The MATRIX-ethereum library is free software: you can redistribute it and/or modify it under the terms of the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
//and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
//WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
//OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// +build linux darwin openbsd freebsd netbsd
package liner
import (
"bufio"
"errors"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
)
type nexter struct {
r rune
err error
}
// State represents an open terminal
type State struct {
commonState
origMode termios
defaultMode termios
next <-chan nexter
winch chan os.Signal
pending []rune
useCHA bool
}
// NewLiner initializes a new *State, and sets the terminal into raw mode. To
// restore the terminal to its previous state, call State.Close().
func NewLiner() *State {
var s State
s.r = bufio.NewReader(os.Stdin)
s.terminalSupported = TerminalSupported()
if m, err := TerminalMode(); err == nil {
s.origMode = *m.(*termios)
} else {
s.inputRedirected = true
}
if _, err := getMode(syscall.Stdout); err != 0 {
s.outputRedirected = true
}
if s.inputRedirected && s.outputRedirected {
s.terminalSupported = false
}
if s.terminalSupported && !s.inputRedirected && !s.outputRedirected {
mode := s.origMode
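		// Switch to raw-ish mode: disable CR->NL mapping, parity checking, bit stripping and
		// XON/XOFF flow control; use 8-bit characters; turn off echo, canonical mode and extended input.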
mode.Iflag &^= icrnl | inpck | istrip | ixon
mode.Cflag |= cs8
mode.Lflag &^= syscall.ECHO | icanon | iexten
mode.ApplyMode()
winch := make(chan os.Signal, 1)
signal.Notify(winch, syscall.SIGWINCH)
s.winch = winch
s.checkOutput()
}
if !s.outputRedirected {
s.outputRedirected = !s.getColumns()
}
return &s
}
var errTimedOut = errors.New("timeout")
func (s *State) startPrompt() {
if s.terminalSupported {
if m, err := TerminalMode(); err == nil {
s.defaultMode = *m.(*termios)
mode := s.defaultMode
mode.Lflag &^= isig
mode.ApplyMode()
}
}
s.restartPrompt()
}
func (s *State) inputWaiting() bool {
return len(s.next) > 0
}
func (s *State) restartPrompt() {
next := make(chan nexter, 200)
go func() {
for {
var n nexter
n.r, _, n.err = s.r.ReadRune()
next <- n
// Shut down nexter loop when an end condition has been reached
if n.err != nil || n.r == '\n' || n.r == '\r' || n.r == ctrlC || n.r == ctrlD {
close(next)
return
}
}
}()
s.next = next
}
func (s *State) stopPrompt() {
if s.terminalSupported {
s.defaultMode.ApplyMode()
}
}
func (s *State) nextPending(timeout <-chan time.Time) (rune, error) {
select {
case thing, ok := <-s.next:
if !ok {
return 0, ErrInternal
}
if thing.err != nil {
return 0, thing.err
}
s.pending = append(s.pending, thing.r)
return thing.r, nil
case <-timeout:
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, errTimedOut
}
}
func (s *State) readNext() (interface{}, error) {
if len(s.pending) > 0 {
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
var r rune
select {
case thing, ok := <-s.next:
if !ok {
return 0, ErrInternal
}
if thing.err != nil {
return nil, thing.err
}
r = thing.r
case <-s.winch:
s.getColumns()
return winch, nil
}
if r != esc {
return r, nil
}
s.pending = append(s.pending, r)
// Wait at most 50 ms for the rest of the escape sequence
// If nothing else arrives, it was an actual press of the esc key
timeout := time.After(50 * time.Millisecond)
flag, err := s.nextPending(timeout)
if err != nil {
if err == errTimedOut {
return flag, nil
}
return unknown, err
}
switch flag {
case '[':
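		// CSI sequence: ESC '[' followed by optional numeric parameters and a final byte
		// (e.g. ESC [ A for up-arrow, ESC [ 3 ~ for delete, ESC [ 1 ; 5 C for Ctrl-right).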
code, err := s.nextPending(timeout)
if err != nil {
if err == errTimedOut {
return code, nil
}
return unknown, err
}
switch code {
case 'A':
s.pending = s.pending[:0] // escape code complete
return up, nil
case 'B':
s.pending = s.pending[:0] // escape code complete
return down, nil
case 'C':
s.pending = s.pending[:0] // escape code complete
return right, nil
case 'D':
s.pending = s.pending[:0] // escape code complete
return left, nil
case 'F':
s.pending = s.pending[:0] // escape code complete
return end, nil
case 'H':
s.pending = s.pending[:0] // escape code complete
return home, nil
case 'Z':
s.pending = s.pending[:0] // escape code complete
return shiftTab, nil
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
num := []rune{code}
for {
code, err := s.nextPending(timeout)
if err != nil {
if err == errTimedOut {
return code, nil
}
return nil, err
}
switch code {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
num = append(num, code)
case ';':
// Modifier code to follow
// This only supports Ctrl-left and Ctrl-right for now
x, _ := strconv.ParseInt(string(num), 10, 32)
if x != 1 {
// Can't be left or right
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
num = num[:0]
for {
code, err = s.nextPending(timeout)
if err != nil {
if err == errTimedOut {
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
return nil, err
}
switch code {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
num = append(num, code)
case 'C', 'D':
// right, left
mod, _ := strconv.ParseInt(string(num), 10, 32)
if mod != 5 {
// Not bare Ctrl
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
s.pending = s.pending[:0] // escape code complete
if code == 'C' {
return wordRight, nil
}
return wordLeft, nil
default:
// Not left or right
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
}
case '~':
s.pending = s.pending[:0] // escape code complete
x, _ := strconv.ParseInt(string(num), 10, 32)
switch x {
case 2:
return insert, nil
case 3:
return del, nil
case 5:
return pageUp, nil
case 6:
return pageDown, nil
case 7:
return home, nil
case 8:
return end, nil
case 15:
return f5, nil
case 17:
return f6, nil
case 18:
return f7, nil
case 19:
return f8, nil
case 20:
return f9, nil
case 21:
return f10, nil
case 23:
return f11, nil
case 24:
return f12, nil
default:
return unknown, nil
}
default:
// unrecognized escape code
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
}
}
case 'O':
code, err := s.nextPending(timeout)
if err != nil {
if err == errTimedOut {
return code, nil
}
return nil, err
}
s.pending = s.pending[:0] // escape code complete
switch code {
case 'c':
return wordRight, nil
case 'd':
return wordLeft, nil
case 'H':
return home, nil
case 'F':
return end, nil
case 'P':
return f1, nil
case 'Q':
return f2, nil
case 'R':
return f3, nil
case 'S':
return f4, nil
default:
return unknown, nil
}
case 'b':
s.pending = s.pending[:0] // escape code complete
return altB, nil
case 'f':
s.pending = s.pending[:0] // escape code complete
return altF, nil
case 'y':
s.pending = s.pending[:0] // escape code complete
return altY, nil
default:
rv := s.pending[0]
s.pending = s.pending[1:]
return rv, nil
}
// not reached
return r, nil
}
// Close returns the terminal to its previous mode
func (s *State) Close() error {
signal.Stop(s.winch)
if !s.inputRedirected {
s.origMode.ApplyMode()
}
return nil
}
// TerminalSupported returns true if the current terminal supports
// line editing features, and false if liner will use the 'dumb'
// fallback for input.
// Note that TerminalSupported does not check all factors that may
// cause liner to not fully support the terminal (such as stdin redirection)
func TerminalSupported() bool {
bad := map[string]bool{"": true, "dumb": true, "cons25": true}
return !bad[strings.ToLower(os.Getenv("TERM"))]
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
agent/s3.go
|
package agent
import (
"errors"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/buildkite/agent/logger"
)
type credentialsProvider struct {
retrieved bool
}
func (e *credentialsProvider) Retrieve() (creds credentials.Value, err error) {
e.retrieved = false
creds.AccessKeyID = os.Getenv("BUILDKITE_S3_ACCESS_KEY_ID")
if creds.AccessKeyID == "" {
creds.AccessKeyID = os.Getenv("BUILDKITE_S3_ACCESS_KEY")
}
creds.SecretAccessKey = os.Getenv("BUILDKITE_S3_SECRET_ACCESS_KEY")
if creds.SecretAccessKey == "" {
creds.SecretAccessKey = os.Getenv("BUILDKITE_S3_SECRET_KEY")
}
if creds.AccessKeyID == "" {
err = errors.New("BUILDKITE_S3_ACCESS_KEY_ID or BUILDKITE_S3_ACCESS_KEY not found in environment")
}
if creds.SecretAccessKey == "" {
err = errors.New("BUILDKITE_S3_SECRET_ACCESS_KEY or BUILDKITE_S3_SECRET_KEY not found in environment")
}
e.retrieved = true
return
}
func (e *credentialsProvider) IsExpired() bool {
return !e.retrieved
}
func awsS3RegionFromEnv() (region string, err error) {
regionName := "us-east-1"
if os.Getenv("BUILDKITE_S3_DEFAULT_REGION") != "" {
regionName = os.Getenv("BUILDKITE_S3_DEFAULT_REGION")
} else {
var err error
regionName, err = awsRegion()
if err != nil {
return "", err
}
}
// Check to make sure the region exists.
resolver := endpoints.DefaultResolver()
partitions := resolver.(endpoints.EnumPartitions).Partitions()
for _, p := range partitions {
for id := range p.Regions() {
if id == regionName {
return regionName, nil
}
}
}
return "", fmt.Errorf("Unknown AWS S3 Region %q", regionName)
}
func awsS3Session(region string) (*session.Session, error) {
// Chicken and egg... but this is kinda how they do it in the sdk
sess, err := session.NewSession()
if err != nil {
return nil, err
}
sess.Config.Region = aws.String(region)
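	// Credentials are resolved in order: Buildkite-specific BUILDKITE_S3_* variables first,
	// then the standard AWS environment variables, then EC2/ECS instance metadata.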
sess.Config.Credentials = credentials.NewChainCredentials(
[]credentials.Provider{
&credentialsProvider{},
&credentials.EnvProvider{},
// EC2 and ECS meta-data providers
defaults.RemoteCredProvider(*sess.Config, sess.Handlers),
})
return sess, nil
}
func newS3Client(l logger.Logger, bucket string) (*s3.S3, error) {
region, err := awsS3RegionFromEnv()
if err != nil {
return nil, err
}
sess, err := awsS3Session(region)
if err != nil {
return nil, err
}
l.Debug("Authorizing S3 credentials and finding bucket `%s` in region `%s`...", bucket, region)
s3client := s3.New(sess)
// Test the authentication by trying to list the first 0 objects in the bucket.
_, err = s3client.ListObjects(&s3.ListObjectsInput{
Bucket: aws.String(bucket),
MaxKeys: aws.Int64(0),
})
if err != nil {
if err == credentials.ErrNoValidProvidersFoundInChain {
return nil, fmt.Errorf("Could not find a valid authentication strategy to connect to S3. Try setting BUILDKITE_S3_ACCESS_KEY and BUILDKITE_S3_SECRET_KEY")
}
return nil, fmt.Errorf("Failed to authenticate to bucket `%s` in region `%s` (%s)", bucket, region, err.Error())
}
return s3client, nil
}
|
[
"\"BUILDKITE_S3_ACCESS_KEY_ID\"",
"\"BUILDKITE_S3_ACCESS_KEY\"",
"\"BUILDKITE_S3_SECRET_ACCESS_KEY\"",
"\"BUILDKITE_S3_SECRET_KEY\"",
"\"BUILDKITE_S3_DEFAULT_REGION\"",
"\"BUILDKITE_S3_DEFAULT_REGION\""
] |
[] |
[
"BUILDKITE_S3_ACCESS_KEY",
"BUILDKITE_S3_ACCESS_KEY_ID",
"BUILDKITE_S3_DEFAULT_REGION",
"BUILDKITE_S3_SECRET_ACCESS_KEY",
"BUILDKITE_S3_SECRET_KEY"
] |
[]
|
["BUILDKITE_S3_ACCESS_KEY", "BUILDKITE_S3_ACCESS_KEY_ID", "BUILDKITE_S3_DEFAULT_REGION", "BUILDKITE_S3_SECRET_ACCESS_KEY", "BUILDKITE_S3_SECRET_KEY"]
|
go
| 5 | 0 | |
config/src/main/java/com/alibaba/nacos/config/server/service/ServerListService.java
|
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.nacos.config.server.service;
import com.alibaba.nacos.common.utils.IoUtils;
import com.alibaba.nacos.config.server.constant.Constants;
import com.alibaba.nacos.config.server.monitor.MetricsMonitor;
import com.alibaba.nacos.config.server.service.notify.NotifyService;
import com.alibaba.nacos.config.server.service.notify.NotifyService.HttpResult;
import com.alibaba.nacos.config.server.utils.PropertyUtil;
import com.alibaba.nacos.config.server.utils.RunningConfigUtils;
import com.alibaba.nacos.config.server.utils.event.EventDispatcher;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.HttpClientUtils;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClients;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.context.WebServerInitializedEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.annotation.PostConstruct;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static com.alibaba.nacos.config.server.utils.LogUtil.defaultLog;
import static com.alibaba.nacos.config.server.utils.LogUtil.fatalLog;
import static com.alibaba.nacos.core.utils.SystemUtils.*;
/**
* Serverlist service
*
* @author Nacos
*/
@Service
public class ServerListService implements ApplicationListener<WebServerInitializedEvent> {
private final ServletContext servletContext;
@Value("${server.port:8848}")
private int port;
@Value("${useAddressServer}")
private Boolean isUseAddressServer = true;
public ServerListService(ServletContext servletContext) {
this.servletContext = servletContext;
}
@PostConstruct
public void init() {
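        // The address server endpoint is taken from the address_server_domain / address_server_port
        // environment variables when present, falling back to the address.server.domain /
        // address.server.port system properties (defaults: jmenv.tbsite.net:8080).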
String envDomainName = System.getenv("address_server_domain");
if (StringUtils.isBlank(envDomainName)) {
domainName = System.getProperty("address.server.domain", "jmenv.tbsite.net");
} else {
domainName = envDomainName;
}
String envAddressPort = System.getenv("address_server_port");
if (StringUtils.isBlank(envAddressPort)) {
addressPort = System.getProperty("address.server.port", "8080");
} else {
addressPort = envAddressPort;
}
addressUrl = System.getProperty("address.server.url",
servletContext.getContextPath() + "/" + RunningConfigUtils.getClusterName());
addressServerUrl = "http://" + domainName + ":" + addressPort + addressUrl;
envIdUrl = "http://" + domainName + ":" + addressPort + "/env";
defaultLog.info("ServerListService address-server port:" + addressPort);
defaultLog.info("ADDRESS_SERVER_URL:" + addressServerUrl);
isHealthCheck = PropertyUtil.isHealthCheck();
maxFailCount = PropertyUtil.getMaxHealthCheckFailCount();
fatalLog.warn("useAddressServer:{}", isUseAddressServer);
GetServerListTask task = new GetServerListTask();
task.run();
if (CollectionUtils.isEmpty(serverList)) {
fatalLog.error("########## cannot get serverlist, so exit.");
throw new RuntimeException("cannot get serverlist, so exit.");
} else {
TimerTaskService.scheduleWithFixedDelay(task, 0L, 5L, TimeUnit.SECONDS);
}
}
public List<String> getServerList() {
return new ArrayList<String>(serverList);
}
public static void setServerList(List<String> serverList) {
ServerListService.serverList = serverList;
}
public static List<String> getServerListUnhealth() {
return new ArrayList<String>(serverListUnhealth);
}
public static Boolean isFirstIp() {
return serverList.get(0).contains(LOCAL_IP);
}
public boolean isHealthCheck() {
return isHealthCheck;
}
/**
* serverList has changed
*/
static public class ServerListChangeEvent implements EventDispatcher.Event {
}
private void updateIfChanged(List<String> newList) {
if (CollectionUtils.isEmpty(newList)||newList.equals(serverList)) {
return;
}
boolean isContainSelfIp = newList.stream().anyMatch(ipPortTmp -> ipPortTmp.contains(LOCAL_IP));
if (isContainSelfIp) {
isInIpList = true;
} else {
isInIpList = false;
String selfAddr = getFormatServerAddr(LOCAL_IP);
newList.add(selfAddr);
fatalLog.error("########## [serverlist] self ip {} not in serverlist {}", selfAddr, newList);
}
serverList = new ArrayList<String>(newList);
if(!serverListUnhealth.isEmpty()){
List<String> unhealthyRemoved = serverListUnhealth.stream()
.filter(unhealthyIp -> !newList.contains(unhealthyIp)).collect(Collectors.toList());
serverListUnhealth.removeAll(unhealthyRemoved);
List<String> unhealthyCountRemoved = serverIp2unhealthCount.keySet().stream()
.filter(key -> !newList.contains(key)).collect(Collectors.toList());
for (String unhealthyCountTmp : unhealthyCountRemoved) {
serverIp2unhealthCount.remove(unhealthyCountTmp);
}
}
defaultLog.warn("[serverlist] updated to {}", serverList);
/**
         * Fire the event non-concurrently
*/
EventDispatcher.fireEvent(new ServerListChangeEvent());
}
/**
     * Guaranteed not to return null
*
* @return serverlist
*/
private List<String> getApacheServerList() {
if (STANDALONE_MODE) {
List<String> serverIps = new ArrayList<String>();
serverIps.add(getFormatServerAddr(LOCAL_IP));
return serverIps;
}
        // Read the server list from the local cluster configuration file first
try {
List<String> serverIps = new ArrayList<String>();
List<String> serverAddrLines = readClusterConf();
if (!CollectionUtils.isEmpty(serverAddrLines)) {
for (String serverAddr : serverAddrLines) {
if (StringUtils.isNotBlank(serverAddr.trim())) {
serverIps.add(getFormatServerAddr(serverAddr));
}
}
}
if (serverIps.size() > 0) {
return serverIps;
}
} catch (Exception e) {
defaultLog.error("nacos-XXXX", "[serverlist] failed to get serverlist from disk!", e);
}
if (isUseAddressServer()) {
try {
HttpResult result = NotifyService.invokeURL(addressServerUrl, null, null);
if (HttpServletResponse.SC_OK == result.code) {
isAddressServerHealth = true;
addressServerFailCount = 0;
List<String> lines = IoUtils.readLines(new StringReader(result.content));
List<String> ips = new ArrayList<String>(lines.size());
for (String serverAddr : lines) {
if (StringUtils.isNotBlank(serverAddr)) {
ips.add(getFormatServerAddr(serverAddr));
}
}
return ips;
} else {
addressServerFailCount++;
if (addressServerFailCount >= maxFailCount) {
isAddressServerHealth = false;
}
defaultLog.error("[serverlist] failed to get serverlist, error code {}", result.code);
return Collections.emptyList();
}
} catch (IOException e) {
addressServerFailCount++;
if (addressServerFailCount >= maxFailCount) {
isAddressServerHealth = false;
}
defaultLog.error("[serverlist] exception, " + e.toString(), e);
return Collections.emptyList();
}
} else {
List<String> serverIps = new ArrayList<String>();
serverIps.add(getFormatServerAddr(LOCAL_IP));
return serverIps;
}
}
private String getFormatServerAddr(String serverAddr) {
if (StringUtils.isBlank(serverAddr)) {
throw new IllegalArgumentException("invalid serverlist");
}
String[] ipPort = serverAddr.trim().split(":");
String ip = ipPort[0].trim();
if (ipPort.length == 1 && port != 0) {
return (ip + ":" + port);
} else {
return serverAddr;
}
}
class GetServerListTask implements Runnable {
@Override
public void run() {
try {
updateIfChanged(getApacheServerList());
} catch (Exception e) {
defaultLog.error("[serverlist] failed to get serverlist, " + e.toString(), e);
}
}
}
private void checkServerHealth() {
long startCheckTime = System.currentTimeMillis();
for (String serverIp : serverList) {
            // Compatible with old code; use status.taobao
String url = "http://" + serverIp + servletContext.getContextPath() + Constants.HEALTH_CONTROLLER_PATH;
// "/nacos/health";
HttpGet request = new HttpGet(url);
httpclient.execute(request, new AsyncCheckServerHealthCallBack(serverIp));
}
long endCheckTime = System.currentTimeMillis();
long cost = endCheckTime - startCheckTime;
defaultLog.debug("checkServerHealth cost: {}", cost);
}
class AsyncCheckServerHealthCallBack implements FutureCallback<HttpResponse> {
private String serverIp;
public AsyncCheckServerHealthCallBack(String serverIp) {
this.serverIp = serverIp;
}
@Override
public void completed(HttpResponse response) {
if (response.getStatusLine().getStatusCode() == HttpServletResponse.SC_OK) {
serverListUnhealth.remove(serverIp);
HttpClientUtils.closeQuietly(response);
}
}
@Override
public void failed(Exception ex) {
computeFailCount();
}
@Override
public void cancelled() {
computeFailCount();
}
private void computeFailCount() {
int failCount = serverIp2unhealthCount.compute(serverIp,(key,oldValue)->oldValue == null?1:oldValue+1);
if (failCount > maxFailCount) {
if (!serverListUnhealth.contains(serverIp)) {
serverListUnhealth.add(serverIp);
}
defaultLog.error("unhealthIp:{}, unhealthCount:{}", serverIp, failCount);
MetricsMonitor.getUnhealthException().increment();
}
}
}
class CheckServerHealthTask implements Runnable {
@Override
public void run() {
checkServerHealth();
}
}
private Boolean isUseAddressServer() {
return isUseAddressServer;
}
static class CheckServerThreadFactory implements ThreadFactory {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r, "com.alibaba.nacos.CheckServerThreadFactory");
thread.setDaemon(true);
return thread;
}
}
public static boolean isAddressServerHealth() {
return isAddressServerHealth;
}
public static boolean isInIpList() {
return isInIpList;
}
// ==========================
/**
     * Connect timeout and socket timeout used when talking to other servers
*/
static final int TIMEOUT = 5000;
private int maxFailCount = 12;
private static volatile List<String> serverList = new ArrayList<String>();
    private static volatile List<String> serverListUnhealth = Collections.synchronizedList(new ArrayList<String>());
private static volatile boolean isAddressServerHealth = true;
private static volatile int addressServerFailCount = 0;
private static volatile boolean isInIpList = true;
/**
     * Unhealthy count per server IP
*/
private static Map<String, Integer> serverIp2unhealthCount = new ConcurrentHashMap<>();
private RequestConfig requestConfig = RequestConfig.custom()
.setConnectTimeout(PropertyUtil.getNotifyConnectTimeout())
.setSocketTimeout(PropertyUtil.getNotifySocketTimeout()).build();
private CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom().setDefaultRequestConfig(requestConfig)
.build();
public String domainName;
public String addressPort;
public String addressUrl;
public String envIdUrl;
public String addressServerUrl;
private boolean isHealthCheck = true;
@Override
public void onApplicationEvent(WebServerInitializedEvent event) {
if (port == 0) {
port = event.getWebServer().getPort();
List<String> newList = new ArrayList<String>();
for (String serverAddrTmp : serverList) {
newList.add(getFormatServerAddr(serverAddrTmp));
}
setServerList(new ArrayList<String>(newList));
}
httpclient.start();
CheckServerHealthTask checkServerHealthTask = new CheckServerHealthTask();
TimerTaskService.scheduleWithFixedDelay(checkServerHealthTask, 0L, 5L, TimeUnit.SECONDS);
}
}
|
[
"\"address_server_domain\"",
"\"address_server_port\""
] |
[] |
[
"address_server_port",
"address_server_domain"
] |
[]
|
["address_server_port", "address_server_domain"]
|
java
| 2 | 0 | |
cmd/kubernetesDeploy_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type kubernetesDeployOptions struct {
AdditionalParameters []string `json:"additionalParameters,omitempty"`
APIServer string `json:"apiServer,omitempty"`
AppTemplate string `json:"appTemplate,omitempty"`
ChartPath string `json:"chartPath,omitempty"`
ContainerRegistryPassword string `json:"containerRegistryPassword,omitempty"`
ContainerImageName string `json:"containerImageName,omitempty"`
ContainerImageTag string `json:"containerImageTag,omitempty"`
ContainerRegistryURL string `json:"containerRegistryUrl,omitempty"`
ContainerRegistryUser string `json:"containerRegistryUser,omitempty"`
ContainerRegistrySecret string `json:"containerRegistrySecret,omitempty"`
CreateDockerRegistrySecret bool `json:"createDockerRegistrySecret,omitempty"`
DeploymentName string `json:"deploymentName,omitempty"`
DeployTool string `json:"deployTool,omitempty" validate:"possible-values=kubectl helm helm3"`
ForceUpdates bool `json:"forceUpdates,omitempty"`
HelmDeployWaitSeconds int `json:"helmDeployWaitSeconds,omitempty"`
HelmValues []string `json:"helmValues,omitempty"`
Image string `json:"image,omitempty"`
IngressHosts []string `json:"ingressHosts,omitempty"`
KeepFailedDeployments bool `json:"keepFailedDeployments,omitempty"`
RunHelmTests bool `json:"runHelmTests,omitempty"`
ShowTestLogs bool `json:"showTestLogs,omitempty"`
KubeConfig string `json:"kubeConfig,omitempty"`
KubeContext string `json:"kubeContext,omitempty"`
KubeToken string `json:"kubeToken,omitempty"`
Namespace string `json:"namespace,omitempty"`
TillerNamespace string `json:"tillerNamespace,omitempty"`
DockerConfigJSON string `json:"dockerConfigJSON,omitempty"`
DeployCommand string `json:"deployCommand,omitempty" validate:"possible-values=apply replace"`
}
// KubernetesDeployCommand Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.
func KubernetesDeployCommand() *cobra.Command {
const STEP_NAME = "kubernetesDeploy"
metadata := kubernetesDeployMetadata()
var stepConfig kubernetesDeployOptions
var startTime time.Time
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createKubernetesDeployCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.",
Long: `Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.
!!! note "Deployment supports multiple deployment tools"
Currently the following are supported:
* [Helm](https://helm.sh/) command line tool and [Helm Charts](https://docs.helm.sh/developing_charts/#charts).
* [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and ` + "`" + `kubectl apply` + "`" + ` command.
## Helm
Following helm command will be executed by default:
` + "`" + `` + "`" + `` + "`" + `
helm upgrade <deploymentName> <chartPath> --install --force --namespace <namespace> --wait --timeout <helmDeployWaitSeconds> --set "image.repository=<yourRegistry>/<yourImageName>,image.tag=<yourImageTag>,secret.dockerconfigjson=<dockerSecret>,ingress.hosts[0]=<ingressHosts[0]>,ingress.hosts[1]=<ingressHosts[1]>,...
` + "`" + `` + "`" + `` + "`" + `
* ` + "`" + `yourRegistry` + "`" + ` will be retrieved from ` + "`" + `containerRegistryUrl` + "`" + `
* ` + "`" + `yourImageName` + "`" + `, ` + "`" + `yourImageTag` + "`" + ` will be retrieved from ` + "`" + `image` + "`" + `
* ` + "`" + `dockerSecret` + "`" + ` will be calculated with a call to ` + "`" + `kubectl create secret generic <containerRegistrySecret> --from-file=.dockerconfigjson=<dockerConfigJson> --type=kubernetes.io/dockerconfigjson --insecure-skip-tls-verify=true --dry-run=client --output=json` + "`" + ``,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.ContainerRegistryPassword)
log.RegisterSecret(stepConfig.ContainerRegistryUser)
log.RegisterSecret(stepConfig.KubeConfig)
log.RegisterSecret(stepConfig.KubeToken)
log.RegisterSecret(stepConfig.DockerConfigJSON)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
kubernetesDeploy(stepConfig, &stepTelemetryData)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addKubernetesDeployFlags(createKubernetesDeployCmd, &stepConfig)
return createKubernetesDeployCmd
}
func addKubernetesDeployFlags(cmd *cobra.Command, stepConfig *kubernetesDeployOptions) {
cmd.Flags().StringSliceVar(&stepConfig.AdditionalParameters, "additionalParameters", []string{}, "Defines additional parameters for \"helm install\" or \"kubectl apply\" command.")
cmd.Flags().StringVar(&stepConfig.APIServer, "apiServer", os.Getenv("PIPER_apiServer"), "Defines the Url of the API Server of the Kubernetes cluster.")
cmd.Flags().StringVar(&stepConfig.AppTemplate, "appTemplate", os.Getenv("PIPER_appTemplate"), "Defines the filename for the kubernetes app template (e.g. k8s_apptemplate.yaml). Within this file `image` needs to be set as `image: <image-name>` for the image to be overwritten with other parameters.")
cmd.Flags().StringVar(&stepConfig.ChartPath, "chartPath", os.Getenv("PIPER_chartPath"), "Defines the chart path for deployments using helm. It is a mandatory parameter when `deployTool:helm` or `deployTool:helm3`.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryPassword, "containerRegistryPassword", os.Getenv("PIPER_containerRegistryPassword"), "Password for container registry access - typically provided by the CI/CD environment.")
cmd.Flags().StringVar(&stepConfig.ContainerImageName, "containerImageName", os.Getenv("PIPER_containerImageName"), "Name of the container which will be built - will be used together with `containerImageTag` instead of parameter `containerImage`")
cmd.Flags().StringVar(&stepConfig.ContainerImageTag, "containerImageTag", os.Getenv("PIPER_containerImageTag"), "Tag of the container which will be built - will be used together with `containerImageName` instead of parameter `containerImage`")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryURL, "containerRegistryUrl", os.Getenv("PIPER_containerRegistryUrl"), "http(s) url of the Container registry where the image to deploy is located.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryUser, "containerRegistryUser", os.Getenv("PIPER_containerRegistryUser"), "Username for container registry access - typically provided by the CI/CD environment.")
cmd.Flags().StringVar(&stepConfig.ContainerRegistrySecret, "containerRegistrySecret", `regsecret`, "Name of the container registry secret used for pulling containers from the registry.")
cmd.Flags().BoolVar(&stepConfig.CreateDockerRegistrySecret, "createDockerRegistrySecret", false, "Only for `deployTool:kubectl`: Toggle to turn on `containerRegistrySecret` creation.")
cmd.Flags().StringVar(&stepConfig.DeploymentName, "deploymentName", os.Getenv("PIPER_deploymentName"), "Defines the name of the deployment. It is a mandatory parameter when `deployTool:helm` or `deployTool:helm3`.")
cmd.Flags().StringVar(&stepConfig.DeployTool, "deployTool", `kubectl`, "Defines the tool which should be used for deployment.")
cmd.Flags().BoolVar(&stepConfig.ForceUpdates, "forceUpdates", true, "Adds `--force` flag to a helm resource update command or to a kubectl replace command")
cmd.Flags().IntVar(&stepConfig.HelmDeployWaitSeconds, "helmDeployWaitSeconds", 300, "Number of seconds before helm deploy returns.")
cmd.Flags().StringSliceVar(&stepConfig.HelmValues, "helmValues", []string{}, "List of helm values as YAML file reference or URL (as per helm parameter description for `-f` / `--values`)")
cmd.Flags().StringVar(&stepConfig.Image, "image", os.Getenv("PIPER_image"), "Full name of the image to be deployed.")
cmd.Flags().StringSliceVar(&stepConfig.IngressHosts, "ingressHosts", []string{}, "(Deprecated) List of ingress hosts to be exposed via helm deployment.")
cmd.Flags().BoolVar(&stepConfig.KeepFailedDeployments, "keepFailedDeployments", false, "Defines whether a failed deployment will be purged")
cmd.Flags().BoolVar(&stepConfig.RunHelmTests, "runHelmTests", false, "Defines whether or not to run helm tests against the recently deployed release")
cmd.Flags().BoolVar(&stepConfig.ShowTestLogs, "showTestLogs", false, "Defines whether to print the pod logs after running helm tests")
cmd.Flags().StringVar(&stepConfig.KubeConfig, "kubeConfig", os.Getenv("PIPER_kubeConfig"), "Defines the path to the \"kubeconfig\" file.")
cmd.Flags().StringVar(&stepConfig.KubeContext, "kubeContext", os.Getenv("PIPER_kubeContext"), "Defines the context to use from the \"kubeconfig\" file.")
cmd.Flags().StringVar(&stepConfig.KubeToken, "kubeToken", os.Getenv("PIPER_kubeToken"), "Contains the id_token used by kubectl for authentication. Consider using kubeConfig parameter instead.")
cmd.Flags().StringVar(&stepConfig.Namespace, "namespace", `default`, "Defines the target Kubernetes namespace for the deployment.")
cmd.Flags().StringVar(&stepConfig.TillerNamespace, "tillerNamespace", os.Getenv("PIPER_tillerNamespace"), "Defines optional tiller namespace for deployments using helm.")
cmd.Flags().StringVar(&stepConfig.DockerConfigJSON, "dockerConfigJSON", `.pipeline/docker/config.json`, "Path to the file `.docker/config.json` - this is typically provided by your CI/CD system. You can find more details about the Docker credentials in the [Docker documentation](https://docs.docker.com/engine/reference/commandline/login/).")
cmd.Flags().StringVar(&stepConfig.DeployCommand, "deployCommand", `apply`, "Only for `deployTool: kubectl`: defines the command `apply` or `replace`. The default is `apply`.")
cmd.MarkFlagRequired("containerRegistryUrl")
cmd.MarkFlagRequired("deployTool")
cmd.MarkFlagRequired("image")
}
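// Illustrative invocation (sketch, not part of the generated code; binary name "piper" and the
// registry/image values are assumptions): the flags registered above map directly to CLI options, e.g.
//   piper kubernetesDeploy --deployTool=helm3 --image=my-app:1.2.3 \
//     --containerRegistryUrl=https://my.registry.example --chartPath=./chart --deploymentName=my-app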
// retrieve step metadata
func kubernetesDeployMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "kubernetesDeploy",
Aliases: []config.Alias{{Name: "deployToKubernetes", Deprecated: true}},
Description: "Deployment to Kubernetes test or production namespace within the specified Kubernetes cluster.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "kubeConfigFileCredentialsId", Description: "Jenkins 'Secret file' credentials ID containing kubeconfig file. Details can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/).", Type: "jenkins", Aliases: []config.Alias{{Name: "kubeCredentialsId", Deprecated: true}}},
{Name: "kubeTokenCredentialsId", Description: "Jenkins 'Secret text' credentials ID containing token to authenticate to Kubernetes. This is an alternative way to using a kubeconfig file. Details can be found in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/).", Type: "jenkins", Aliases: []config.Alias{{Name: "k8sTokenCredentialsId", Deprecated: true}}},
{Name: "dockerCredentialsId", Type: "jenkins"},
{Name: "dockerConfigJsonCredentialsId", Description: "Jenkins 'Secret file' credentials ID containing Docker config.json (with registry credential(s)).", Type: "jenkins"},
},
Resources: []config.StepResources{
{Name: "deployDescriptor", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "additionalParameters",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentParameters"}},
Default: []string{},
},
{
Name: "apiServer",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "k8sAPIServer"}},
Default: os.Getenv("PIPER_apiServer"),
},
{
Name: "appTemplate",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "k8sAppTemplate"}},
Default: os.Getenv("PIPER_appTemplate"),
},
{
Name: "chartPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmChartPath"}},
Default: os.Getenv("PIPER_chartPath"),
},
{
Name: "containerRegistryPassword",
ResourceRef: []config.ResourceReference{
{
Name: "dockerCredentialsId",
Param: "password",
Type: "secret",
},
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryPassword",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_containerRegistryPassword"),
},
{
Name: "containerImageName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "dockerImageName"}},
Default: os.Getenv("PIPER_containerImageName"),
},
{
Name: "containerImageTag",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "artifactVersion",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "artifactVersion"}},
Default: os.Getenv("PIPER_containerImageTag"),
},
{
Name: "containerRegistryUrl",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/registryUrl",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "dockerRegistryUrl"}},
Default: os.Getenv("PIPER_containerRegistryUrl"),
},
{
Name: "containerRegistryUser",
ResourceRef: []config.ResourceReference{
{
Name: "dockerCredentialsId",
Param: "username",
Type: "secret",
},
{
Name: "commonPipelineEnvironment",
Param: "custom/repositoryUsername",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_containerRegistryUser"),
},
{
Name: "containerRegistrySecret",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `regsecret`,
},
{
Name: "createDockerRegistrySecret",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "deploymentName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentName"}},
Default: os.Getenv("PIPER_deploymentName"),
},
{
Name: "deployTool",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `kubectl`,
},
{
Name: "forceUpdates",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "force"}},
Default: true,
},
{
Name: "helmDeployWaitSeconds",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
Default: 300,
},
{
Name: "helmValues",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "image",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/imageNameTag",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "deployImage"}},
Default: os.Getenv("PIPER_image"),
},
{
Name: "ingressHosts",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "keepFailedDeployments",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "runHelmTests",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "showTestLogs",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
Default: false,
},
{
Name: "kubeConfig",
ResourceRef: []config.ResourceReference{
{
Name: "kubeConfigFileCredentialsId",
Type: "secret",
},
{
Name: "kubeConfigFileVaultSecretName",
Type: "vaultSecretFile",
Default: "kube-config",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_kubeConfig"),
},
{
Name: "kubeContext",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_kubeContext"),
},
{
Name: "kubeToken",
ResourceRef: []config.ResourceReference{
{
Name: "kubeTokenCredentialsId",
Type: "secret",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_kubeToken"),
},
{
Name: "namespace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentNamespace"}, {Name: "k8sDeploymentNamespace"}},
Default: `default`,
},
{
Name: "tillerNamespace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmTillerNamespace"}},
Default: os.Getenv("PIPER_tillerNamespace"),
},
{
Name: "dockerConfigJSON",
ResourceRef: []config.ResourceReference{
{
Name: "dockerConfigJsonCredentialsId",
Type: "secret",
},
{
Name: "dockerConfigFileVaultSecretName",
Type: "vaultSecretFile",
Default: "docker-config",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `.pipeline/docker/config.json`,
},
{
Name: "deployCommand",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: `apply`,
},
},
},
Containers: []config.Container{
{Image: "dtzar/helm-kubectl:3.4.1", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "deployTool", Value: "helm3"}}}}},
{Image: "dtzar/helm-kubectl:2.17.0", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "deployTool", Value: "helm"}}}}},
{Image: "dtzar/helm-kubectl:2.17.0", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "deployTool", Value: "kubectl"}}}}},
},
},
}
return theMetaData
}
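// Note (sketch): the StepData returned by kubernetesDeployMetadata() is what the surrounding framework
// uses to resolve configuration, secrets and commonPipelineEnvironment references before kubernetesDeploy()
// runs; the Default values above intentionally mirror the flag defaults set in addKubernetesDeployFlags.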
|
[
"\"PIPER_apiServer\"",
"\"PIPER_appTemplate\"",
"\"PIPER_chartPath\"",
"\"PIPER_containerRegistryPassword\"",
"\"PIPER_containerImageName\"",
"\"PIPER_containerImageTag\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerRegistryUser\"",
"\"PIPER_deploymentName\"",
"\"PIPER_image\"",
"\"PIPER_kubeConfig\"",
"\"PIPER_kubeContext\"",
"\"PIPER_kubeToken\"",
"\"PIPER_tillerNamespace\"",
"\"PIPER_apiServer\"",
"\"PIPER_appTemplate\"",
"\"PIPER_chartPath\"",
"\"PIPER_containerRegistryPassword\"",
"\"PIPER_containerImageName\"",
"\"PIPER_containerImageTag\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerRegistryUser\"",
"\"PIPER_deploymentName\"",
"\"PIPER_image\"",
"\"PIPER_kubeConfig\"",
"\"PIPER_kubeContext\"",
"\"PIPER_kubeToken\"",
"\"PIPER_tillerNamespace\""
] |
[] |
[
"PIPER_deploymentName",
"PIPER_tillerNamespace",
"PIPER_appTemplate",
"PIPER_containerRegistryPassword",
"PIPER_chartPath",
"PIPER_image",
"PIPER_kubeConfig",
"PIPER_containerImageTag",
"PIPER_containerRegistryUser",
"PIPER_kubeToken",
"PIPER_containerRegistryUrl",
"PIPER_apiServer",
"PIPER_containerImageName",
"PIPER_kubeContext"
] |
[]
|
["PIPER_deploymentName", "PIPER_tillerNamespace", "PIPER_appTemplate", "PIPER_containerRegistryPassword", "PIPER_chartPath", "PIPER_image", "PIPER_kubeConfig", "PIPER_containerImageTag", "PIPER_containerRegistryUser", "PIPER_kubeToken", "PIPER_containerRegistryUrl", "PIPER_apiServer", "PIPER_containerImageName", "PIPER_kubeContext"]
|
go
| 14 | 0 | |
finetune_with_separate.py
|
#!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
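# Illustrative example (not part of the original script): a YAML file passed via `-c my_config.yaml`
# containing, e.g.
#   model: resnet50
#   batch_size: 64
# is applied through parser.set_defaults(**cfg), so those values become the new defaults before the
# remaining command-line arguments are parsed.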
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data_dir', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet101")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--pretrained-path', default='', type=str, metavar='PATH',
help='Load from original checkpoint and pretrain (default: none) (with --pretrained)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# Learning rate schedule parameters
parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "step")')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='const',
help='Random erase mode (default: "const")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
parser.add_argument('--repeated-aug', action='store_true')
parser.set_defaults(repeated_aug=True)
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default="reduce",
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
parser.add_argument('--fake-separated-loss-log', action='store_true', default=False,
help='log the loss separately for fake and original samples')
parser.add_argument('--pause', type=int, default=None,
help='stop training early after this many epochs from the start epoch')
# distributed training
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--global_rank", default=0, type=int)
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--device', default=None, type=int,
help='GPU id to use.')
# original params
parser.add_argument('--separate-rate', type=float, default=1.0,
help='Weight of the real/fake discrimination task relative to the classification task')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def main():
setup_default_logging()
args, args_text = _parse_args()
args.prefetcher = not args.no_prefetcher
args.distributed = True
if args.device is not None:
print("Use GPU: {} for training".format(args.device))
if args.distributed:
# initialize torch.distributed using MPI
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# world_size = comm.Get_size()
# rank = comm.Get_rank()
# init_method = 'tcp://{}:23456'.format(args.dist_url)
master_addr = os.getenv("MASTER_ADDR", default="localhost")
master_port = os.getenv('MASTER_PORT', default='8888')
method = "tcp://{}:{}".format(master_addr, master_port)
rank = int(os.getenv('OMPI_COMM_WORLD_RANK', '0'))
world_size = int(os.getenv('OMPI_COMM_WORLD_SIZE', '1'))
ngpus_per_node = torch.cuda.device_count()
device = rank % ngpus_per_node
torch.cuda.set_device(device)
torch.distributed.init_process_group('nccl', init_method=method, world_size=world_size, rank=rank)
args.local_rank = device
args.global_rank = rank
args.device = device
args.world_size = world_size
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.global_rank, args.world_size))
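# Illustrative launch (assumption: an Open MPI style launcher is used), e.g.
#   MASTER_ADDR=host0 MASTER_PORT=8888 mpirun -np 8 python finetune_with_separate.py <data_dir> ...
# mpirun provides OMPI_COMM_WORLD_RANK / OMPI_COMM_WORLD_SIZE, which the block above maps to one GPU per process.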
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
random_seed(args.seed, args.global_rank)
if args.log_wandb and args.global_rank == 0:
if has_wandb:
wandb.init(project="pytorch-image-models", name=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
pretrained_path=args.pretrained_path,
separate_flg=True)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.global_rank == 0:
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=args.global_rank == 0)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp != 'native':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.global_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.global_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.global_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.global_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.global_rank == 0)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp != 'native':
# Apex DDP preferred unless native amp is activated
if args.global_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.global_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.global_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
# if needed, load dataset from torch
if args.dataset == 'CIFAR10':
args.data_dir = f'{args.data_dir}/cifar10_data'
elif args.dataset == 'CIFAR100':
args.data_dir = f'{args.data_dir}/cifar100_data'
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset,
root=args.data_dir, split=args.train_split, is_training=True,
batch_size=args.batch_size, repeats=args.epoch_repeats)
dataset_eval = create_dataset(
args.dataset, root=args.data_dir, split=args.val_split, is_training=False, batch_size=args.batch_size)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
repeated_aug=args.repeated_aug,
)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
# setup loss function
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
label_loss_fn = nn.BCEWithLogitsLoss().cuda()
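# Note: the model is expected to return two heads, (class_logits, real_fake_logit); train_loss_fn /
# validate_loss_fn score the class head and label_loss_fn (BCE with logits) scores the real-vs-fake head.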
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
if args.global_rank == 0:
if args.experiment:
exp_name = args.experiment
else:
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, label_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.global_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, label_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.module, loader_eval, validate_loss_fn, label_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None and args.global_rank == 0:
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
if args.pause is not None:
if epoch - start_epoch >= args.pause:
break
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_one_epoch(
epoch, model, loader, optimizer, loss_fn, label_loss_fn, args,
lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_label_m = AverageMeter()
losses_class_m = AverageMeter()
losses_m = AverageMeter()
top1_label_m = AverageMeter()
if args.fake_separated_loss_log:
fake_losses_m = AverageMeter()
origin_losses_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
# TODO: fetch from two separate loaders here and merge fake and original samples into input/target (postponed)
# import pdb; pdb.set_trace()
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if not mixup_active:
# when mixup is off, target arrives as class-index labels (mixup variants removed, plain label targets)
target_label = (target < args.num_classes).to(dtype=torch.float32).unsqueeze(1)
target_class = target
else:
# when mixup is on, target arrives as one-hot (soft) vectors
target_class = target
target_label = torch.sum(target, 1).unsqueeze(1) # sum the real classes along dim 1 ([128, 1000] -> [128, 1]); 1 -> real, 0 -> fake
# if args.global_rank == 0 and batch_idx%200 == 0:
# print(f"target:{target.shape}")
# print(target)
# print(f"target_label:{target_label.shape}")
# print(target_label)
if not args.prefetcher:
input, target_class = input.cuda(), target_class.cuda()
if mixup_fn is not None:
input, target_class = mixup_fn(input, target_class)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output, output_label = model(input)
# if args.fake_separated_loss_log:
# # calc loss splited with [0-999], [1000-1999]
# target_labels = torch.argmax(target, axis=1).cuda()
# fake_output = output[target_labels < args.num_classes//2]
# fake_target = target[target_labels < args.num_classes//2]
# origin_output = output[target_labels >= args.num_classes//2]
# origin_target = target[target_labels >= args.num_classes//2]
# fake_loss = loss_fn(fake_output, fake_target)
# origin_loss = loss_fn(origin_output, origin_target)
# if len(fake_target) == 0:
# fake_loss = torch.zeros(1, dtype=torch.float32).cuda()
# if len(origin_target) == 0:
# origin_loss = torch.zeros(1, dtype=torch.float32).cuda()
# if args.global_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
# print(f'fake_target_shape: {fake_target.shape}, origin_target_shape: {origin_target.shape}')
# print(f'fake_loss: {fake_loss}, origin_loss: {origin_loss}')
loss_class = loss_fn(output, target_class)
loss_label = label_loss_fn(output_label, target_label)
# be careful with the batch sizes when mixing the two tasks
# loss = (loss_class + loss_label)/2
# with args.separate_rate = 1.0 the classification task and the fake-detection task are weighted equally
# in the backward pass; with 0.5 the fake-detection task counts half as much as the classification task
# rate_loss = (loss_class/loss_label).item()
loss = (loss_class + 0*loss_label)
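# (sketch) a weighted combination using the --separate-rate option above would look like:
#   loss = loss_class + args.separate_rate * loss_label
# the line above currently keeps only the classification loss (the label loss is multiplied by 0).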
acc1_label = accuracy_label(output_label, target_label)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
losses_class_m.update(loss_class.item(), input.size(0))
losses_label_m.update(loss_label.item(), input.size(0))
top1_label_m.update(acc1_label.item(), output.size(0))
# if args.fake_separated_loss_log:
# if len(fake_target) > 0:
# fake_losses_m.update(fake_loss.item(), len(fake_target))
# if len(origin_target) > 0:
# origin_losses_m.update(origin_loss.item(), len(origin_target))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer,
clip_grad=args.clip_grad, clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad, mode=args.clip_mode)
optimizer.step()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
reduced_loss_class = reduce_tensor(loss_class.data, args.world_size)
reduced_loss_label = reduce_tensor(loss_label.data, args.world_size)
acc1_label = reduce_tensor(acc1_label, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
losses_class_m.update(reduced_loss_class.item(), input.size(0))
losses_label_m.update(reduced_loss_label.item(), input.size(0))
top1_label_m.update(acc1_label.item(), output.size(0))
# if args.fake_separated_loss_log:
# len(fake_target) and len(origin_target) would need to be summed across all processes here
# fake_local_sum_loss = torch.tensor([len(fake_target)*fake_loss.item()], dtype=torch.float32).cuda()
# dist.all_reduce(fake_local_sum_loss.data, op=dist.ReduceOp.SUM)
# fake_nums = torch.tensor([len(fake_target)], dtype=torch.int64).cuda()
# dist.all_reduce(fake_nums.data, op=dist.ReduceOp.SUM)
# if fake_nums.item() > 0:
# fake_losses_m.update(fake_local_sum_loss.item()/fake_nums.item(), fake_nums.item())
# origin_local_sum_loss = torch.tensor([len(origin_target)*origin_loss.item()], dtype=torch.float32).cuda()
# dist.all_reduce(origin_local_sum_loss.data, op=dist.ReduceOp.SUM)
# origin_nums = torch.tensor([len(origin_target)], dtype=torch.int64).cuda()
# dist.all_reduce(origin_nums.data, op=dist.ReduceOp.SUM)
# if origin_nums.item() > 0:
# origin_losses_m.update(origin_local_sum_loss.item()/origin_nums.item(), origin_nums.item())
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.global_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Loss_Class: {loss_class.val:>7.4f} ({loss_class.avg:>6.4f}) '
'Loss_Label: {loss_label.val:>7.4f} ({loss_label.avg:>6.4f}) '
'Acc@label: {top1_label.val:>7.4f} ({top1_label.avg:>7.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
loss_class=losses_class_m,
loss_label=losses_label_m,
top1_label=top1_label_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
if args.fake_separated_loss_log:
return OrderedDict([('loss', losses_m.avg), ('fake_loss', fake_losses_m.avg), ('origin_loss', origin_losses_m.avg)])
else:
return OrderedDict([('loss', losses_m.avg), ('loss_class', losses_class_m.avg), ('loss_label', losses_label_m.avg), ('top1_label', top1_label_m.avg)])
def validate(model, loader, loss_fn, label_loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_class_m = AverageMeter()
losses_label_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
top1_label_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
target_label = (target < args.num_classes).to(dtype=torch.float32).unsqueeze(1)
target_class = target
# if args.global_rank == 0 and batch_idx == 0:
# print(f"target:{target_class.shape}")
# print(target_class)
# print(f"target_label:{target_label.shape}")
# print(target_label)
with amp_autocast():
output, output_label = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
output_label = output_label.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target_class = target_class[0:target.size(0):reduce_factor]
target_label = target_label[0:target_label.size(0):reduce_factor]
loss_class = loss_fn(output, target_class)
loss_label = label_loss_fn(output_label, target_label)
acc1, acc5 = accuracy(output, target_class, topk=(1, 5))
acc1_label = accuracy_label(output_label, target_label)
if args.distributed:
reduced_loss_class = reduce_tensor(loss_class.data, args.world_size)
reduced_loss_label = reduce_tensor(loss_label.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
acc1_label = reduce_tensor(acc1_label, args.world_size)
else:
reduced_loss_class = loss_class.data
reduced_loss_label = loss_label.data
torch.cuda.synchronize()
losses_class_m.update(reduced_loss_class.item(), input.size(0))
losses_label_m.update(reduced_loss_label.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
top1_label_m.update(acc1_label.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.global_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss_Class: {loss_class.val:>7.4f} ({loss_class.avg:>6.4f}) '
'Loss_Label: {loss_label.val:>7.4f} ({loss_label.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) '
'Acc@label: {top1_label.val:>7.4f} ({top1_label.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss_class=losses_class_m, loss_label=losses_label_m, top1=top1_m, top5=top5_m, top1_label=top1_label_m))
metrics = OrderedDict([('loss_class', losses_class_m.avg), ('loss_label', losses_label_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg), ('top1_label', top1_label_m.avg)])
return metrics
if __name__ == '__main__':
main()
|
[] |
[] |
[
"MASTER_ADDR",
"OMPI_COMM_WORLD_RANK",
"MASTER_PORT",
"OMPI_COMM_WORLD_SIZE"
] |
[]
|
["MASTER_ADDR", "OMPI_COMM_WORLD_RANK", "MASTER_PORT", "OMPI_COMM_WORLD_SIZE"]
|
python
| 4 | 0 | |
conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('docs'))
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'docs/index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md', '.c', '.h']
source_parsers = {'.md': CommonMarkParser,
'.c': "c2rst.CStrip", '.h': "c2rst.CStrip"}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'Adafruit CircuitPython'
copyright = '2014-2018, MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)'
# These are overwritten on ReadTheDocs.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
version = release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["**/build*",
".venv",
"docs/README.md",
"drivers",
"examples",
"extmod",
"frozen",
"lib",
"main.c",
"mpy-cross",
"ports/*/*.c",
"ports/*/*.h",
"ports/*/boards",
"ports/*/common-hal",
"ports/*/supervisor",
"ports/atmel-samd/asf4",
"ports/atmel-samd/asf4_conf",
"ports/atmel-samd/external_flash",
"ports/atmel-samd/freetouch",
"ports/atmel-samd/peripherals",
"ports/atmel-samd/QTouch",
"ports/atmel-samd/tools",
"ports/bare-arm",
"ports/cc3200",
"ports/cc3200/FreeRTOS",
"ports/cc3200/hal",
"ports/esp8266/boards",
"ports/esp8266/common-hal",
"ports/esp8266/modules",
"ports/minimal",
"ports/nrf/device",
"ports/nrf/drivers",
"ports/nrf/hal",
"ports/nrf/modules",
"ports/pic16bit",
"ports/qemu-arm",
"ports/stm32",
"ports/stm32/hal",
"ports/stm32/cmsis",
"ports/stm32/usbdev",
"ports/stm32/usbhost",
"ports/teensy",
"ports/unix",
"ports/windows",
"ports/zephyr",
"py",
"shared-bindings/util.*",
"shared-module",
"supervisor",
"tests",
"tools"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Global include files. Sphinx docs suggest using rst_epilog in preference
# to rst_prolog, so we follow. Absolute paths below mean "from the base
# of the doctree".
rst_epilog = """
.. include:: /docs/templates/replace.inc
"""
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'docs/static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['docs/static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CircuitPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Include 3 levels of headers in PDF ToC
'preamble': r'\setcounter{tocdepth}{2}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CircuitPython.tex', 'CircuitPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'CircuitPython', 'CircuitPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CircuitPython', 'CircuitPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'CircuitPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"cpython": ('https://docs.python.org/3/', None),
"bus_device": ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),
"register": ('https://circuitpython.readthedocs.io/projects/register/en/latest/', None)}
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask.ext.script import Shell, Server, Manager
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.sqlalchemy import SQLAlchemy
import geojson
from zipfile import ZipFile
from urllib.request import urlopen
from io import BytesIO, TextIOWrapper
from geoalchemy2.shape import from_shape
from shapely.geometry import asShape
from geoindex.factory import create_app
app = create_app()
app.debug = True
port = int(os.environ.get('PORT', 8000))
manager = Manager(app)
manager.add_command('server', Server(host="0.0.0.0", port=port))
from geoindex.extensions import db
from geoindex.frontend.models import *
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def load_geojson():
result = urlopen('https://github.com/openregister/boundaries/archive/master.zip').read()
stream = BytesIO(result)
zipfile = ZipFile(stream, 'r')
file_names = [name for name in zipfile.namelist()
if name.endswith('.geojson')]
for name in file_names:
with zipfile.open(name, 'r') as f:
if name.endswith('.geojson'):
file_contents = TextIOWrapper(f, encoding='utf-8',
newline='')
data = geojson.loads(file_contents.read())
try:
name = data['properties']['REGD14NM']
code = data['properties']['REGD14CD']
geometry = data['geometry']
                    # hackery: store everything as a MultiPolygon
if geometry['type'] == 'Polygon':
coordinates = []
coordinates.append(geometry['coordinates'])
geometry['coordinates'] = coordinates
geometry['type'] = 'MultiPolygon'
polygon = from_shape(asShape(geometry), srid=4326)
boundary = Boundary(name=name, code=code, polygon=polygon)
db.session.add(boundary)
db.session.commit()
except KeyError as e:
print("not something we were expecting really")
if __name__ == '__main__':
manager.run()
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
internal/server/user_router.go
|
package server
import (
"encoding/json"
"fmt"
"net/http"
"os"
"strconv"
"Backend/internal/middleware"
"Backend/pkg/claim"
"Backend/pkg/response"
"Backend/pkg/user"
"github.com/go-chi/chi"
)
// UserRouter is the router for user endpoints.
type UserRouter struct {
Repository user.Repository
}
// CreateHandler creates a new user.
func (ur *UserRouter) CreateHandler(w http.ResponseWriter, r *http.Request) {
var u user.User
err := json.NewDecoder(r.Body).Decode(&u)
if err != nil {
response.HTTPError(w, r, http.StatusBadRequest, err.Error())
return
}
defer r.Body.Close()
ctx := r.Context()
err = ur.Repository.Create(ctx, &u)
if err != nil {
response.HTTPError(w, r, http.StatusBadRequest, err.Error())
return
}
u.Password = ""
w.Header().Add("Location", fmt.Sprintf("%s%d", r.URL.String(), u.ID))
response.JSON(w, r, http.StatusCreated, response.Map{"user": u})
}
// GetOneHandler responds with one user, looked up by username.
func (ur *UserRouter) GetOneHandler(w http.ResponseWriter, r *http.Request) {
username := chi.URLParam(r, "username")
ctx := r.Context()
u, err := ur.Repository.GetByUsername(ctx, username)
if err != nil {
response.HTTPError(w, r, http.StatusNotFound, err.Error())
return
}
response.JSON(w, r, http.StatusOK, response.Map{"user": u})
}
// UpdateHandler updates a stored user by ID.
func (ur *UserRouter) UpdateHandler(w http.ResponseWriter, r *http.Request) {
idStr := chi.URLParam(r, "id")
id, err := strconv.Atoi(idStr)
if err != nil {
response.HTTPError(w, r, http.StatusBadRequest, err.Error())
return
}
var u user.User
err = json.NewDecoder(r.Body).Decode(&u)
if err != nil {
response.HTTPError(w, r, http.StatusBadRequest, err.Error())
return
}
defer r.Body.Close()
ctx := r.Context()
err = ur.Repository.Update(ctx, uint(id), u)
if err != nil {
response.HTTPError(w, r, http.StatusNotFound, err.Error())
return
}
response.JSON(w, r, http.StatusOK, nil)
}
// DeleteHandler removes a user by ID.
func (ur *UserRouter) DeleteHandler(w http.ResponseWriter, r *http.Request) {
idStr := chi.URLParam(r, "id")
id, err := strconv.Atoi(idStr)
if err != nil {
response.HTTPError(w, r, http.StatusBadRequest, err.Error())
return
}
ctx := r.Context()
err = ur.Repository.Delete(ctx, uint(id))
if err != nil {
response.HTTPError(w, r, http.StatusNotFound, err.Error())
return
}
response.JSON(w, r, http.StatusOK, response.Map{})
}
// LoginHandler looks up the user and returns a JWT.
func (ur *UserRouter) LoginHandler(w http.ResponseWriter, r *http.Request) {
var u user.User
err := json.NewDecoder(r.Body).Decode(&u)
if err != nil {
response.HTTPError(w, r, http.StatusBadRequest, err.Error())
return
}
defer r.Body.Close()
ctx := r.Context()
storedUser, err := ur.Repository.GetByUsername(ctx, u.Username)
if err != nil {
response.HTTPError(w, r, http.StatusNotFound, err.Error())
return
}
if !storedUser.PasswordMatch(u.Password) {
		response.HTTPError(w, r, http.StatusBadRequest, "passwords don't match")
return
}
c := claim.Claim{ID: int(storedUser.ID)}
token, err := c.GetToken(os.Getenv("SIGNING_STRING"))
if err != nil {
response.HTTPError(w, r, http.StatusInternalServerError, err.Error())
return
}
response.JSON(w, r, http.StatusOK, response.Map{"token": token, "user": storedUser})
}
// Routes returns user router with each endpoint.
func (ur *UserRouter) Routes() http.Handler {
r := chi.NewRouter()
r.Post("/", ur.CreateHandler)
r.
With(middleware.Authorizator).
Get("/{username}", ur.GetOneHandler)
r.
With(middleware.Authorizator).
Put("/{id}", ur.UpdateHandler)
r.
With(middleware.Authorizator).
Delete("/{id}", ur.DeleteHandler)
r.Post("/login/", ur.LoginHandler)
return r
}
|
[
"\"SIGNING_STRING\""
] |
[] |
[
"SIGNING_STRING"
] |
[]
|
["SIGNING_STRING"]
|
go
| 1 | 0 | |
pxr/usdImaging/lib/usdviewq/appController.py
|
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
# Qt Components
from qt import QtCore, QtGui, QtWidgets
# Stdlib components
import re, sys, os, cProfile, pstats, traceback
from itertools import groupby
from time import time, sleep
from collections import deque, OrderedDict
# Usd Library Components
from pxr import Usd, UsdGeom, UsdShade, UsdUtils, UsdImagingGL, Glf, Sdf, Tf, Ar
from pxr import UsdAppUtils
from pxr.UsdAppUtils.complexityArgs import RefinementComplexities
# UI Components
from ._usdviewq import Utils
from stageView import StageView
from mainWindowUI import Ui_MainWindow
from primContextMenu import PrimContextMenu
from headerContextMenu import HeaderContextMenu
from layerStackContextMenu import LayerStackContextMenu
from attributeViewContextMenu import AttributeViewContextMenu
from customAttributes import (_GetCustomAttributes, CustomAttribute,
BoundingBoxAttribute, LocalToWorldXformAttribute,
ResolvedBoundMaterial)
from primTreeWidget import PrimTreeWidget, PrimViewColumnIndex
from primViewItem import PrimViewItem
from variantComboBox import VariantComboBox
from legendUtil import ToggleLegendWithBrowser
import prettyPrint, adjustClipping, adjustDefaultMaterial, settings
from constantGroup import ConstantGroup
from selectionDataModel import ALL_INSTANCES, SelectionDataModel
# Common Utilities
from common import (UIBaseColors, UIPropertyValueSourceColors, UIFonts,
GetPropertyColor, GetPropertyTextFont,
Timer, Drange, BusyContext, DumpMallocTags, GetShortString,
GetInstanceIdForIndex,
ResetSessionVisibility, InvisRootPrims, GetAssetCreationTime,
PropertyViewIndex, PropertyViewIcons, PropertyViewDataRoles,
RenderModes, ColorCorrectionModes, ShadedRenderModes,
PickModes, SelectionHighlightModes, CameraMaskModes,
PropTreeWidgetTypeIsRel, PrimNotFoundException,
GetRootLayerStackInfo, HasSessionVis, GetEnclosingModelPrim,
GetPrimsLoadability, ClearColors,
HighlightColors)
import settings2
from settings2 import StateSource
from usdviewApi import UsdviewApi
from rootDataModel import RootDataModel
from viewSettingsDataModel import ViewSettingsDataModel
import plugin
from pythonInterpreter import Myconsole
SETTINGS_VERSION = "1"
class HUDEntries(ConstantGroup):
# Upper HUD entries (declared in variables for abstraction)
PRIM = "Prims"
CV = "CVs"
VERT = "Verts"
FACE = "Faces"
# Lower HUD entries
PLAYBACK = "Playback"
RENDER = "Render"
GETBOUNDS = "BBox"
# Name for prims that have no type
NOTYPE = "Typeless"
class PropertyIndex(ConstantGroup):
VALUE, METADATA, LAYERSTACK, COMPOSITION = range(4)
class UIDefaults(ConstantGroup):
STAGE_VIEW_WIDTH = 604
PRIM_VIEW_WIDTH = 521
ATTRIBUTE_VIEW_WIDTH = 682
ATTRIBUTE_INSPECTOR_WIDTH = 443
TOP_HEIGHT = 538
BOTTOM_HEIGHT = 306
# Name of the Qt binding being used
QT_BINDING = QtCore.__name__.split('.')[0]
class UsdviewDataModel(RootDataModel):
def __init__(self, printTiming, settings2):
super(UsdviewDataModel, self).__init__(printTiming)
self._selectionDataModel = SelectionDataModel(self)
self._viewSettingsDataModel = ViewSettingsDataModel(self, settings2)
@property
def selection(self):
return self._selectionDataModel
@property
def viewSettings(self):
return self._viewSettingsDataModel
class UIStateProxySource(StateSource):
"""XXX Temporary class which allows AppController to serve as two state sources.
All fields here will be moved back into AppController in the future.
"""
def __init__(self, mainWindow, parent, name):
StateSource.__init__(self, parent, name)
self._mainWindow = mainWindow
primViewColumnVisibility = self.stateProperty("primViewColumnVisibility",
default=[True, True, True, False], validator=lambda value:
len(value) == 4)
propertyViewColumnVisibility = self.stateProperty("propertyViewColumnVisibility",
default=[True, True, True], validator=lambda value: len(value) == 3)
attributeInspectorCurrentTab = self.stateProperty("attributeInspectorCurrentTab", default=PropertyIndex.VALUE)
# UI is different when --norender is used so just save the default splitter sizes.
# TODO Save the loaded state so it doesn't disappear after using --norender.
if not self._mainWindow._noRender:
stageViewWidth = self.stateProperty("stageViewWidth", default=UIDefaults.STAGE_VIEW_WIDTH)
primViewWidth = self.stateProperty("primViewWidth", default=UIDefaults.PRIM_VIEW_WIDTH)
attributeViewWidth = self.stateProperty("attributeViewWidth", default=UIDefaults.ATTRIBUTE_VIEW_WIDTH)
attributeInspectorWidth = self.stateProperty("attributeInspectorWidth", default=UIDefaults.ATTRIBUTE_INSPECTOR_WIDTH)
topHeight = self.stateProperty("topHeight", default=UIDefaults.TOP_HEIGHT)
bottomHeight = self.stateProperty("bottomHeight", default=UIDefaults.BOTTOM_HEIGHT)
viewerMode = self.stateProperty("viewerMode", default=False)
if viewerMode:
self._mainWindow._ui.primStageSplitter.setSizes([0, 1])
self._mainWindow._ui.topBottomSplitter.setSizes([1, 0])
else:
self._mainWindow._ui.primStageSplitter.setSizes(
[primViewWidth, stageViewWidth])
self._mainWindow._ui.topBottomSplitter.setSizes(
[topHeight, bottomHeight])
self._mainWindow._ui.attribBrowserInspectorSplitter.setSizes(
[attributeViewWidth, attributeInspectorWidth])
self._mainWindow._viewerModeEscapeSizes = topHeight, bottomHeight, primViewWidth, stageViewWidth
else:
self._mainWindow._ui.primStageSplitter.setSizes(
[UIDefaults.PRIM_VIEW_WIDTH, UIDefaults.STAGE_VIEW_WIDTH])
self._mainWindow._ui.attribBrowserInspectorSplitter.setSizes(
[UIDefaults.ATTRIBUTE_VIEW_WIDTH, UIDefaults.ATTRIBUTE_INSPECTOR_WIDTH])
self._mainWindow._ui.topBottomSplitter.setSizes(
[UIDefaults.TOP_HEIGHT, UIDefaults.BOTTOM_HEIGHT])
for i, visible in enumerate(primViewColumnVisibility):
self._mainWindow._ui.primView.setColumnHidden(i, not visible)
for i, visible in enumerate(propertyViewColumnVisibility):
self._mainWindow._ui.propertyView.setColumnHidden(i, not visible)
propertyIndex = attributeInspectorCurrentTab
if propertyIndex not in PropertyIndex:
propertyIndex = PropertyIndex.VALUE
self._mainWindow._ui.propertyInspector.setCurrentIndex(propertyIndex)
def onSaveState(self, state):
# UI is different when --norender is used so don't load the splitter sizes.
if not self._mainWindow._noRender:
primViewWidth, stageViewWidth = self._mainWindow._ui.primStageSplitter.sizes()
attributeViewWidth, attributeInspectorWidth = self._mainWindow._ui.attribBrowserInspectorSplitter.sizes()
topHeight, bottomHeight = self._mainWindow._ui.topBottomSplitter.sizes()
viewerMode = (bottomHeight == 0 and primViewWidth == 0)
# If viewer mode is active, save the escape sizes instead of the
# actual sizes. If there are no escape sizes, just save the defaults.
if viewerMode:
if self._mainWindow._viewerModeEscapeSizes is not None:
topHeight, bottomHeight, primViewWidth, stageViewWidth = self._mainWindow._viewerModeEscapeSizes
else:
primViewWidth = UIDefaults.STAGE_VIEW_WIDTH
stageViewWidth = UIDefaults.PRIM_VIEW_WIDTH
attributeViewWidth = UIDefaults.ATTRIBUTE_VIEW_WIDTH
attributeInspectorWidth = UIDefaults.ATTRIBUTE_INSPECTOR_WIDTH
topHeight = UIDefaults.TOP_HEIGHT
bottomHeight = UIDefaults.BOTTOM_HEIGHT
state["primViewWidth"] = primViewWidth
state["stageViewWidth"] = stageViewWidth
state["attributeViewWidth"] = attributeViewWidth
state["attributeInspectorWidth"] = attributeInspectorWidth
state["topHeight"] = topHeight
state["bottomHeight"] = bottomHeight
state["viewerMode"] = viewerMode
state["primViewColumnVisibility"] = [
not self._mainWindow._ui.primView.isColumnHidden(c)
for c in range(self._mainWindow._ui.primView.columnCount())]
state["propertyViewColumnVisibility"] = [
not self._mainWindow._ui.propertyView.isColumnHidden(c)
for c in range(self._mainWindow._ui.propertyView.columnCount())]
state["attributeInspectorCurrentTab"] = self._mainWindow._ui.propertyInspector.currentIndex()
class Blocker:
"""Object which can be used to temporarily block the execution of a body of
code. This object is a context manager, and enters a 'blocked' state when
used in a 'with' statement. The 'blocked()' method can be used to find if
the Blocker is in this 'blocked' state.
For example, this is used to prevent UI code from handling signals from the
selection data model while the UI code itself modifies selection.
"""
def __init__(self):
# A count is used rather than a 'blocked' flag to allow for nested
# blocking.
self._count = 0
def __enter__(self):
"""Enter the 'blocked' state until the context is exited."""
self._count += 1
def __exit__(self, *args):
"""Exit the 'blocked' state."""
self._count -= 1
def blocked(self):
"""Returns True if in the 'blocked' state, and False otherwise."""
return self._count > 0
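# A minimal usage sketch for Blocker (illustrative comment only, not part of
# the original file): because a count is used instead of a flag, nested 'with'
# blocks keep the object in the 'blocked' state until the outermost one exits.
#
#     _blocker = Blocker()
#     assert not _blocker.blocked()
#     with _blocker:
#         with _blocker:             # nesting is supported via the count
#             assert _blocker.blocked()
#         assert _blocker.blocked()  # still blocked by the outer context
#     assert not _blocker.blocked()  # unblocked once all contexts exit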
class AppController(QtCore.QObject):
HYDRA_DISABLED_OPTION_STRING = "HydraDisabled"
###########
# Signals #
###########
@classmethod
def clearSettings(cls):
settingsPath = cls._outputBaseDirectory()
if settingsPath is None:
return None
else:
settingsPath = os.path.join(settingsPath, 'state')
if not os.path.exists(settingsPath):
print 'INFO: ClearSettings requested, but there ' \
'were no settings currently stored.'
return None
if not os.access(settingsPath, os.W_OK):
print 'ERROR: Could not remove settings file.'
return None
else:
os.remove(settingsPath)
print 'INFO: Settings restored to default.'
def _configurePlugins(self):
with Timer() as t:
self._plugRegistry = plugin.loadPlugins(
self._usdviewApi, self._mainWindow)
if self._printTiming:
t.PrintTime("configure and load plugins.")
def _openSettings2(self, defaultSettings):
settingsPathDir = self._outputBaseDirectory()
if (settingsPathDir is None) or defaultSettings:
# Create an ephemeral settings object by withholding the file path.
self._settings2 = settings2.Settings(SETTINGS_VERSION)
else:
settings2Path = os.path.join(settingsPathDir, "state.json")
self._settings2 = settings2.Settings(SETTINGS_VERSION, settings2Path)
uiProxy = UIStateProxySource(self, self._settings2, "ui")
def __del__(self):
# This is needed to free Qt items before exit; Qt hits failed GTK
# assertions without it.
self._primToItemMap.clear()
def __init__(self, parserData, resolverContextFn):
QtCore.QObject.__init__(self)
with Timer() as uiOpenTimer:
self._primToItemMap = {}
self._itemsToPush = []
self._currentSpec = None
self._currentLayer = None
self._console = None
self._debugFlagsWindow = None
self._interpreter = None
self._parserData = parserData
self._noRender = parserData.noRender
self._noPlugins = parserData.noPlugins
self._unloaded = parserData.unloaded
self._resolverContextFn = resolverContextFn
self._debug = os.getenv('USDVIEW_DEBUG', False)
self._printTiming = parserData.timing or self._debug
self._lastViewContext = {}
if QT_BINDING == 'PySide':
self._statusFileName = 'state'
                self._deprecatedStatusFileNames = ('.usdviewrc',)
else:
self._statusFileName = 'state.%s'%QT_BINDING
self._deprecatedStatusFileNames = ('state', '.usdviewrc')
self._mallocTags = parserData.mallocTagStats
self._allowViewUpdates = True
# When viewer mode is active, the panel sizes are cached so they can
# be restored later.
self._viewerModeEscapeSizes = None
self._rendererNameOpt = parserData.renderer
if self._rendererNameOpt:
if self._rendererNameOpt == \
AppController.HYDRA_DISABLED_OPTION_STRING:
os.environ['HD_ENABLED'] = '0'
else:
os.environ['HD_DEFAULT_RENDERER'] = self._rendererNameOpt
self._mainWindow = QtWidgets.QMainWindow(None)
# Showing the window immediately prevents UI flashing.
self._mainWindow.show()
self._ui = Ui_MainWindow()
self._ui.setupUi(self._mainWindow)
self._mainWindow.setWindowTitle(parserData.usdFile)
self._statusBar = QtWidgets.QStatusBar(self._mainWindow)
self._mainWindow.setStatusBar(self._statusBar)
# Install our custom event filter. The member assignment of the
# filter is just for lifetime management
from appEventFilter import AppEventFilter
self._filterObj = AppEventFilter(self)
QtWidgets.QApplication.instance().installEventFilter(self._filterObj)
# Setup Usdview API and optionally load plugins. We do this before
# loading the stage in case a plugin wants to modify global settings
# that affect stage loading.
self._plugRegistry = None
self._usdviewApi = UsdviewApi(self)
if not self._noPlugins:
self._configurePlugins()
# read the stage here
stage = self._openStage(
self._parserData.usdFile, self._parserData.sessionLayer,
self._parserData.populationMask)
if not stage:
sys.exit(0)
if not stage.GetPseudoRoot():
print parserData.usdFile, 'has no prims; exiting.'
sys.exit(0)
self._openSettings2(parserData.defaultSettings)
self._dataModel = UsdviewDataModel(
self._printTiming, self._settings2)
self._dataModel.signalPrimsChanged.connect(
self._onPrimsChanged)
self._dataModel.stage = stage
self._primViewSelectionBlocker = Blocker()
self._propertyViewSelectionBlocker = Blocker()
self._dataModel.selection.signalPrimSelectionChanged.connect(
self._primSelectionChanged)
self._dataModel.selection.signalPropSelectionChanged.connect(
self._propSelectionChanged)
self._dataModel.selection.signalComputedPropSelectionChanged.connect(
self._propSelectionChanged)
self._initialSelectPrim = self._dataModel.stage.GetPrimAtPath(
parserData.primPath)
if not self._initialSelectPrim:
print 'Could not find prim at path <%s> to select. '\
'Ignoring...' % parserData.primPath
self._initialSelectPrim = None
try:
self._dataModel.viewSettings.complexity = parserData.complexity
except ValueError:
fallback = RefinementComplexities.LOW
sys.stderr.write(("Error: Invalid complexity '{}'. "
"Using fallback '{}' instead.\n").format(
parserData.complexity, fallback.id))
self._dataModel.viewSettings.complexity = fallback
self._hasPrimResync = False
self._timeSamples = None
self._stageView = None
self._startingPrimCamera = None
if (parserData.camera.IsAbsolutePath() or
parserData.camera.pathElementCount > 1):
self._startingPrimCameraName = None
self._startingPrimCameraPath = parserData.camera
else:
self._startingPrimCameraName = parserData.camera.pathString
self._startingPrimCameraPath = None
settingsPathDir = self._outputBaseDirectory()
if settingsPathDir is None or parserData.defaultSettings:
# Create an ephemeral settings object with a non existent filepath
self._settings = settings.Settings('', seq=None, ephemeral=True)
else:
settingsPath = os.path.join(settingsPathDir, self._statusFileName)
for deprecatedName in self._deprecatedStatusFileNames:
deprecatedSettingsPath = \
os.path.join(settingsPathDir, deprecatedName)
if (os.path.isfile(deprecatedSettingsPath) and
not os.path.isfile(settingsPath)):
warning = ('\nWARNING: The settings file at: '
+ str(deprecatedSettingsPath) + ' is deprecated.\n'
+ 'These settings are not being used, the new '
+ 'settings file will be located at: '
+ str(settingsPath) + '.\n')
print warning
break
self._settings = settings.Settings(settingsPath)
try:
self._settings.load()
except IOError:
# try to force out a new settings file
try:
self._settings.save()
except:
settings.EmitWarning(settingsPath)
except EOFError:
# try to force out a new settings file
try:
self._settings.save()
except:
settings.EmitWarning(settingsPath)
except:
settings.EmitWarning(settingsPath)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.BusyCursor)
self._timer = QtCore.QTimer(self)
# Timeout interval in ms. We set it to 0 so it runs as fast as
# possible. In advanceFrameForPlayback we use the sleep() call
# to slow down rendering to self.framesPerSecond fps.
self._timer.setInterval(0)
self._lastFrameTime = time()
# Initialize the upper HUD info
self._upperHUDInfo = dict()
# declare dictionary keys for the fps info too
self._fpsHUDKeys = (HUDEntries.RENDER, HUDEntries.PLAYBACK)
# Initialize fps HUD with empty strings
self._fpsHUDInfo = dict(zip(self._fpsHUDKeys,
["N/A", "N/A"]))
self._startTime = self._endTime = time()
# This timer is used to coalesce the primView resizes
# in certain cases. e.g. When you
# deactivate/activate a prim.
self._primViewResizeTimer = QtCore.QTimer(self)
self._primViewResizeTimer.setInterval(0)
self._primViewResizeTimer.setSingleShot(True)
self._primViewResizeTimer.timeout.connect(self._resizePrimView)
# This timer coalesces GUI resets when the USD stage is modified or
# reloaded.
self._guiResetTimer = QtCore.QTimer(self)
self._guiResetTimer.setInterval(0)
self._guiResetTimer.setSingleShot(True)
self._guiResetTimer.timeout.connect(self._resetGUI)
# Idle timer to push off-screen data to the UI.
self._primViewUpdateTimer = QtCore.QTimer(self)
self._primViewUpdateTimer.setInterval(0)
self._primViewUpdateTimer.timeout.connect(self._updatePrimView)
# This creates the _stageView and restores state from settings file
self._resetSettings()
# This is for validating frame values input on the "Frame" line edit
validator = QtGui.QDoubleValidator(self)
self._ui.frameField.setValidator(validator)
self._ui.rangeEnd.setValidator(validator)
self._ui.rangeBegin.setValidator(validator)
stepValidator = QtGui.QDoubleValidator(self)
stepValidator.setRange(0.01, 1e7, 2)
self._ui.stepSize.setValidator(stepValidator)
# This causes the last column of the attribute view (the value)
# to be stretched to fill the available space
self._ui.propertyView.header().setStretchLastSection(True)
self._ui.propertyView.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows)
self._ui.primView.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows)
# This allows ctrl and shift clicking for multi-selecting
self._ui.propertyView.setSelectionMode(
QtWidgets.QAbstractItemView.ExtendedSelection)
self._ui.propertyView.setHorizontalScrollMode(
QtWidgets.QAbstractItemView.ScrollPerPixel)
self._ui.frameSlider.setTracking(
self._dataModel.viewSettings.redrawOnScrub)
self._ui.colorGroup = QtWidgets.QActionGroup(self)
self._ui.colorGroup.setExclusive(True)
self._clearColorActions = (
self._ui.actionBlack,
self._ui.actionGrey_Dark,
self._ui.actionGrey_Light,
self._ui.actionWhite)
for action in self._clearColorActions:
self._ui.colorGroup.addAction(action)
self._ui.threePointLights = QtWidgets.QActionGroup(self)
self._ui.threePointLights.setExclusive(False)
self._threePointLightsActions = (
self._ui.actionKey,
self._ui.actionFill,
self._ui.actionBack)
for action in self._threePointLightsActions:
self._ui.threePointLights.addAction(action)
self._ui.renderModeActionGroup = QtWidgets.QActionGroup(self)
self._ui.renderModeActionGroup.setExclusive(True)
self._renderModeActions = (
self._ui.actionWireframe,
self._ui.actionWireframeOnSurface,
self._ui.actionSmooth_Shaded,
self._ui.actionFlat_Shaded,
self._ui.actionPoints,
self._ui.actionGeom_Only,
self._ui.actionGeom_Smooth,
self._ui.actionGeom_Flat,
self._ui.actionHidden_Surface_Wireframe)
for action in self._renderModeActions:
self._ui.renderModeActionGroup.addAction(action)
self._ui.colorCorrectionActionGroup = QtWidgets.QActionGroup(self)
self._ui.colorCorrectionActionGroup.setExclusive(True)
self._colorCorrectionActions = (
self._ui.actionNoColorCorrection,
self._ui.actionSRGBColorCorrection,
self._ui.actionOpenColorIO)
for action in self._colorCorrectionActions:
self._ui.colorCorrectionActionGroup.addAction(action)
# XXX This should be a validator in ViewSettingsDataModel.
if self._dataModel.viewSettings.renderMode not in RenderModes:
fallback = str(
self._ui.renderModeActionGroup.actions()[0].text())
print "Warning: Unknown render mode '%s', falling back to '%s'" % (
self._dataModel.viewSettings.renderMode, fallback)
self._dataModel.viewSettings.renderMode = fallback
self._ui.pickModeActionGroup = QtWidgets.QActionGroup(self)
self._ui.pickModeActionGroup.setExclusive(True)
self._pickModeActions = (
self._ui.actionPick_Prims,
self._ui.actionPick_Models,
self._ui.actionPick_Instances)
for action in self._pickModeActions:
self._ui.pickModeActionGroup.addAction(action)
# XXX This should be a validator in ViewSettingsDataModel.
if self._dataModel.viewSettings.pickMode not in PickModes:
fallback = str(self._ui.pickModeActionGroup.actions()[0].text())
print "Warning: Unknown pick mode '%s', falling back to '%s'" % (
self._dataModel.viewSettings.pickMode, fallback)
self._dataModel.viewSettings.pickMode = fallback
self._ui.selHighlightModeActionGroup = QtWidgets.QActionGroup(self)
self._ui.selHighlightModeActionGroup.setExclusive(True)
self._selHighlightActions = (
self._ui.actionNever,
self._ui.actionOnly_when_paused,
self._ui.actionAlways)
for action in self._selHighlightActions:
self._ui.selHighlightModeActionGroup.addAction(action)
self._ui.highlightColorActionGroup = QtWidgets.QActionGroup(self)
self._ui.highlightColorActionGroup.setExclusive(True)
self._selHighlightColorActions = (
self._ui.actionSelYellow,
self._ui.actionSelCyan,
self._ui.actionSelWhite)
for action in self._selHighlightColorActions:
self._ui.highlightColorActionGroup.addAction(action)
self._ui.interpolationActionGroup = QtWidgets.QActionGroup(self)
self._ui.interpolationActionGroup.setExclusive(True)
for interpolationType in Usd.InterpolationType.allValues:
action = self._ui.menuInterpolation.addAction(interpolationType.displayName)
action.setCheckable(True)
action.setChecked(
self._dataModel.stage.GetInterpolationType() == interpolationType)
self._ui.interpolationActionGroup.addAction(action)
self._ui.primViewDepthGroup = QtWidgets.QActionGroup(self)
for i in range(1, 9):
action = getattr(self._ui, "actionLevel_" + str(i))
self._ui.primViewDepthGroup.addAction(action)
# setup animation objects for the primView and propertyView
self._propertyLegendAnim = QtCore.QPropertyAnimation(
self._ui.propertyLegendContainer, "maximumHeight")
self._primLegendAnim = QtCore.QPropertyAnimation(
self._ui.primLegendContainer, "maximumHeight")
# set the context menu policy for the prim browser and attribute
# inspector headers. This is so we can have a context menu on the
# headers that allows you to select which columns are visible.
self._ui.propertyView.header()\
.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self._ui.primView.header()\
.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Set custom context menu for attribute browser
self._ui.propertyView\
.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Set custom context menu for layer stack browser
self._ui.layerStackView\
.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Set custom context menu for composition tree browser
self._ui.compositionTreeWidget\
.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Arc path is the most likely to need stretch.
twh = self._ui.compositionTreeWidget.header()
twh.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
twh.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
twh.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
twh.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)
# Set the prim view header to have a fixed size type and vis columns
nvh = self._ui.primView.header()
nvh.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
nvh.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
nvh.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
nvh.resizeSection(3, 116)
nvh.setSectionResizeMode(3, QtWidgets.QHeaderView.Fixed)
pvh = self._ui.propertyView.header()
pvh.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
pvh.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
pvh.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
# XXX:
# To avoid QTBUG-12850 (https://bugreports.qt.io/browse/QTBUG-12850),
# we force the horizontal scrollbar to always be visible for all
# QTableWidget widgets in use.
self._ui.primView.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self._ui.propertyView.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self._ui.metadataView.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self._ui.layerStackView.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self._ui.attributeValueEditor.setAppController(self)
self._ui.primView.InitDrawModeDelegate(self)
self._ui.currentPathWidget.editingFinished.connect(
self._currentPathChanged)
# XXX:
# To avoid PYSIDE-79 (https://bugreports.qt.io/browse/PYSIDE-79)
# with Qt4/PySide, we must hold the prim view's selectionModel
# in a local variable before connecting its signals.
primViewSelModel = self._ui.primView.selectionModel()
primViewSelModel.selectionChanged.connect(self._selectionChanged)
self._ui.primView.itemClicked.connect(self._itemClicked)
self._ui.primView.itemPressed.connect(self._itemPressed)
self._ui.primView.header().customContextMenuRequested.connect(
self._primViewHeaderContextMenu)
self._timer.timeout.connect(self._advanceFrameForPlayback)
self._ui.primView.customContextMenuRequested.connect(
self._primViewContextMenu)
self._ui.primView.expanded.connect(self._primViewExpanded)
self._ui.frameSlider.valueChanged.connect(self._setFrameIndex)
self._ui.frameSlider.sliderMoved.connect(self._sliderMoved)
self._ui.frameSlider.sliderReleased.connect(self._updateGUIForFrameChange)
self._ui.frameField.editingFinished.connect(self._frameStringChanged)
self._ui.rangeBegin.editingFinished.connect(self._rangeBeginChanged)
self._ui.stepSize.editingFinished.connect(self._stepSizeChanged)
self._ui.rangeEnd.editingFinished.connect(self._rangeEndChanged)
self._ui.actionFrame_Forward.triggered.connect(self._advanceFrame)
self._ui.actionFrame_Backwards.triggered.connect(self._retreatFrame)
self._ui.actionReset_View.triggered.connect(lambda: self._resetView())
self._ui.topBottomSplitter.splitterMoved.connect(self._cacheViewerModeEscapeSizes)
self._ui.primStageSplitter.splitterMoved.connect(self._cacheViewerModeEscapeSizes)
self._ui.actionToggle_Viewer_Mode.triggered.connect(
self._toggleViewerMode)
self._ui.showBBoxes.triggered.connect(self._toggleShowBBoxes)
self._ui.showAABBox.triggered.connect(self._toggleShowAABBox)
self._ui.showOBBox.triggered.connect(self._toggleShowOBBox)
self._ui.showBBoxPlayback.triggered.connect(
self._toggleShowBBoxPlayback)
self._ui.useExtentsHint.triggered.connect(self._setUseExtentsHint)
self._ui.showInterpreter.triggered.connect(self._showInterpreter)
self._ui.showDebugFlags.triggered.connect(self._showDebugFlags)
self._ui.redrawOnScrub.toggled.connect(self._redrawOptionToggled)
if self._stageView:
self._ui.actionAuto_Compute_Clipping_Planes.triggered.connect(
self._toggleAutoComputeClippingPlanes)
self._ui.actionAdjust_Clipping.triggered[bool].connect(
self._adjustClippingPlanes)
self._ui.actionAdjust_Default_Material.triggered[bool].connect(
self._adjustDefaultMaterial)
self._ui.actionOpen.triggered.connect(self._openFile)
self._ui.actionSave_Overrides_As.triggered.connect(
self._saveOverridesAs)
self._ui.actionSave_Flattened_As.triggered.connect(
self._saveFlattenedAs)
# Setup quit actions to ensure _cleanAndClose is only invoked once.
self._ui.actionQuit.triggered.connect(QtWidgets.QApplication.instance().quit)
QtWidgets.QApplication.instance().aboutToQuit.connect(self._cleanAndClose)
self._ui.actionReopen_Stage.triggered.connect(self._reopenStage)
self._ui.actionReload_All_Layers.triggered.connect(self._reloadStage)
self._ui.actionFrame_Selection.triggered.connect(self._frameSelection)
self._ui.actionToggle_Framed_View.triggered.connect(self._toggleFramedView)
self._ui.actionAdjust_FOV.triggered.connect(self._adjustFOV)
self._ui.complexityGroup = QtWidgets.QActionGroup(self._mainWindow)
self._ui.complexityGroup.setExclusive(True)
self._complexityActions = (
self._ui.actionLow,
self._ui.actionMedium,
self._ui.actionHigh,
self._ui.actionVery_High)
for action in self._complexityActions:
self._ui.complexityGroup.addAction(action)
self._ui.complexityGroup.triggered.connect(self._changeComplexity)
self._ui.actionDisplay_Guide.triggered.connect(
self._toggleDisplayGuide)
self._ui.actionDisplay_Proxy.triggered.connect(
self._toggleDisplayProxy)
self._ui.actionDisplay_Render.triggered.connect(
self._toggleDisplayRender)
self._ui.actionDisplay_Camera_Oracles.triggered.connect(
self._toggleDisplayCameraOracles)
self._ui.actionDisplay_PrimId.triggered.connect(
self._toggleDisplayPrimId)
self._ui.actionEnable_Scene_Materials.triggered.connect(
self._toggleEnableSceneMaterials)
self._ui.actionCull_Backfaces.triggered.connect(
self._toggleCullBackfaces)
self._ui.propertyInspector.currentChanged.connect(
self._updatePropertyInspector)
self._ui.propertyView.itemSelectionChanged.connect(
self._propertyViewSelectionChanged)
self._ui.propertyView.currentItemChanged.connect(
self._propertyViewCurrentItemChanged)
self._ui.propertyView.header().customContextMenuRequested.\
connect(self._propertyViewHeaderContextMenu)
self._ui.propertyView.customContextMenuRequested.connect(
self._propertyViewContextMenu)
self._ui.layerStackView.customContextMenuRequested.connect(
self._layerStackContextMenu)
self._ui.compositionTreeWidget.customContextMenuRequested.connect(
self._compositionTreeContextMenu)
self._ui.compositionTreeWidget.currentItemChanged.connect(
self._onCompositionSelectionChanged)
self._ui.renderModeActionGroup.triggered.connect(self._changeRenderMode)
self._ui.colorCorrectionActionGroup.triggered.connect(
self._changeColorCorrection)
self._ui.pickModeActionGroup.triggered.connect(self._changePickMode)
self._ui.selHighlightModeActionGroup.triggered.connect(
self._changeSelHighlightMode)
self._ui.highlightColorActionGroup.triggered.connect(
self._changeHighlightColor)
self._ui.interpolationActionGroup.triggered.connect(
self._changeInterpolationType)
self._ui.actionAmbient_Only.triggered[bool].connect(
self._ambientOnlyClicked)
self._ui.actionKey.triggered[bool].connect(self._onKeyLightClicked)
self._ui.actionFill.triggered[bool].connect(self._onFillLightClicked)
self._ui.actionBack.triggered[bool].connect(self._onBackLightClicked)
self._ui.colorGroup.triggered.connect(self._changeBgColor)
# Configuring the PrimView's Show menu. In addition to the
# "designed" menu items, we inject a PrimView HeaderContextMenu
self._ui.primViewDepthGroup.triggered.connect(self._changePrimViewDepth)
self._ui.actionExpand_All.triggered.connect(
lambda: self._expandToDepth(1000000))
self._ui.actionCollapse_All.triggered.connect(
self._ui.primView.collapseAll)
self._ui.actionShow_Inactive_Prims.triggered.connect(
self._toggleShowInactivePrims)
self._ui.actionShow_All_Master_Prims.triggered.connect(
self._toggleShowMasterPrims)
self._ui.actionShow_Undefined_Prims.triggered.connect(
self._toggleShowUndefinedPrims)
self._ui.actionShow_Abstract_Prims.triggered.connect(
self._toggleShowAbstractPrims)
# Since setting column visibility is probably not a common
# operation, it's actually good to have Columns at the end.
self._ui.menuShow.addSeparator()
self._ui.menuShow.addMenu(HeaderContextMenu(self._ui.primView))
self._ui.actionRollover_Prim_Info.triggered.connect(
self._toggleRolloverPrimInfo)
self._ui.primViewLineEdit.returnPressed.connect(
self._ui.primViewFindNext.click)
self._ui.primViewFindNext.clicked.connect(self._primViewFindNext)
self._ui.attrViewLineEdit.returnPressed.connect(
self._ui.attrViewFindNext.click)
self._ui.attrViewFindNext.clicked.connect(self._attrViewFindNext)
self._ui.primLegendQButton.clicked.connect(
self._primLegendToggleCollapse)
self._ui.propertyLegendQButton.clicked.connect(
self._propertyLegendToggleCollapse)
self._ui.playButton.clicked.connect(self._playClicked)
self._ui.actionCameraMask_Full.triggered.connect(
self._updateCameraMaskMenu)
self._ui.actionCameraMask_Partial.triggered.connect(
self._updateCameraMaskMenu)
self._ui.actionCameraMask_None.triggered.connect(
self._updateCameraMaskMenu)
self._ui.actionCameraMask_Outline.triggered.connect(
self._updateCameraMaskOutlineMenu)
self._ui.actionCameraMask_Color.triggered.connect(
self._pickCameraMaskColor)
self._ui.actionCameraReticles_Inside.triggered.connect(
self._updateCameraReticlesInsideMenu)
self._ui.actionCameraReticles_Outside.triggered.connect(
self._updateCameraReticlesOutsideMenu)
self._ui.actionCameraReticles_Color.triggered.connect(
self._pickCameraReticlesColor)
self._ui.actionHUD.triggered.connect(self._showHUDChanged)
self._ui.actionHUD_Info.triggered.connect(self._showHUD_InfoChanged)
self._ui.actionHUD_Complexity.triggered.connect(
self._showHUD_ComplexityChanged)
self._ui.actionHUD_Performance.triggered.connect(
self._showHUD_PerformanceChanged)
self._ui.actionHUD_GPUstats.triggered.connect(
self._showHUD_GPUstatsChanged)
self._mainWindow.addAction(self._ui.actionIncrementComplexity1)
self._mainWindow.addAction(self._ui.actionIncrementComplexity2)
self._mainWindow.addAction(self._ui.actionDecrementComplexity)
self._ui.actionIncrementComplexity1.triggered.connect(
self._incrementComplexity)
self._ui.actionIncrementComplexity2.triggered.connect(
self._incrementComplexity)
self._ui.actionDecrementComplexity.triggered.connect(
self._decrementComplexity)
self._ui.attributeValueEditor.editComplete.connect(self.editComplete)
# Edit Prim menu
self._ui.menuEdit.aboutToShow.connect(self._updateEditMenu)
self._ui.menuNavigation.aboutToShow.connect(self._updateNavigationMenu)
self._ui.actionFind_Prims.triggered.connect(
self._ui.primViewLineEdit.setFocus)
self._ui.actionSelect_Stage_Root.triggered.connect(
self.selectPseudoroot)
self._ui.actionSelect_Model_Root.triggered.connect(
self.selectEnclosingModel)
self._ui.actionSelect_Bound_Preview_Material.triggered.connect(
self.selectBoundPreviewMaterial)
self._ui.actionSelect_Bound_Full_Material.triggered.connect(
self.selectBoundFullMaterial)
self._ui.actionSelect_Preview_Binding_Relationship.triggered.connect(
self.selectPreviewBindingRel)
self._ui.actionSelect_Full_Binding_Relationship.triggered.connect(
self.selectFullBindingRel)
self._ui.actionMake_Visible.triggered.connect(self.visSelectedPrims)
# Add extra, Presto-inspired shortcut for Make Visible
self._ui.actionMake_Visible.setShortcuts(["Shift+H", "Ctrl+Shift+H"])
self._ui.actionMake_Invisible.triggered.connect(self.invisSelectedPrims)
self._ui.actionVis_Only.triggered.connect(self.visOnlySelectedPrims)
self._ui.actionRemove_Session_Visibility.triggered.connect(
self.removeVisSelectedPrims)
self._ui.actionReset_All_Session_Visibility.triggered.connect(
self.resetSessionVisibility)
self._ui.actionLoad.triggered.connect(self.loadSelectedPrims)
self._ui.actionUnload.triggered.connect(self.unloadSelectedPrims)
self._ui.actionActivate.triggered.connect(self.activateSelectedPrims)
self._ui.actionDeactivate.triggered.connect(self.deactivateSelectedPrims)
# We refresh as if all view settings changed. In the future, we
# should do more granular refreshes. This first requires more
# granular signals from ViewSettingsDataModel.
self._dataModel.viewSettings.signalSettingChanged.connect(
self._viewSettingChanged)
# Update view menu actions and submenus with initial state.
self._refreshViewMenubar()
# We manually call processEvents() here to make sure that the prim
# browser and other widgetry get drawn before we draw the first image in
# the viewer, which might take a long time.
if self._stageView:
self._stageView.setUpdatesEnabled(False)
self._mainWindow.update()
QtWidgets.QApplication.processEvents()
if self._printTiming:
uiOpenTimer.PrintTime('bring up the UI')
self._drawFirstImage()
QtWidgets.QApplication.restoreOverrideCursor()
def _drawFirstImage(self):
if self._stageView:
self._stageView.setUpdatesEnabled(True)
with BusyContext():
try:
self._resetView(self._initialSelectPrim)
except Exception:
pass
QtWidgets.QApplication.processEvents()
# configure render plugins after stageView initialized its renderer.
self._configureRendererPlugins()
self._configureColorManagement()
if self._mallocTags == 'stageAndImaging':
DumpMallocTags(self._dataModel.stage,
"stage-loading and imaging")
def statusMessage(self, msg, timeout = 0):
self._statusBar.showMessage(msg, timeout * 1000)
def editComplete(self, msg):
title = self._mainWindow.windowTitle()
if title[-1] != '*':
self._mainWindow.setWindowTitle(title + ' *')
self.statusMessage(msg, 12)
with Timer() as t:
if self._stageView:
self._stageView.updateView(resetCam=False, forceComputeBBox=True)
if self._printTiming:
t.PrintTime("'%s'" % msg)
def _openStage(self, usdFilePath, sessionFilePath, populationMaskPaths):
def _GetFormattedError(reasons=[]):
err = ("Error: Unable to open stage '{0}'\n".format(usdFilePath))
if reasons:
err += "\n".join(reasons) + "\n"
return err
if not Ar.GetResolver().Resolve(usdFilePath):
sys.stderr.write(_GetFormattedError(["File not found"]))
sys.exit(1)
if self._mallocTags != 'none':
Tf.MallocTag.Initialize()
with Timer() as t:
loadSet = Usd.Stage.LoadNone if self._unloaded else Usd.Stage.LoadAll
popMask = (None if populationMaskPaths is None else
Usd.StagePopulationMask())
            # Open as a layer first to make sure it's a valid file format
try:
layer = Sdf.Layer.FindOrOpen(usdFilePath)
except Tf.ErrorException as e:
sys.stderr.write(_GetFormattedError(
[err.commentary.strip() for err in e.args]))
sys.exit(1)
if sessionFilePath:
try:
sessionLayer = Sdf.Layer.Find(sessionFilePath)
if sessionLayer:
sessionLayer.Reload()
else:
sessionLayer = Sdf.Layer.FindOrOpen(sessionFilePath)
except Tf.ErrorException as e:
sys.stderr.write(_GetFormattedError(
[err.commentary.strip() for err in e.args]))
sys.exit(1)
else:
sessionLayer = Sdf.Layer.CreateAnonymous()
if popMask:
for p in populationMaskPaths:
popMask.Add(p)
stage = Usd.Stage.OpenMasked(layer,
sessionLayer,
self._resolverContextFn(usdFilePath),
popMask, loadSet)
else:
stage = Usd.Stage.Open(layer,
sessionLayer,
self._resolverContextFn(usdFilePath),
loadSet)
if not stage:
sys.stderr.write(_GetFormattedError())
else:
if self._printTiming:
t.PrintTime('open stage "%s"' % usdFilePath)
stage.SetEditTarget(stage.GetSessionLayer())
if self._mallocTags == 'stage':
DumpMallocTags(stage, "stage-loading")
return stage
def _closeStage(self):
# Close the USD stage.
if self._stageView:
self._stageView.closeRenderer()
self._dataModel.stage = None
def _setPlayShortcut(self):
self._ui.playButton.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Space))
# Non-topology dependent UI changes
def _reloadFixedUI(self, resetStageDataOnly=False):
# If animation is playing, stop it.
if self._dataModel.playing:
self._ui.playButton.click()
# frame range supplied by user
ff = self._parserData.firstframe
lf = self._parserData.lastframe
# frame range supplied by stage
stageStartTimeCode = self._dataModel.stage.GetStartTimeCode()
stageEndTimeCode = self._dataModel.stage.GetEndTimeCode()
# final range results
self.realStartTimeCode = None
self.realEndTimeCode = None
self.framesPerSecond = self._dataModel.stage.GetFramesPerSecond()
if not resetStageDataOnly:
self.step = self._dataModel.stage.GetTimeCodesPerSecond() / self.framesPerSecond
self._ui.stepSize.setText(str(self.step))
        # if either option is provided (firstframe or lastframe), we use it
if ff is not None and lf is not None:
self.realStartTimeCode = ff
self.realEndTimeCode = lf
elif ff is not None:
self.realStartTimeCode = ff
self.realEndTimeCode = stageEndTimeCode
elif lf is not None:
self.realStartTimeCode = stageStartTimeCode
self.realEndTimeCode = lf
elif self._dataModel.stage.HasAuthoredTimeCodeRange():
self.realStartTimeCode = stageStartTimeCode
self.realEndTimeCode = stageEndTimeCode
self._ui.stageBegin.setText(str(stageStartTimeCode))
self._ui.stageEnd.setText(str(stageEndTimeCode))
# Use a valid current frame supplied by user, or allow _UpdateTimeSamples
# to set the current frame.
cf = self._parserData.currentframe
if cf:
if (cf < self.realStartTimeCode or cf > self.realEndTimeCode):
sys.stderr.write('Warning: Invalid current frame specified (%s)\n' % (cf))
else:
self._dataModel.currentFrame = Usd.TimeCode(cf)
self._UpdateTimeSamples(resetStageDataOnly)
def _UpdateTimeSamples(self, resetStageDataOnly=False):
if self.realStartTimeCode is not None and self.realEndTimeCode is not None:
if self.realStartTimeCode > self.realEndTimeCode:
sys.stderr.write('Warning: Invalid frame range (%s, %s)\n'
% (self.realStartTimeCode, self.realEndTimeCode))
self._timeSamples = []
else:
self._timeSamples = Drange(self.realStartTimeCode,
self.realEndTimeCode,
self.step)
else:
self._timeSamples = []
self._geomCounts = dict()
self._hasTimeSamples = (len(self._timeSamples) > 0)
self._setPlaybackAvailability() # this sets self._playbackAvailable
if self._hasTimeSamples:
self._ui.rangeBegin.setText(str(self._timeSamples[0]))
self._ui.rangeEnd.setText(str(self._timeSamples[-1]))
if ( self._dataModel.currentFrame.IsDefault() or
self._dataModel.currentFrame < self._timeSamples[0] ):
self._dataModel.currentFrame = Usd.TimeCode(self._timeSamples[0])
if self._dataModel.currentFrame > self._timeSamples[-1]:
self._dataModel.currentFrame = Usd.TimeCode(self._timeSamples[-1])
else:
self._dataModel.currentFrame = Usd.TimeCode(0.0)
if not resetStageDataOnly:
self._ui.frameField.setText(
str(self._dataModel.currentFrame.GetValue()))
if self._playbackAvailable:
if not resetStageDataOnly:
self._ui.frameSlider.setRange(0, len(self._timeSamples) - 1)
self._ui.frameSlider.setValue(0)
self._setPlayShortcut()
self._ui.playButton.setCheckable(True)
# Ensure the play button state respects the current playback state
self._ui.playButton.setChecked(self._dataModel.playing)
def _clearCaches(self, preserveCamera=False):
"""Clears value and computation caches maintained by the controller.
Does NOT initiate any GUI updates"""
self._geomCounts = dict()
self._dataModel._clearCaches()
self._refreshCameraListAndMenu(preserveCurrCamera = preserveCamera)
@staticmethod
def GetRendererOptionChoices():
ids = UsdImagingGL.Engine.GetRendererPlugins()
choices = []
if ids:
choices = [UsdImagingGL.Engine.GetRendererDisplayName(x)
for x in ids]
choices.append(AppController.HYDRA_DISABLED_OPTION_STRING)
else:
choices = [AppController.HYDRA_DISABLED_OPTION_STRING]
return choices
# Render plugin support
def _rendererPluginChanged(self, plugin):
if self._stageView:
if not self._stageView.SetRendererPlugin(plugin):
# If SetRendererPlugin failed, we need to reset the check mark
# to whatever the currently loaded renderer is.
for action in self._ui.rendererPluginActionGroup.actions():
if action.text() == self._stageView.rendererDisplayName:
action.setChecked(True)
break
# Then display an error message to let the user know something
# went wrong, and disable the menu item so it can't be selected
# again.
for action in self._ui.rendererPluginActionGroup.actions():
if action.pluginType == plugin:
self.statusMessage(
'Renderer not supported: %s' % action.text())
action.setText(action.text() + " (unsupported)")
action.setDisabled(True)
break
else:
# Refresh the AOV menu and settings menu
self._configureRendererAovs()
self._configureRendererSettings()
def _configureRendererPlugins(self):
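        # Build the Renderer menu from the plugins the stage view reports,
        # check the one that is currently active, and refresh the dependent
        # AOV and settings menus.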
if self._stageView:
self._ui.rendererPluginActionGroup = QtWidgets.QActionGroup(self)
self._ui.rendererPluginActionGroup.setExclusive(True)
pluginTypes = self._stageView.GetRendererPlugins()
for pluginType in pluginTypes:
name = self._stageView.GetRendererDisplayName(pluginType)
action = self._ui.menuRendererPlugin.addAction(name)
action.setCheckable(True)
action.pluginType = pluginType
self._ui.rendererPluginActionGroup.addAction(action)
action.triggered[bool].connect(lambda _, pluginType=pluginType:
self._rendererPluginChanged(pluginType))
# Now set the checked box on the current renderer (it should
# have been set by now).
currentRendererId = self._stageView.GetCurrentRendererId()
foundPlugin = False
for action in self._ui.rendererPluginActionGroup.actions():
if action.pluginType == currentRendererId:
action.setChecked(True)
foundPlugin = True
break
# Disable the menu if no plugins were found
self._ui.menuRendererPlugin.setEnabled(foundPlugin)
# Refresh the AOV menu and settings menu
self._configureRendererAovs()
self._configureRendererSettings()
# Renderer AOV support
def _rendererAovChanged(self, aov):
if self._stageView:
self._stageView.SetRendererAov(aov)
self._ui.aovOtherAction.setText("Other...")
def _configureRendererAovs(self):
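        # Rebuild the AOV menu from the AOVs the current renderer exposes,
        # defaulting the check mark to "color" and appending an "Other..."
        # entry for free-form AOV names.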
if self._stageView:
self._ui.rendererAovActionGroup = QtWidgets.QActionGroup(self)
self._ui.rendererAovActionGroup.setExclusive(True)
self._ui.menuRendererAovs.clear()
aovs = self._stageView.GetRendererAovs()
for aov in aovs:
action = self._ui.menuRendererAovs.addAction(aov)
action.setCheckable(True)
if (aov == "color"):
action.setChecked(True)
action.aov = aov
self._ui.rendererAovActionGroup.addAction(action)
action.triggered[bool].connect(lambda _, aov=aov:
self._rendererAovChanged(aov))
self._ui.menuRendererAovs.addSeparator()
self._ui.aovOtherAction = self._ui.menuRendererAovs.addAction("Other...")
self._ui.aovOtherAction.setCheckable(True)
self._ui.aovOtherAction.aov = "Other"
self._ui.rendererAovActionGroup.addAction(self._ui.aovOtherAction)
self._ui.aovOtherAction.triggered[bool].connect(self._otherAov)
self._ui.menuRendererAovs.setEnabled(len(aovs) != 0)
def _otherAov(self):
# If we've already selected "Other..." as an AOV, populate the current
# AOV name.
initial = ""
if self._ui.aovOtherAction.text() != "Other...":
initial = self._stageView.rendererAovName
aov, ok = QtWidgets.QInputDialog.getText(self._mainWindow, "Other AOVs",
"Enter the aov name. Visualize primvars with \"primvars:name\".",
QtWidgets.QLineEdit.Normal, initial)
if (ok and len(aov) > 0):
self._rendererAovChanged(str(aov))
self._ui.aovOtherAction.setText("Other (%r)..." % str(aov))
else:
for action in self._ui.rendererAovActionGroup.actions():
if action.text() == self._stageView.rendererAovName:
action.setChecked(True)
break
def _rendererSettingsFlagChanged(self, action):
if self._stageView:
self._stageView.SetRendererSetting(action.key, action.isChecked())
def _configureRendererSettings(self):
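        # Rebuild the renderer settings menu: boolean (FLAG) settings become
        # checkable actions, and a "More..." entry opens a dialog for the
        # remaining setting types.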
if self._stageView:
self._ui.menuRendererSettings.clear()
self._ui.settingsFlagActions = []
settings = self._stageView.GetRendererSettingsList()
moreSettings = False
for setting in settings:
if setting.type != UsdImagingGL.RendererSettingType.FLAG:
moreSettings = True
continue
action = self._ui.menuRendererSettings.addAction(setting.name)
action.setCheckable(True)
action.key = str(setting.key)
action.setChecked(self._stageView.GetRendererSetting(setting.key))
action.triggered[bool].connect(lambda _, action=action:
self._rendererSettingsFlagChanged(action))
self._ui.settingsFlagActions.append(action)
if moreSettings:
self._ui.menuRendererSettings.addSeparator()
self._ui.settingsMoreAction = self._ui.menuRendererSettings.addAction("More...")
self._ui.settingsMoreAction.setCheckable(False)
self._ui.settingsMoreAction.triggered[bool].connect(self._moreRendererSettings)
self._ui.menuRendererSettings.setEnabled(len(settings) != 0)
# Close the old "More..." dialog if it's still open
if hasattr(self._ui, 'settingsMoreDialog'):
self._ui.settingsMoreDialog.reject()
def _moreRendererSettings(self):
# Recreate the settings dialog
self._ui.settingsMoreDialog = QtWidgets.QDialog(self._mainWindow)
self._ui.settingsMoreDialog.setWindowTitle("Hydra Settings")
self._ui.settingsMoreWidgets = []
layout = QtWidgets.QVBoxLayout()
# Add settings
groupBox = QtWidgets.QGroupBox()
formLayout = QtWidgets.QFormLayout()
groupBox.setLayout(formLayout)
layout.addWidget(groupBox)
formLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
formLayout.setFormAlignment(QtCore.Qt.AlignRight)
settings = self._stageView.GetRendererSettingsList()
for setting in settings:
if setting.type == UsdImagingGL.RendererSettingType.FLAG:
checkBox = QtWidgets.QCheckBox()
checkBox.setChecked(self._stageView.GetRendererSetting(setting.key))
checkBox.key = str(setting.key)
checkBox.defValue = setting.defValue
formLayout.addRow(setting.name, checkBox)
self._ui.settingsMoreWidgets.append(checkBox)
if setting.type == UsdImagingGL.RendererSettingType.INT:
spinBox = QtWidgets.QSpinBox()
spinBox.setMinimum(-2 ** 31)
spinBox.setMaximum(2 ** 31 - 1)
spinBox.setValue(self._stageView.GetRendererSetting(setting.key))
spinBox.key = str(setting.key)
spinBox.defValue = setting.defValue
formLayout.addRow(setting.name, spinBox)
self._ui.settingsMoreWidgets.append(spinBox)
if setting.type == UsdImagingGL.RendererSettingType.FLOAT:
spinBox = QtWidgets.QDoubleSpinBox()
spinBox.setDecimals(10)
spinBox.setMinimum(-2 ** 31)
spinBox.setMaximum(2 ** 31 - 1)
spinBox.setValue(self._stageView.GetRendererSetting(setting.key))
spinBox.key = str(setting.key)
spinBox.defValue = setting.defValue
formLayout.addRow(setting.name, spinBox)
self._ui.settingsMoreWidgets.append(spinBox)
if setting.type == UsdImagingGL.RendererSettingType.STRING:
lineEdit = QtWidgets.QLineEdit()
lineEdit.setText(self._stageView.GetRendererSetting(setting.key))
lineEdit.key = str(setting.key)
lineEdit.defValue = setting.defValue
formLayout.addRow(setting.name, lineEdit)
self._ui.settingsMoreWidgets.append(lineEdit)
# Add buttons
buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel |
QtWidgets.QDialogButtonBox.RestoreDefaults)
layout.addWidget(buttonBox)
buttonBox.rejected.connect(self._ui.settingsMoreDialog.reject)
buttonBox.accepted.connect(self._ui.settingsMoreDialog.accept)
self._ui.settingsMoreDialog.accepted.connect(self._applyMoreRendererSettings)
defaultButton = buttonBox.button(QtWidgets.QDialogButtonBox.RestoreDefaults)
defaultButton.clicked.connect(self._resetMoreRendererSettings)
self._ui.settingsMoreDialog.setLayout(layout)
self._ui.settingsMoreDialog.show()
def _applyMoreRendererSettings(self):
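        # Push the values from the "More..." dialog widgets into the
        # renderer, then re-sync the checkable menu actions with the
        # renderer's current state.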
for widget in self._ui.settingsMoreWidgets:
if isinstance(widget, QtWidgets.QCheckBox):
self._stageView.SetRendererSetting(widget.key, widget.isChecked())
if isinstance(widget, QtWidgets.QSpinBox):
self._stageView.SetRendererSetting(widget.key, widget.value())
if isinstance(widget, QtWidgets.QDoubleSpinBox):
self._stageView.SetRendererSetting(widget.key, widget.value())
if isinstance(widget, QtWidgets.QLineEdit):
self._stageView.SetRendererSetting(widget.key, widget.text())
for action in self._ui.settingsFlagActions:
action.setChecked(self._stageView.GetRendererSetting(action.key))
def _resetMoreRendererSettings(self):
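        # Restore every widget in the "More..." dialog to the default value
        # recorded when the dialog was built.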
for widget in self._ui.settingsMoreWidgets:
if isinstance(widget, QtWidgets.QCheckBox):
widget.setChecked(widget.defValue)
if isinstance(widget, QtWidgets.QSpinBox):
widget.setValue(widget.defValue)
if isinstance(widget, QtWidgets.QDoubleSpinBox):
widget.setValue(widget.defValue)
if isinstance(widget, QtWidgets.QLineEdit):
widget.setText(widget.defValue)
def _configureColorManagement(self):
enableMenu = (not self._noRender and
UsdImagingGL.Engine.IsColorCorrectionCapable())
self._ui.menuColorCorrection.setEnabled(enableMenu)
# Topology-dependent UI changes
def _reloadVaryingUI(self):
self._clearCaches()
if self._debug:
cProfile.runctx('self._resetPrimView(restoreSelection=False)', globals(), locals(), 'resetPrimView')
p = pstats.Stats('resetPrimView')
p.strip_dirs().sort_stats(-1).print_stats()
else:
self._resetPrimView(restoreSelection=False)
if not self._stageView:
# The second child is self._ui.glFrame, which disappears if
# its size is set to zero.
if self._noRender:
# remove glFrame from the ui
self._ui.glFrame.setParent(None)
# move the attributeBrowser into the primSplitter instead
self._ui.primStageSplitter.addWidget(self._ui.attributeBrowserFrame)
else:
self._stageView = StageView(parent=self._mainWindow,
dataModel=self._dataModel,
printTiming=self._printTiming)
self._stageView.fpsHUDInfo = self._fpsHUDInfo
self._stageView.fpsHUDKeys = self._fpsHUDKeys
self._stageView.signalPrimSelected.connect(self.onPrimSelected)
self._stageView.signalPrimRollover.connect(self.onRollover)
self._stageView.signalMouseDrag.connect(self.onStageViewMouseDrag)
self._stageView.signalErrorMessage.connect(self.statusMessage)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self._ui.glFrame.setLayout(layout)
layout.addWidget(self._stageView)
self._primSearchResults = deque([])
self._attrSearchResults = deque([])
self._primSearchString = ""
self._attrSearchString = ""
self._lastPrimSearched = self._dataModel.selection.getFocusPrim()
if self._stageView:
self._stageView.setFocus(QtCore.Qt.TabFocusReason)
self._stageView.rolloverPicking = self._dataModel.viewSettings.rolloverPrimInfo
def _scheduleResizePrimView(self):
""" Schedules a resize of the primView widget.
This will call _resizePrimView when the timer expires
(uses timer coalescing to prevent redundant resizes from occurring).
"""
self._primViewResizeTimer.start(0)
def _resizePrimView(self):
""" Used to coalesce excess calls to resizeColumnToContents.
"""
self._ui.primView.resizeColumnToContents(0)
# This appears to be "reasonably" performant in normal sized pose caches.
# If it turns out to be too slow, or if we want to do a better job of
# preserving the view the user currently has, we could look into ways of
# reconstructing just the prim tree under the "changed" prim(s). The
# (far and away) faster solution would be to implement our own TreeView
# and model in C++.
def _resetPrimView(self, restoreSelection=True):
with Timer() as t, BusyContext():
startingDepth = 3
self._computeDisplayPredicate()
with self._primViewSelectionBlocker:
self._ui.primView.setUpdatesEnabled(False)
self._ui.primView.clear()
self._primToItemMap.clear()
self._itemsToPush = []
# force new search since we are blowing away the primViewItems
# that may be cached in _primSearchResults
self._primSearchResults = []
self._populateRoots()
# it's confusing to see timing for expand followed by reset with
# the times being similar (esp when they are large)
self._expandToDepth(startingDepth, suppressTiming=True)
if restoreSelection:
self._refreshPrimViewSelection()
self._ui.primView.setUpdatesEnabled(True)
self._refreshCameraListAndMenu(preserveCurrCamera = True)
if self._printTiming:
t.PrintTime("reset Prim Browser to depth %d" % startingDepth)
def _resetGUI(self):
"""Perform a full refresh/resync of all GUI contents. This should be
called whenever the USD stage is modified, and assumes that all data
previously fetched from the stage is invalid. In the future, more
granular updates will be supported by listening to UsdNotice objects on
the active stage.
If a prim resync is needed then we fully update the prim view,
otherwise can just do a simplified update to the prim view.
"""
with BusyContext():
if self._hasPrimResync:
self._resetPrimView()
self._hasPrimResync = False
else:
self._resetPrimViewVis(selItemsOnly=False)
self._updatePropertyView()
self._populatePropertyInspector()
self._updateMetadataView()
self._updateLayerStackView()
self._updateCompositionView()
if self._stageView:
self._stageView.update()
def updateGUI(self):
"""Will schedule a full refresh/resync of the GUI contents.
Prefer this to calling _resetGUI() directly, since it will
coalesce multiple calls to this method in to a single refresh.
"""
self._guiResetTimer.start()
def _resetPrimViewVis(self, selItemsOnly=True,
authoredVisHasChanged=True):
"""Updates browser rows' Vis columns... can update just selected
items (and their descendants and ancestors), or all items in the
primView. When authoredVisHasChanged is True, we force each item
to discard any value caches it may be holding onto."""
with Timer() as t:
self._ui.primView.setUpdatesEnabled(False)
rootsToProcess = self.getSelectedItems() if selItemsOnly else \
[self._ui.primView.invisibleRootItem()]
for item in rootsToProcess:
PrimViewItem.propagateVis(item, authoredVisHasChanged)
self._ui.primView.setUpdatesEnabled(True)
if self._printTiming:
t.PrintTime("update vis column")
def _updatePrimView(self):
# Process some more prim view items.
n = min(100, len(self._itemsToPush))
if n:
items = self._itemsToPush[-n:]
del self._itemsToPush[-n:]
for item in items:
item.push()
else:
self._primViewUpdateTimer.stop()
# Option windows ==========================================================
def _setComplexity(self, complexity):
"""Set the complexity and update the UI."""
self._dataModel.viewSettings.complexity = complexity
def _incrementComplexity(self):
"""Jump up to the next level of complexity."""
self._setComplexity(RefinementComplexities.next(
self._dataModel.viewSettings.complexity))
def _decrementComplexity(self):
"""Jump back to the previous level of complexity."""
self._setComplexity(RefinementComplexities.prev(
self._dataModel.viewSettings.complexity))
def _changeComplexity(self, action):
"""Update the complexity from a selected QAction."""
self._setComplexity(RefinementComplexities.fromName(action.text()))
def _adjustFOV(self):
fov = QtWidgets.QInputDialog.getDouble(self._mainWindow, "Adjust FOV",
"Enter a value between 0 and 180", self._dataModel.viewSettings.freeCamera.fov, 0, 180)
if (fov[1]):
self._dataModel.viewSettings.freeCamera.fov = fov[0]
if self._stageView:
self._stageView.update()
def _adjustClippingPlanes(self, checked):
# Eventually, this will not be accessible when _stageView is None.
# Until then, silently ignore.
if self._stageView:
if (checked):
self._adjustClippingDlg = adjustClipping.AdjustClipping(self._mainWindow,
self._stageView)
self._adjustClippingDlg.finished.connect(
lambda status : self._ui.actionAdjust_Clipping.setChecked(False))
self._adjustClippingDlg.show()
else:
self._adjustClippingDlg.close()
def _adjustDefaultMaterial(self, checked):
if (checked):
self._adjustDefaultMaterialDlg = adjustDefaultMaterial.AdjustDefaultMaterial(
self._mainWindow, self._dataModel.viewSettings)
self._adjustDefaultMaterialDlg.finished.connect(lambda status :
self._ui.actionAdjust_Default_Material.setChecked(False))
self._adjustDefaultMaterialDlg.show()
else:
self._adjustDefaultMaterialDlg.close()
def _redrawOptionToggled(self, checked):
self._dataModel.viewSettings.redrawOnScrub = checked
self._ui.frameSlider.setTracking(
self._dataModel.viewSettings.redrawOnScrub)
# Frame-by-frame/Playback functionality ===================================
def _setPlaybackAvailability(self, enabled = True):
isEnabled = len(self._timeSamples) > 1 and enabled
self._playbackAvailable = isEnabled
#If playback is disabled, but the animation is playing...
if not isEnabled and self._dataModel.playing:
self._ui.playButton.click()
self._ui.playButton.setEnabled(isEnabled)
self._ui.frameSlider.setEnabled(isEnabled)
self._ui.actionFrame_Forward.setEnabled(isEnabled)
self._ui.actionFrame_Backwards.setEnabled(isEnabled)
self._ui.frameField.setEnabled(isEnabled
if self._hasTimeSamples else False)
self._ui.frameLabel.setEnabled(isEnabled
if self._hasTimeSamples else False)
self._ui.stageBegin.setEnabled(isEnabled)
self._ui.stageEnd.setEnabled(isEnabled)
self._ui.redrawOnScrub.setEnabled(isEnabled)
self._ui.stepSizeLabel.setEnabled(isEnabled)
self._ui.stepSize.setEnabled(isEnabled)
def _playClicked(self):
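        # Toggle playback: when the play button is checked, start the frame
        # timer and suspend prim-view updates; when unchecked, stop playback
        # and resume the deferred UI updates.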
if self._ui.playButton.isChecked():
# Enable tracking whilst playing
self._ui.frameSlider.setTracking(True)
# Start playback.
self._dataModel.playing = True
self._ui.playButton.setText("Stop")
# setText() causes the shortcut to be reset to whatever
# Qt thinks it should be based on the text. We know better.
self._setPlayShortcut()
self._fpsHUDInfo[HUDEntries.PLAYBACK] = "..."
self._timer.start()
# For performance, don't update the prim tree view while playing.
self._primViewUpdateTimer.stop()
self._playbackIndex = 0
else:
self._ui.frameSlider.setTracking(self._ui.redrawOnScrub.isChecked())
# Stop playback.
self._dataModel.playing = False
self._ui.playButton.setText("Play")
# setText() causes the shortcut to be reset to whatever
# Qt thinks it should be based on the text. We know better.
self._setPlayShortcut()
self._fpsHUDInfo[HUDEntries.PLAYBACK] = "N/A"
self._timer.stop()
self._primViewUpdateTimer.start()
self._updateOnFrameChange()
def _advanceFrameForPlayback(self):
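        # Throttle playback to the target frames-per-second rate and
        # periodically measure the achieved rate for the playback HUD readout.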
sleep(max(0, 1. / self.framesPerSecond - (time() - self._lastFrameTime)))
self._lastFrameTime = time()
if self._playbackIndex == 0:
self._startTime = time()
if self._playbackIndex == 4:
self._endTime = time()
delta = (self._endTime - self._startTime)/4.
ms = delta * 1000.
fps = 1. / delta
self._fpsHUDInfo[HUDEntries.PLAYBACK] = "%.2f ms (%.2f FPS)" % (ms, fps)
self._playbackIndex = (self._playbackIndex + 1) % 5
self._advanceFrame()
def _advanceFrame(self):
if self._playbackAvailable:
value = self._ui.frameSlider.value() + 1
if value > self._ui.frameSlider.maximum():
value = self._ui.frameSlider.minimum()
self._ui.frameSlider.setValue(value)
def _retreatFrame(self):
if self._playbackAvailable:
value = self._ui.frameSlider.value() - 1
if value < self._ui.frameSlider.minimum():
value = self._ui.frameSlider.maximum()
self._ui.frameSlider.setValue(value)
def _findClosestFrameIndex(self, timeSample):
"""Find the closest frame index for the given `timeSample`.
Args:
timeSample (float): A time sample value.
Returns:
int: The closest matching frame index or 0 if one cannot be
found.
"""
closestIndex = int(round((timeSample - self._timeSamples[0]) / self.step))
# Bounds checking
# 0 <= closestIndex <= number of time samples - 1
closestIndex = max(0, closestIndex)
closestIndex = min(len(self._timeSamples) - 1, closestIndex)
return closestIndex
def _rangeBeginChanged(self):
value = float(self._ui.rangeBegin.text())
if value != self.realStartTimeCode:
self.realStartTimeCode = value
self._UpdateTimeSamples(resetStageDataOnly=False)
def _stepSizeChanged(self):
value = float(self._ui.stepSize.text())
if value != self.step:
self.step = value
self._UpdateTimeSamples(resetStageDataOnly=False)
def _rangeEndChanged(self):
value = float(self._ui.rangeEnd.text())
if value != self.realEndTimeCode:
self.realEndTimeCode = value
self._UpdateTimeSamples(resetStageDataOnly=False)
def _frameStringChanged(self):
value = float(self._ui.frameField.text())
self.setFrame(value)
def _sliderMoved(self, frameIndex):
"""Slot called when the frame slider is moved by a user.
Args:
frameIndex (int): The new frame index value.
"""
# If redraw on scrub is disabled, ensure we still update the
# frame field.
if not self._ui.redrawOnScrub.isChecked():
self.setFrameField(self._timeSamples[frameIndex])
def setFrameField(self, frame):
"""Set the frame field to the given `frame`.
Args:
frame (str|int|float): The new frame value.
"""
frame = round(float(frame), ndigits=2)
self._ui.frameField.setText(str(frame))
# Prim/Attribute search functionality =====================================
def _findPrims(self, pattern, useRegex=True):
"""Search the Usd Stage for matching prims
"""
# If pattern doesn't contain regexp special chars, drop
# down to simple search, as it's faster
if useRegex and re.match("^[0-9_A-Za-z]+$", pattern):
useRegex = False
if useRegex:
isMatch = re.compile(pattern, re.IGNORECASE).search
else:
pattern = pattern.lower()
isMatch = lambda x: pattern in x.lower()
matches = [prim.GetPath() for prim
in Usd.PrimRange.Stage(self._dataModel.stage,
self._displayPredicate)
if isMatch(prim.GetName())]
if self._dataModel.viewSettings.showAllMasterPrims:
for master in self._dataModel.stage.GetMasters():
matches += [prim.GetPath() for prim
in Usd.PrimRange(master, self._displayPredicate)
if isMatch(prim.GetName())]
return matches
def _primViewFindNext(self):
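        # Advance to the next prim matching the search string, starting a
        # fresh search if the search text or the focus prim has changed since
        # the last query.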
if (self._primSearchString == self._ui.primViewLineEdit.text() and
len(self._primSearchResults) > 0 and
self._lastPrimSearched == self._dataModel.selection.getFocusPrim()):
# Go to the next result of the currently ongoing search.
# First time through, we'll be converting from SdfPaths
# to items (see the append() below)
nextResult = self._primSearchResults.popleft()
if isinstance(nextResult, Sdf.Path):
nextResult = self._getItemAtPath(nextResult)
if nextResult:
with self._dataModel.selection.batchPrimChanges:
self._dataModel.selection.clearPrims()
self._dataModel.selection.addPrim(nextResult.prim)
self._primSearchResults.append(nextResult)
self._lastPrimSearched = self._dataModel.selection.getFocusPrim()
# The path is effectively pruned if we couldn't map the
# path to an item
else:
# Begin a new search
with Timer() as t:
self._primSearchString = self._ui.primViewLineEdit.text()
self._primSearchResults = self._findPrims(str(self._ui.primViewLineEdit.text()))
self._primSearchResults = deque(self._primSearchResults)
self._lastPrimSearched = self._dataModel.selection.getFocusPrim()
if (len(self._primSearchResults) > 0):
self._primViewFindNext()
if self._printTiming:
t.PrintTime("match '%s' (%d matches)" %
(self._primSearchString,
len(self._primSearchResults)))
def _primLegendToggleCollapse(self):
ToggleLegendWithBrowser(self._ui.primLegendContainer,
self._ui.primLegendQButton,
self._primLegendAnim)
def _propertyLegendToggleCollapse(self):
ToggleLegendWithBrowser(self._ui.propertyLegendContainer,
self._ui.propertyLegendQButton,
self._propertyLegendAnim)
def _attrViewFindNext(self):
if (self._attrSearchString == self._ui.attrViewLineEdit.text() and
len(self._attrSearchResults) > 0 and
self._lastPrimSearched == self._dataModel.selection.getFocusPrim()):
# Go to the next result of the currently ongoing search
nextResult = self._attrSearchResults.popleft()
itemName = str(nextResult.text(PropertyViewIndex.NAME))
selectedProp = self._propertiesDict[itemName]
if isinstance(selectedProp, CustomAttribute):
self._dataModel.selection.clearProps()
self._dataModel.selection.setComputedProp(selectedProp)
else:
self._dataModel.selection.setProp(selectedProp)
self._dataModel.selection.clearComputedProps()
self._ui.propertyView.scrollToItem(nextResult)
self._attrSearchResults.append(nextResult)
self._lastPrimSearched = self._dataModel.selection.getFocusPrim()
self._ui.attributeValueEditor.populate(
self._dataModel.selection.getFocusPrim().GetPath(), itemName)
self._updateMetadataView(self._getSelectedObject())
self._updateLayerStackView(self._getSelectedObject())
else:
# Begin a new search
self._attrSearchString = self._ui.attrViewLineEdit.text()
attrSearchItems = self._ui.propertyView.findItems(
self._ui.attrViewLineEdit.text(),
QtCore.Qt.MatchRegExp,
PropertyViewIndex.NAME)
# Now just search for the string itself
otherSearch = self._ui.propertyView.findItems(
self._ui.attrViewLineEdit.text(),
QtCore.Qt.MatchContains,
PropertyViewIndex.NAME)
combinedItems = attrSearchItems + otherSearch
# We find properties first, then connections/targets
# Based on the default recursive match finding in Qt.
combinedItems.sort()
self._attrSearchResults = deque(combinedItems)
self._lastPrimSearched = self._dataModel.selection.getFocusPrim()
if (len(self._attrSearchResults) > 0):
self._attrViewFindNext()
@classmethod
def _outputBaseDirectory(cls):
homeDirRoot = os.getenv('HOME') or os.path.expanduser('~')
baseDir = os.path.join(homeDirRoot, '.usdview')
try:
if not os.path.exists(baseDir):
os.makedirs(baseDir)
return baseDir
except OSError:
sys.stderr.write('ERROR: Unable to create base directory '
'for settings file, settings will not be saved.\n')
return None
# View adjustment functionality ===========================================
def _storeAndReturnViewState(self):
lastView = self._lastViewContext
self._lastViewContext = self._stageView.copyViewState()
return lastView
def _frameSelection(self):
if self._stageView:
# Save all the pertinent attribute values (for _toggleFramedView)
self._storeAndReturnViewState() # ignore return val - we're stomping it
self._stageView.updateView(True, True) # compute bbox on frame selection
def _toggleFramedView(self):
if self._stageView:
self._stageView.restoreViewState(self._storeAndReturnViewState())
def _resetSettings(self):
"""Reloads the UI and Sets up the initial settings for the
_stageView object created in _reloadVaryingUI"""
# Seems like a good time to clear the texture registry
Glf.TextureRegistry.Reset()
# RELOAD fixed and varying UI
self._reloadFixedUI()
self._reloadVaryingUI()
if self._stageView:
self._stageView.update()
self._ui.actionFreeCam._prim = None
self._ui.actionFreeCam.triggered.connect(
lambda : self._cameraSelectionChanged(None))
if self._stageView:
self._stageView.signalSwitchedToFreeCam.connect(
lambda : self._cameraSelectionChanged(None))
self._refreshCameraListAndMenu(preserveCurrCamera = False)
def _updateForStageChanges(self, hasPrimResync=True):
"""Assuming there have been authoring changes to the already-loaded
stage, make the minimal updates to the UI required to maintain a
consistent state. This may still be over-zealous until we know
what actually changed, but we should be able to preserve camera and
playback positions (unless viewing through a stage camera that no
        longer exists)."""
self._hasPrimResync = hasPrimResync or self._hasPrimResync
self._clearCaches(preserveCamera=True)
# Update the UIs (it gets all of them) and StageView on a timer
self.updateGUI()
def _cacheViewerModeEscapeSizes(self, pos=None, index=None):
topHeight, bottomHeight = self._ui.topBottomSplitter.sizes()
primViewWidth, stageViewWidth = self._ui.primStageSplitter.sizes()
if bottomHeight > 0 or primViewWidth > 0:
self._viewerModeEscapeSizes = topHeight, bottomHeight, primViewWidth, stageViewWidth
else:
self._viewerModeEscapeSizes = None
def _toggleViewerMode(self):
topHeight, bottomHeight = self._ui.topBottomSplitter.sizes()
primViewWidth, stageViewWidth = self._ui.primStageSplitter.sizes()
if bottomHeight > 0 or primViewWidth > 0:
topHeight += bottomHeight
bottomHeight = 0
stageViewWidth += primViewWidth
primViewWidth = 0
else:
if self._viewerModeEscapeSizes is not None:
topHeight, bottomHeight, primViewWidth, stageViewWidth = self._viewerModeEscapeSizes
else:
bottomHeight = UIDefaults.BOTTOM_HEIGHT
topHeight = UIDefaults.TOP_HEIGHT
primViewWidth = UIDefaults.PRIM_VIEW_WIDTH
stageViewWidth = UIDefaults.STAGE_VIEW_WIDTH
self._ui.topBottomSplitter.setSizes([topHeight, bottomHeight])
self._ui.primStageSplitter.setSizes([primViewWidth, stageViewWidth])
def _resetView(self,selectPrim = None):
""" Reverts the GL frame to the initial camera view,
and clears selection (sets to pseudoRoot), UNLESS 'selectPrim' is
not None, in which case we'll select and frame it."""
self._ui.primView.clearSelection()
pRoot = self._dataModel.stage.GetPseudoRoot()
if selectPrim is None:
# if we had a command-line specified selection, re-frame it
selectPrim = self._initialSelectPrim or pRoot
item = self._getItemAtPath(selectPrim.GetPath())
# Our response to selection-change includes redrawing. We do NOT
# want that to happen here, since we are subsequently going to
# change the camera framing (and redraw, again), which can cause
# flickering. So make sure we don't redraw!
self._allowViewUpdates = False
self._ui.primView.setCurrentItem(item)
self._allowViewUpdates = True
if self._stageView:
if (selectPrim and selectPrim != pRoot) or not self._startingPrimCamera:
# _frameSelection translates the camera from wherever it happens
# to be at the time. If we had a starting selection AND a
# primCam, then before framing, switch back to the prim camera
if selectPrim == self._initialSelectPrim and self._startingPrimCamera:
self._dataModel.viewSettings.cameraPrim = self._startingPrimCamera
self._frameSelection()
else:
self._dataModel.viewSettings.cameraPrim = self._startingPrimCamera
self._stageView.updateView()
def _changeRenderMode(self, mode):
self._dataModel.viewSettings.renderMode = str(mode.text())
def _changeColorCorrection(self, mode):
self._dataModel.viewSettings.colorCorrectionMode = str(mode.text())
def _changePickMode(self, mode):
self._dataModel.viewSettings.pickMode = str(mode.text())
def _changeSelHighlightMode(self, mode):
self._dataModel.viewSettings.selHighlightMode = str(mode.text())
def _changeHighlightColor(self, color):
self._dataModel.viewSettings.highlightColorName = str(color.text())
def _changeInterpolationType(self, interpolationType):
for t in Usd.InterpolationType.allValues:
if t.displayName == str(interpolationType.text()):
self._dataModel.stage.SetInterpolationType(t)
self._resetSettings()
break
def _ambientOnlyClicked(self, checked=None):
if self._stageView and checked is not None:
self._dataModel.viewSettings.ambientLightOnly = checked
# If all three lights are disabled, re-enable them all.
if (not self._dataModel.viewSettings.keyLightEnabled and not self._dataModel.viewSettings.fillLightEnabled and
not self._dataModel.viewSettings.backLightEnabled):
self._dataModel.viewSettings.keyLightEnabled = True
self._dataModel.viewSettings.fillLightEnabled = True
self._dataModel.viewSettings.backLightEnabled = True
def _onKeyLightClicked(self, checked=None):
if self._stageView and checked is not None:
self._dataModel.viewSettings.keyLightEnabled = checked
def _onFillLightClicked(self, checked=None):
if self._stageView and checked is not None:
self._dataModel.viewSettings.fillLightEnabled = checked
def _onBackLightClicked(self, checked=None):
if self._stageView and checked is not None:
self._dataModel.viewSettings.backLightEnabled = checked
def _changeBgColor(self, mode):
self._dataModel.viewSettings.clearColorText = str(mode.text())
def _toggleShowBBoxPlayback(self):
"""Called when the menu item for showing BBoxes
during playback is activated or deactivated."""
self._dataModel.viewSettings.showBBoxPlayback = (
self._ui.showBBoxPlayback.isChecked())
def _toggleAutoComputeClippingPlanes(self):
autoClip = self._ui.actionAuto_Compute_Clipping_Planes.isChecked()
self._dataModel.viewSettings.autoComputeClippingPlanes = autoClip
if autoClip:
self._stageView.detachAndReClipFromCurrentCamera()
def _setUseExtentsHint(self):
self._dataModel.useExtentsHint = self._ui.useExtentsHint.isChecked()
self._updatePropertyView()
#recompute and display bbox
self._refreshBBox()
def _toggleShowBBoxes(self):
"""Called when the menu item for showing BBoxes
is activated."""
self._dataModel.viewSettings.showBBoxes = self._ui.showBBoxes.isChecked()
#recompute and display bbox
self._refreshBBox()
def _toggleShowAABBox(self):
"""Called when Axis-Aligned bounding boxes
are activated/deactivated via menu item"""
self._dataModel.viewSettings.showAABBox = self._ui.showAABBox.isChecked()
# recompute and display bbox
self._refreshBBox()
def _toggleShowOBBox(self):
"""Called when Oriented bounding boxes
are activated/deactivated via menu item"""
self._dataModel.viewSettings.showOBBox = self._ui.showOBBox.isChecked()
# recompute and display bbox
self._refreshBBox()
def _refreshBBox(self):
"""Recompute and hide/show Bounding Box."""
if self._stageView:
self._stageView.updateView(forceComputeBBox=True)
def _toggleDisplayGuide(self):
self._dataModel.viewSettings.displayGuide = (
self._ui.actionDisplay_Guide.isChecked())
def _toggleDisplayProxy(self):
self._dataModel.viewSettings.displayProxy = (
self._ui.actionDisplay_Proxy.isChecked())
def _toggleDisplayRender(self):
self._dataModel.viewSettings.displayRender = (
self._ui.actionDisplay_Render.isChecked())
def _toggleDisplayCameraOracles(self):
self._dataModel.viewSettings.displayCameraOracles = (
self._ui.actionDisplay_Camera_Oracles.isChecked())
def _toggleDisplayPrimId(self):
self._dataModel.viewSettings.displayPrimId = (
self._ui.actionDisplay_PrimId.isChecked())
def _toggleEnableSceneMaterials(self):
self._dataModel.viewSettings.enableSceneMaterials = (
self._ui.actionEnable_Scene_Materials.isChecked())
def _toggleCullBackfaces(self):
self._dataModel.viewSettings.cullBackfaces = (
self._ui.actionCull_Backfaces.isChecked())
def _showInterpreter(self):
if self._interpreter is None:
self._interpreter = QtWidgets.QDialog(self._mainWindow)
self._interpreter.setObjectName("Interpreter")
self._console = Myconsole(self._interpreter, self._usdviewApi)
self._interpreter.setFocusProxy(self._console) # this is important!
lay = QtWidgets.QVBoxLayout()
lay.addWidget(self._console)
self._interpreter.setLayout(lay)
# dock the interpreter window next to the main usdview window
self._interpreter.move(self._mainWindow.x() + self._mainWindow.frameGeometry().width(),
self._mainWindow.y())
self._interpreter.resize(600, self._mainWindow.size().height()/2)
self._interpreter.show()
self._interpreter.activateWindow()
self._interpreter.setFocus()
def _showDebugFlags(self):
if self._debugFlagsWindow is None:
from debugFlagsWidget import DebugFlagsWidget
self._debugFlagsWindow = DebugFlagsWidget()
self._debugFlagsWindow.show()
# Screen capture functionality ===========================================
def GrabWindowShot(self):
'''Returns a QImage of the full usdview window '''
# generate an image of the window. Due to how Qt's rendering
# works, this will not pick up the GL Widget(_stageView)'s
# contents, and we'll need to compose it separately.
windowShot = QtGui.QImage(self._mainWindow.size(),
QtGui.QImage.Format_ARGB32_Premultiplied)
painter = QtGui.QPainter(windowShot)
self._mainWindow.render(painter, QtCore.QPoint())
if self._stageView:
# overlay the QGLWidget on and return the composed image
# we offset by a single point here because of Qt.Pos funkyness
offset = QtCore.QPoint(0,1)
pos = self._stageView.mapTo(self._mainWindow, self._stageView.pos()) - offset
painter.drawImage(pos, self.GrabViewportShot())
return windowShot
def GrabViewportShot(self):
'''Returns a QImage of the current stage view in usdview.'''
if self._stageView:
return self._stageView.grabFrameBuffer()
else:
return None
# File handling functionality =============================================
def _cleanAndClose(self):
self._settings2.save()
# If the current path widget is focused when closing usdview, it can
# trigger an "editingFinished()" signal, which will look for a prim in
# the scene (which is already deleted). This prevents that.
# XXX:
# This method is reentrant and calling disconnect twice on a signal
# causes an exception to be thrown.
try:
self._ui.currentPathWidget.editingFinished.disconnect(
self._currentPathChanged)
except RuntimeError:
pass
# Shut down some timers and our eventFilter
self._primViewUpdateTimer.stop()
self._guiResetTimer.stop()
QtWidgets.QApplication.instance().removeEventFilter(self._filterObj)
# If the timer is currently active, stop it from being invoked while
# the USD stage is being torn down.
if self._timer.isActive():
self._timer.stop()
# Close the stage.
self._closeStage()
# Tear down the UI window.
with Timer() as t:
self._mainWindow.close()
if self._printTiming:
t.PrintTime('tear down the UI')
def _openFile(self):
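        # Show a file-open dialog filtered to all registered USD-compatible
        # formats (built-in .usd* formats listed first) and reopen the stage
        # with the chosen file.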
extensions = Sdf.FileFormat.FindAllFileFormatExtensions()
builtInFiles = lambda f: f.startswith(".usd")
notBuiltInFiles = lambda f: not f.startswith(".usd")
extensions = filter(builtInFiles, extensions) + filter(notBuiltInFiles, extensions)
fileFilter = "USD Compatible Files (" + " ".join("*." + e for e in extensions) + ")"
(filename, _) = QtWidgets.QFileDialog.getOpenFileName(
self._mainWindow,
caption="Select file",
dir=".",
filter=fileFilter,
selectedFilter=fileFilter)
if len(filename) > 0:
self._parserData.usdFile = str(filename)
self._mainWindow.setWindowTitle(filename)
self._reopenStage()
def _getSaveFileName(self, caption, recommendedFilename):
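        # Prompt for a save path, defaulting to the recommended filename, and
        # append a .usd extension if the chosen name lacks a recognized USD
        # extension.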
(saveName, _) = QtWidgets.QFileDialog.getSaveFileName(
self._mainWindow,
caption,
'./' + recommendedFilename,
'USD Files (*.usd)'
';;USD ASCII Files (*.usda)'
';;USD Crate Files (*.usdc)'
';;Any USD File (*.usd *.usda *.usdc)',
'Any USD File (*.usd *.usda *.usdc)')
if len(saveName) == 0:
return ''
_, ext = os.path.splitext(saveName)
if ext not in ('.usd', '.usda', '.usdc'):
saveName += '.usd'
return saveName
def _saveOverridesAs(self):
recommendedFilename = self._parserData.usdFile.rsplit('.', 1)[0]
recommendedFilename += '_overrides.usd'
saveName = self._getSaveFileName(
'Save Overrides As', recommendedFilename)
if len(saveName) == 0:
return
if not self._dataModel.stage:
return
with BusyContext():
# In the future, we may allow usdview to be brought up with no file,
# in which case it would create an in-memory root layer, to which
# all edits will be targeted. In order to future proof
# this, first fetch the root layer, and if it is anonymous, just
            # export it to the given filename. If it isn't anonymous (i.e., it
# is a regular usd file on disk), export the session layer and add
# the stage root file as a sublayer.
rootLayer = self._dataModel.stage.GetRootLayer()
if not rootLayer.anonymous:
self._dataModel.stage.GetSessionLayer().Export(
saveName, 'Created by UsdView')
targetLayer = Sdf.Layer.FindOrOpen(saveName)
UsdUtils.CopyLayerMetadata(rootLayer, targetLayer,
skipSublayers=True)
# We don't ever store self.realStartTimeCode or
# self.realEndTimeCode in a layer, so we need to author them
# here explicitly.
if self.realStartTimeCode:
targetLayer.startTimeCode = self.realStartTimeCode
if self.realEndTimeCode:
targetLayer.endTimeCode = self.realEndTimeCode
targetLayer.subLayerPaths.append(
self._dataModel.stage.GetRootLayer().realPath)
targetLayer.RemoveInertSceneDescription()
targetLayer.Save()
else:
self._dataModel.stage.GetRootLayer().Export(
saveName, 'Created by UsdView')
def _saveFlattenedAs(self):
recommendedFilename = self._parserData.usdFile.rsplit('.', 1)[0]
recommendedFilename += '_flattened.usd'
saveName = self._getSaveFileName(
'Save Flattened As', recommendedFilename)
if len(saveName) == 0:
return
with BusyContext():
self._dataModel.stage.Export(saveName)
def _reopenStage(self):
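        # Close the current stage and reopen it from the parser-supplied
        # file, reloading layers that may have changed on disk, then reset
        # settings and the view.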
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.BusyCursor)
try:
# Pause the stage view while we update
self._stageView.setUpdatesEnabled(False)
# Clear out any Usd objects that may become invalid.
self._dataModel.selection.clear()
self._currentSpec = None
self._currentLayer = None
# Close the current stage so that we don't keep it in memory
# while trying to open another stage.
self._closeStage()
stage = self._openStage(
self._parserData.usdFile, self._parserData.sessionLayer,
self._parserData.populationMask)
# We need this for layers which were cached in memory but changed on
# disk. The additional Reload call should be cheap when nothing
# actually changed.
stage.Reload()
self._dataModel.stage = stage
self._resetSettings()
self._resetView()
self._stepSizeChanged()
self._stageView.setUpdatesEnabled(True)
except Exception as err:
self.statusMessage('Error occurred reopening Stage: %s' % err)
traceback.print_exc()
finally:
QtWidgets.QApplication.restoreOverrideCursor()
self.statusMessage('Stage Reopened')
def _reloadStage(self):
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.BusyCursor)
try:
self._dataModel.stage.Reload()
# Seems like a good time to clear the texture registry
Glf.TextureRegistry.Reset()
# reset timeline, and playback settings from stage metadata
self._reloadFixedUI(resetStageDataOnly=True)
except Exception as err:
self.statusMessage('Error occurred rereading all layers for Stage: %s' % err)
finally:
QtWidgets.QApplication.restoreOverrideCursor()
self.statusMessage('All Layers Reloaded.')
def _cameraSelectionChanged(self, camera):
self._dataModel.viewSettings.cameraPrim = camera
def _refreshCameraListAndMenu(self, preserveCurrCamera):
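        # Re-scan the stage for camera prims, pick (or keep) the current
        # camera, and rebuild the Camera menu with the active one checked.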
self._allSceneCameras = Utils._GetAllPrimsOfType(
self._dataModel.stage, Tf.Type.Find(UsdGeom.Camera))
currCamera = self._startingPrimCamera
if self._stageView:
currCamera = self._dataModel.viewSettings.cameraPrim
self._stageView.allSceneCameras = self._allSceneCameras
# if the stageView is holding an expired camera, clear it first
# and force search for a new one
if currCamera != None and not (currCamera and currCamera.IsActive()):
currCamera = None
self._dataModel.viewSettings.cameraPrim = None
preserveCurrCamera = False
        if not preserveCurrCamera:
            # Use a one-element list as a mutable flag so the nested helper
            # can record that a camera was chosen (there is no 'nonlocal' in
            # Python 2).
            cameraWasSet = [False]
            def setCamera(camera):
                self._startingPrimCamera = camera
                self._dataModel.viewSettings.cameraPrim = camera
                cameraWasSet[0] = True
if self._startingPrimCameraPath:
prim = self._dataModel.stage.GetPrimAtPath(
self._startingPrimCameraPath)
if not prim.IsValid():
msg = sys.stderr
print >> msg, "WARNING: Camera path %r did not exist in " \
"stage" % (str(self._startingPrimCameraPath),)
self._startingPrimCameraPath = None
elif not prim.IsA(UsdGeom.Camera):
msg = sys.stderr
print >> msg, "WARNING: Camera path %r was not a " \
"UsdGeom.Camera" % \
(str(self._startingPrimCameraPath),)
self._startingPrimCameraPath = None
else:
setCamera(prim)
            if not cameraWasSet[0] and self._startingPrimCameraName:
                for camera in self._allSceneCameras:
                    if camera.GetName() == self._startingPrimCameraName:
                        setCamera(camera)
                        break
            # Keep the local 'currCamera' in sync with whatever setCamera
            # chose so the menu check marks below reflect the new camera.
            if cameraWasSet[0]:
                currCamera = self._startingPrimCamera
# Now that we have the current camera and all cameras, build the menu
self._ui.menuCamera.clear()
if len(self._allSceneCameras) == 0:
self._ui.menuCamera.setEnabled(False)
else:
self._ui.menuCamera.setEnabled(True)
currCameraPath = None
if currCamera:
currCameraPath = currCamera.GetPath()
for camera in self._allSceneCameras:
action = self._ui.menuCamera.addAction(camera.GetName())
action.setData(camera.GetPath())
action.setToolTip(str(camera.GetPath()))
action.setCheckable(True)
action.triggered[bool].connect(
lambda _, cam = camera: self._cameraSelectionChanged(cam))
action.setChecked(action.data() == currCameraPath)
def _updatePropertiesFromPropertyView(self):
"""Update the data model's property selection to match property view's
current selection.
"""
selectedProperties = dict()
for item in self._ui.propertyView.selectedItems():
# We define data 'roles' in the property viewer to distinguish between things
# like attributes and attributes with connections, relationships and relationships
# with targets etc etc.
role = item.data(PropertyViewIndex.TYPE, QtCore.Qt.ItemDataRole.WhatsThisRole)
if role in (PropertyViewDataRoles.CONNECTION, PropertyViewDataRoles.TARGET):
# Get the owning property's set of selected targets.
propName = str(item.parent().text(PropertyViewIndex.NAME))
prop = self._propertiesDict[propName]
targets = selectedProperties.setdefault(prop, set())
# Add the target to the set of targets.
targetPath = Sdf.Path(str(item.text(PropertyViewIndex.NAME)))
if role == PropertyViewDataRoles.CONNECTION:
prim = self._dataModel.stage.GetPrimAtPath(
targetPath.GetPrimPath())
target = prim.GetProperty(targetPath.name)
else: # role == PropertyViewDataRoles.TARGET
target = self._dataModel.stage.GetPrimAtPath(
targetPath)
targets.add(target)
else:
propName = str(item.text(PropertyViewIndex.NAME))
prop = self._propertiesDict[propName]
selectedProperties.setdefault(prop, set())
with self._dataModel.selection.batchPropChanges:
self._dataModel.selection.clearProps()
for prop, targets in selectedProperties.items():
if not isinstance(prop, CustomAttribute):
self._dataModel.selection.addProp(prop)
for target in targets:
self._dataModel.selection.addPropTarget(prop, target)
with self._dataModel.selection.batchComputedPropChanges:
self._dataModel.selection.clearComputedProps()
for prop, targets in selectedProperties.items():
if isinstance(prop, CustomAttribute):
self._dataModel.selection.addComputedProp(prop)
def _propertyViewSelectionChanged(self):
"""Called whenever property view's selection changes."""
if self._propertyViewSelectionBlocker.blocked():
return
self._updatePropertiesFromPropertyView()
def _propertyViewCurrentItemChanged(self, currentItem, lastItem):
"""Called whenever property view's current item changes."""
if self._propertyViewSelectionBlocker.blocked():
return
# If a selected item becomes the current item, it will not fire a
# selection changed signal but we still want to change the property
# selection.
if currentItem is not None and currentItem.isSelected():
self._updatePropertiesFromPropertyView()
def _propSelectionChanged(self):
"""Called whenever the property selection in the data model changes.
Updates any UI that relies on the selection state.
"""
self._updatePropertyViewSelection()
self._populatePropertyInspector()
self._updatePropertyInspector()
def _populatePropertyInspector(self):
focusPrimPath = None
focusPropName = None
focusProp = self._dataModel.selection.getFocusProp()
if focusProp is None:
focusPrimPath, focusPropName = (
self._dataModel.selection.getFocusComputedPropPath())
else:
focusPrimPath = focusProp.GetPrimPath()
focusPropName = focusProp.GetName()
if focusPropName is not None:
# inform the value editor that we selected a new attribute
self._ui.attributeValueEditor.populate(focusPrimPath, focusPropName)
else:
self._ui.attributeValueEditor.clear()
def _onCompositionSelectionChanged(self, curr=None, prev=None):
self._currentSpec = getattr(curr, 'spec', None)
self._currentLayer = getattr(curr, 'layer', None)
def _updatePropertyInspector(self, index=None, obj=None):
# index must be the first parameter since this method is used as
# propertyInspector tab widget's currentChanged(int) signal callback
if index is None:
index = self._ui.propertyInspector.currentIndex()
if obj is None:
obj = self._getSelectedObject()
if index == PropertyIndex.METADATA:
self._updateMetadataView(obj)
elif index == PropertyIndex.LAYERSTACK:
self._updateLayerStackView(obj)
elif index == PropertyIndex.COMPOSITION:
self._updateCompositionView(obj)
def _refreshAttributeValue(self):
self._ui.attributeValueEditor.refresh()
def _propertyViewContextMenu(self, point):
item = self._ui.propertyView.itemAt(point)
if item:
self.contextMenu = AttributeViewContextMenu(self._mainWindow,
item, self._dataModel)
self.contextMenu.exec_(QtGui.QCursor.pos())
def _layerStackContextMenu(self, point):
item = self._ui.layerStackView.itemAt(point)
if item:
self.contextMenu = LayerStackContextMenu(self._mainWindow, item)
self.contextMenu.exec_(QtGui.QCursor.pos())
def _compositionTreeContextMenu(self, point):
item = self._ui.compositionTreeWidget.itemAt(point)
self.contextMenu = LayerStackContextMenu(self._mainWindow, item)
self.contextMenu.exec_(QtGui.QCursor.pos())
# Headers & Columns =================================================
def _propertyViewHeaderContextMenu(self, point):
self.contextMenu = HeaderContextMenu(self._ui.propertyView)
self.contextMenu.exec_(QtGui.QCursor.pos())
def _primViewHeaderContextMenu(self, point):
self.contextMenu = HeaderContextMenu(self._ui.primView)
self.contextMenu.exec_(QtGui.QCursor.pos())
# Widget management =================================================
def _changePrimViewDepth(self, action):
"""Signal handler for view-depth menu items
"""
actionTxt = str(action.text())
# recover the depth factor from the action's name
depth = int(actionTxt[actionTxt.find(" ")+1])
self._expandToDepth(depth)
def _expandToDepth(self, depth, suppressTiming=False):
"""Expands treeview prims to the given depth
"""
with Timer() as t, BusyContext():
# Populate items down to depth. Qt will expand items at depth
# depth-1 so we need to have items at depth. We know something
# changed if any items were added to _itemsToPush.
n = len(self._itemsToPush)
self._populateItem(self._dataModel.stage.GetPseudoRoot(),
maxDepth=depth)
changed = (n != len(self._itemsToPush))
# Expand the tree to depth.
self._ui.primView.expandToDepth(depth-1)
if changed:
# Resize column.
self._scheduleResizePrimView()
# Start pushing prim data to the UI during idle cycles.
# Qt doesn't need the data unless the item is actually
# visible (or affects what's visible) but to avoid
# jerky scrolling when that data is pulled during the
# scroll, we can do it ahead of time. But don't do it
# if we're currently playing to maximize playback
# performance.
if not self._dataModel.playing:
self._primViewUpdateTimer.start()
if self._printTiming and not suppressTiming:
t.PrintTime("expand Prim browser to depth %d" % depth)
def _primViewExpanded(self, index):
"""Signal handler for expanded(index), facilitates lazy tree population
"""
self._populateChildren(self._ui.primView.itemFromIndex(index))
self._scheduleResizePrimView()
def _toggleShowInactivePrims(self):
self._dataModel.viewSettings.showInactivePrims = (
self._ui.actionShow_Inactive_Prims.isChecked())
# Note: _toggleShowInactivePrims, _toggleShowMasterPrims,
# _toggleShowUndefinedPrims, and _toggleShowAbstractPrims all call
# _resetPrimView after being toggled, but only from menu items.
# In the future, we should do this when a signal from
# ViewSettingsDataModel is emitted so the prim view always updates
# when they are changed.
self._dataModel.selection.removeInactivePrims()
self._resetPrimView()
def _toggleShowMasterPrims(self):
self._dataModel.viewSettings.showAllMasterPrims = (
self._ui.actionShow_All_Master_Prims.isChecked())
self._dataModel.selection.removeMasterPrims()
self._resetPrimView()
def _toggleShowUndefinedPrims(self):
self._dataModel.viewSettings.showUndefinedPrims = (
self._ui.actionShow_Undefined_Prims.isChecked())
self._dataModel.selection.removeUndefinedPrims()
self._resetPrimView()
def _toggleShowAbstractPrims(self):
self._dataModel.viewSettings.showAbstractPrims = (
self._ui.actionShow_Abstract_Prims.isChecked())
self._dataModel.selection.removeAbstractPrims()
self._resetPrimView()
def _toggleRolloverPrimInfo(self):
self._dataModel.viewSettings.rolloverPrimInfo = (
self._ui.actionRollover_Prim_Info.isChecked())
if self._stageView:
self._stageView.rolloverPicking = self._dataModel.viewSettings.rolloverPrimInfo
def _tallyPrimStats(self, prim):
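        # Count the prims in the subtree rooted at 'prim', bucketed by type
        # name; used when updating the HUD prim statistics.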
def _GetType(prim):
typeString = prim.GetTypeName()
return HUDEntries.NOTYPE if not typeString else typeString
childTypeDict = {}
primCount = 0
for child in Usd.PrimRange(prim):
typeString = _GetType(child)
# skip pseudoroot
if typeString is HUDEntries.NOTYPE and not prim.GetParent():
continue
primCount += 1
childTypeDict[typeString] = 1 + childTypeDict.get(typeString, 0)
return (primCount, childTypeDict)
def _populateChildren(self, item, depth=0, maxDepth=1, childrenToAdd=None):
"""Populates the children of the given item in the prim viewer.
        If childrenToAdd is given, it is a list of prims to add as
        children."""
if depth < maxDepth and item.prim.IsActive():
if item.needsChildrenPopulated() or childrenToAdd:
# Populate all the children.
if not childrenToAdd:
childrenToAdd = self._getFilteredChildren(item.prim)
item.addChildren([self._populateItem(child, depth+1, maxDepth)
for child in childrenToAdd])
elif depth + 1 < maxDepth:
# The children already exist but we're recursing deeper.
for i in xrange(item.childCount()):
self._populateChildren(item.child(i), depth+1, maxDepth)
def _populateItem(self, prim, depth=0, maxDepth=0):
"""Populates a prim viewer item."""
item = self._primToItemMap.get(prim)
if not item:
# Create a new item. If we want its children we obviously
# have to create those too.
children = self._getFilteredChildren(prim)
item = PrimViewItem(prim, self, len(children) != 0)
self._primToItemMap[prim] = item
self._populateChildren(item, depth, maxDepth, children)
# Push the item after the children so ancestors are processed
# before descendants.
self._itemsToPush.append(item)
else:
# Item already exists. Its children may or may not exist.
# Either way, we need to have them to get grandchildren.
self._populateChildren(item, depth, maxDepth)
return item
def _populateRoots(self):
invisibleRootItem = self._ui.primView.invisibleRootItem()
rootPrim = self._dataModel.stage.GetPseudoRoot()
rootItem = self._populateItem(rootPrim)
self._populateChildren(rootItem)
if self._dataModel.viewSettings.showAllMasterPrims:
self._populateChildren(rootItem,
childrenToAdd=self._dataModel.stage.GetMasters())
# Add all descendents all at once.
invisibleRootItem.addChild(rootItem)
def _getFilteredChildren(self, prim):
return prim.GetFilteredChildren(self._displayPredicate)
def _computeDisplayPredicate(self):
# Take current browser filtering into account when discovering
# prims while traversing
self._displayPredicate = None
if not self._dataModel.viewSettings.showInactivePrims:
self._displayPredicate = Usd.PrimIsActive \
if self._displayPredicate is None \
else self._displayPredicate & Usd.PrimIsActive
if not self._dataModel.viewSettings.showUndefinedPrims:
self._displayPredicate = Usd.PrimIsDefined \
if self._displayPredicate is None \
else self._displayPredicate & Usd.PrimIsDefined
if not self._dataModel.viewSettings.showAbstractPrims:
self._displayPredicate = ~Usd.PrimIsAbstract \
if self._displayPredicate is None \
else self._displayPredicate & ~Usd.PrimIsAbstract
if self._displayPredicate is None:
self._displayPredicate = Usd._PrimFlagsPredicate.Tautology()
# Unless user experience indicates otherwise, we think we always
# want to show instance proxies
self._displayPredicate = Usd.TraverseInstanceProxies(self._displayPredicate)
def _getItemAtPath(self, path, ensureExpanded=False):
# If the prim hasn't been expanded yet, drill down into it.
# Note the explicit str(path) in the following expr is necessary
# because path may be a QString.
path = path if isinstance(path, Sdf.Path) else Sdf.Path(str(path))
parent = self._dataModel.stage.GetPrimAtPath(path)
if not parent:
raise RuntimeError("Prim not found at path in stage: %s" % str(path))
pseudoRoot = self._dataModel.stage.GetPseudoRoot()
if parent not in self._primToItemMap:
# find the first loaded parent
childList = []
while parent != pseudoRoot \
and not parent in self._primToItemMap:
childList.append(parent)
parent = parent.GetParent()
# go one step further, since the first item found could be hidden
# under a norgie and we would want to populate its siblings as well
if parent != pseudoRoot:
childList.append(parent)
# now populate down to the child
for parent in reversed(childList):
try:
item = self._primToItemMap[parent]
self._populateChildren(item)
if ensureExpanded:
item.setExpanded(True)
except:
item = None
# finally, return the requested item, which now should be in
# the map. If something has been added, this can fail. Not
# sure how to rebuild or add this to the map in a minimal way,
# but after the first hiccup, I don't see any ill
# effects. Would love to know a better way...
# - wave 04.17.2018
prim = self._dataModel.stage.GetPrimAtPath(path)
try:
item = self._primToItemMap[prim]
except:
item = None
return item
def selectPseudoroot(self):
"""Selects only the pseudoroot."""
self._dataModel.selection.clearPrims()
def selectEnclosingModel(self):
"""Iterates through all selected prims, selecting their containing model
instead if they are not a model themselves.
"""
oldPrims = self._dataModel.selection.getPrims()
with self._dataModel.selection.batchPrimChanges:
self._dataModel.selection.clearPrims()
for prim in oldPrims:
model = GetEnclosingModelPrim(prim)
if model:
self._dataModel.selection.addPrim(model)
else:
self._dataModel.selection.addPrim(prim)
def selectBoundMaterialForPurpose(self, materialPurpose):
"""Iterates through all selected prims, selecting their bound preview
materials.
"""
oldPrims = self._dataModel.selection.getPrims()
with self._dataModel.selection.batchPrimChanges:
self._dataModel.selection.clearPrims()
for prim in oldPrims:
(boundMaterial, bindingRel) = \
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=materialPurpose)
if boundMaterial:
self._dataModel.selection.addPrim(boundMaterial.GetPrim())
def selectBindingRelForPurpose(self, materialPurpose):
"""Iterates through all selected prims, selecting their bound preview
materials.
"""
relsToSelect = []
oldPrims = self._dataModel.selection.getPrims()
with self._dataModel.selection.batchPrimChanges:
self._dataModel.selection.clearPrims()
for prim in oldPrims:
(boundMaterial, bindingRel) = \
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=materialPurpose)
if boundMaterial and bindingRel:
self._dataModel.selection.addPrim(bindingRel.GetPrim())
relsToSelect.append(bindingRel)
with self._dataModel.selection.batchPropChanges:
self._dataModel.selection.clearProps()
for rel in relsToSelect:
self._dataModel.selection.addProp(rel)
def selectBoundPreviewMaterial(self):
"""Iterates through all selected prims, selecting their bound preview
materials.
"""
self.selectBoundMaterialForPurpose(
materialPurpose=UsdShade.Tokens.preview)
def selectBoundFullMaterial(self):
"""Iterates through all selected prims, selecting their bound preview
materials.
"""
self.selectBoundMaterialForPurpose(
materialPurpose=UsdShade.Tokens.full)
def selectPreviewBindingRel(self):
"""Iterates through all selected prims, computing their resolved
"preview" bindings and selecting the cooresponding binding relationship.
"""
self.selectBindingRelForPurpose(materialPurpose=UsdShade.Tokens.preview)
def selectFullBindingRel(self):
"""Iterates through all selected prims, computing their resolved
"full" bindings and selecting the cooresponding binding relationship.
"""
self.selectBindingRelForPurpose(materialPurpose=UsdShade.Tokens.full)
def _getCommonPrims(self, pathsList):
commonPrefix = os.path.commonprefix(pathsList)
### To prevent /Canopies/TwigA and /Canopies/TwigB
### from registering /Canopies/Twig as prefix
return commonPrefix.rsplit('/', 1)[0]
def _primSelectionChanged(self, added, removed):
"""Called when the prim selection is updated in the data model. Updates
any UI that depends on the state of the selection.
"""
with self._primViewSelectionBlocker:
self._updatePrimViewSelection(added, removed)
self._updatePrimPathText()
if self._stageView:
self._updateHUDPrimStats()
self._updateHUDGeomCounts()
self._stageView.updateView()
self._updatePropertyInspector(
obj=self._dataModel.selection.getFocusPrim())
self._updatePropertyView()
self._refreshAttributeValue()
def _getPrimsFromPaths(self, paths):
"""Get all prims from a list of paths."""
prims = []
for path in paths:
# Ensure we have an Sdf.Path, not a string.
sdfPath = Sdf.Path(str(path))
prim = self._dataModel.stage.GetPrimAtPath(
sdfPath.GetAbsoluteRootOrPrimPath())
if not prim:
raise PrimNotFoundException(sdfPath)
prims.append(prim)
return prims
def _updatePrimPathText(self):
self._ui.currentPathWidget.setText(
', '.join([str(prim.GetPath())
for prim in self._dataModel.selection.getPrims()]))
def _currentPathChanged(self):
"""Called when the currentPathWidget text is changed"""
newPaths = self._ui.currentPathWidget.text()
pathList = re.split(", ?", newPaths)
pathList = filter(lambda path: len(path) != 0, pathList)
try:
prims = self._getPrimsFromPaths(pathList)
except PrimNotFoundException as ex:
# _getPrimsFromPaths couldn't find one of the prims
sys.stderr.write("ERROR: %s\n" % ex.message)
self._updatePrimPathText()
return
explicitProps = any(Sdf.Path(str(path)).IsPropertyPath()
for path in pathList)
if len(prims) == 1 and not explicitProps:
self._dataModel.selection.switchToPrimPath(prims[0].GetPath())
else:
with self._dataModel.selection.batchPrimChanges:
self._dataModel.selection.clearPrims()
for prim in prims:
self._dataModel.selection.addPrim(prim)
with self._dataModel.selection.batchPropChanges:
self._dataModel.selection.clearProps()
for path, prim in zip(pathList, prims):
sdfPath = Sdf.Path(str(path))
if sdfPath.IsPropertyPath():
self._dataModel.selection.addPropPath(path)
self._dataModel.selection.clearComputedProps()
def _refreshPrimViewSelection(self):
"""Refresh the selected prim view items to match the selection data
model.
"""
self._ui.primView.clearSelection()
selectedItems = [
self._getItemAtPath(prim.GetPath(), ensureExpanded=True)
for prim in self._dataModel.selection.getPrims()]
if len(selectedItems) > 0:
self._ui.primView.setCurrentItem(selectedItems[0])
self._ui.primView.updateSelection(selectedItems, [])
def _updatePrimViewSelection(self, added, removed):
"""Do an incremental update to primView's selection using the added and
removed prim paths from the selectionDataModel.
"""
addedItems = [
self._getItemAtPath(path, ensureExpanded=True)
for path in added ]
removedItems = [ self._getItemAtPath(path) for path in removed ]
self._ui.primView.updateSelection(addedItems, removedItems)
def _primsFromSelectionRanges(self, ranges):
"""Iterate over all prims in a QItemSelection from primView."""
for itemRange in ranges:
for index in itemRange.indexes():
if index.column() == 0:
item = self._ui.primView.itemFromIndex(index)
yield item.prim
def _selectionChanged(self, added, removed):
"""Called when primView's selection is changed. If the selection was
changed by a user, update the selection data model with the changes.
"""
if self._primViewSelectionBlocker.blocked():
return
items = self._ui.primView.selectedItems()
if len(items) == 1:
self._dataModel.selection.switchToPrimPath(items[0].prim.GetPath())
else:
with self._dataModel.selection.batchPrimChanges:
for prim in self._primsFromSelectionRanges(added):
self._dataModel.selection.addPrim(prim)
for prim in self._primsFromSelectionRanges(removed):
self._dataModel.selection.removePrim(prim)
def _itemClicked(self, item, col):
# If user clicked in a selected row, we will toggle all selected items;
# otherwise, just the clicked one.
if col == PrimViewColumnIndex.VIS:
itemsToToggle = [ item ]
if item.isSelected():
itemsToToggle = [
self._getItemAtPath(prim.GetPath(), ensureExpanded=True)
for prim in self._dataModel.selection.getPrims()]
changedAny = False
with Timer() as t:
for toToggle in itemsToToggle:
# toggleVis() returns True if the click caused a visibility
# change.
changedOne = toToggle.toggleVis()
if changedOne:
PrimViewItem.propagateVis(toToggle)
changedAny = True
if changedAny:
self.editComplete('Updated prim visibility')
if self._printTiming:
t.PrintTime("update vis column")
def _itemPressed(self, item, col):
if col == PrimViewColumnIndex.DRAWMODE:
self._ui.primView.ShowDrawModeWidgetForItem(item)
def _getPathsFromItems(self, items, prune = False):
# this function returns a list of paths given a list of items; if
# prune=True, it excludes certain paths if a parent path is already
# there. This avoids double-rendering if both a prim and its parent
# are selected.
#
# Don't include the pseudoroot, though, if it's still selected, because
# leaving it in the pruned list will cause everything else to get
# pruned away!
allPaths = [itm.prim.GetPath() for itm in items]
if not prune:
return allPaths
if len(allPaths) > 1:
allPaths = [p for p in allPaths if p != Sdf.Path.absoluteRootPath]
return Sdf.Path.RemoveDescendentPaths(allPaths)
def _primViewContextMenu(self, point):
item = self._ui.primView.itemAt(point)
self._showPrimContextMenu(item)
def _showPrimContextMenu(self, item):
self.contextMenu = PrimContextMenu(self._mainWindow, item, self)
self.contextMenu.exec_(QtGui.QCursor.pos())
def setFrame(self, frame):
"""Set the `frame`.
Args:
frame (float): The new frame value.
"""
frameIndex = self._findClosestFrameIndex(frame)
self._setFrameIndex(frameIndex)
def _setFrameIndex(self, frameIndex):
"""Set the `frameIndex`.
Args:
frameIndex (int): The new frame index value.
"""
# Ensure the frameIndex exists, if not, return.
try:
frame = self._timeSamples[frameIndex]
except IndexError:
return
currentFrame = Usd.TimeCode(frame)
if self._dataModel.currentFrame != currentFrame:
self._dataModel.currentFrame = currentFrame
self._ui.frameSlider.setValue(frameIndex)
self._updateOnFrameChange()
self.setFrameField(self._dataModel.currentFrame.GetValue())
def _updateGUIForFrameChange(self):
"""Called when the frame changes have finished.
e.g When the playback/scrubbing has stopped.
"""
# slow stuff that we do only when not playing
# topology might have changed, recalculate
self._updateHUDGeomCounts()
self._updatePropertyView()
self._refreshAttributeValue()
# value sources of an attribute can change upon frame change
# due to value clips, so we must update the layer stack.
self._updateLayerStackView()
# refresh the visibility column
self._resetPrimViewVis(selItemsOnly=False, authoredVisHasChanged=False)
def _updateOnFrameChange(self):
"""Called when the frame changes, updates the renderer and such"""
# do not update HUD/BBOX if scrubbing or playing
if not (self._dataModel.playing or self._ui.frameSlider.isSliderDown()):
self._updateGUIForFrameChange()
if self._stageView:
# this is the part that renders
if self._dataModel.playing:
highlightMode = self._dataModel.viewSettings.selHighlightMode
if highlightMode == SelectionHighlightModes.ALWAYS:
# We don't want to resend the selection to the renderer
# every frame during playback unless we are actually going
# to see the selection (which is only when highlight mode is
# ALWAYS).
self._stageView.updateSelection()
self._stageView.updateForPlayback()
else:
self._stageView.updateSelection()
self._stageView.updateView()
def saveFrame(self, fileName):
if self._stageView:
pm = QtGui.QPixmap.grabWindow(self._stageView.winId())
pm.save(fileName, 'TIFF')
def _getPropertiesDict(self):
propertiesDict = OrderedDict()
# leave attribute viewer empty if multiple prims selected
if len(self._dataModel.selection.getPrims()) != 1:
return propertiesDict
prim = self._dataModel.selection.getFocusPrim()
composed = _GetCustomAttributes(prim, self._dataModel)
inheritedPrimvars = UsdGeom.PrimvarsAPI(prim).FindInheritablePrimvars()
# There may be overlap between inheritedProps and prim attributes,
# but that's OK because propertiesDict will uniquify them below
inheritedProps = [primvar.GetAttr() for primvar in inheritedPrimvars]
props = prim.GetAttributes() + prim.GetRelationships() + inheritedProps
def cmpFunc(propA, propB):
aName = propA.GetName()
bName = propB.GetName()
return cmp(aName.lower(), bName.lower())
props.sort(cmp=cmpFunc)
# Add the special composed attributes usdview generates
# at the top of our property list.
for prop in composed:
propertiesDict[prop.GetName()] = prop
for prop in props:
propertiesDict[prop.GetName()] = prop
return propertiesDict
def _propertyViewDeselectItem(self, item):
item.setSelected(False)
for i in range(item.childCount()):
item.child(i).setSelected(False)
def _updatePropertyViewSelection(self):
"""Updates property view's selected items to match the data model."""
focusPrim = self._dataModel.selection.getFocusPrim()
propTargets = self._dataModel.selection.getPropTargets()
computedProps = self._dataModel.selection.getComputedPropPaths()
selectedPrimPropNames = dict()
selectedPrimPropNames.update({prop.GetName(): targets
for prop, targets in propTargets.items()})
selectedPrimPropNames.update({propName: set()
for primPath, propName in computedProps})
rootItem = self._ui.propertyView.invisibleRootItem()
with self._propertyViewSelectionBlocker:
for i in range(rootItem.childCount()):
item = rootItem.child(i)
propName = str(item.text(PropertyViewIndex.NAME))
if propName in selectedPrimPropNames:
item.setSelected(True)
# Select relationships and connections.
targets = {prop.GetPath()
for prop in selectedPrimPropNames[propName]}
for j in range(item.childCount()):
childItem = item.child(j)
targetPath = Sdf.Path(
str(childItem.text(PropertyViewIndex.NAME)))
if targetPath in targets:
childItem.setSelected(True)
else:
self._propertyViewDeselectItem(item)
def _updatePropertyViewInternal(self):
frame = self._dataModel.currentFrame
treeWidget = self._ui.propertyView
treeWidget.setTextElideMode(QtCore.Qt.ElideMiddle)
scrollPosition = treeWidget.verticalScrollBar().value()
# get a dictionary of prim attribs/members and store it in self._propertiesDict
self._propertiesDict = self._getPropertiesDict()
with self._propertyViewSelectionBlocker:
treeWidget.clear()
self._populatePropertyInspector()
curPrimSelection = self._dataModel.selection.getFocusPrim()
currRow = 0
for key, primProperty in self._propertiesDict.iteritems():
targets = None
isInheritedProperty = isinstance(primProperty, Usd.Property) and \
(primProperty.GetPrim() != curPrimSelection)
if type(primProperty) == Usd.Attribute:
if primProperty.HasAuthoredConnections():
typeContent = PropertyViewIcons.ATTRIBUTE_WITH_CONNECTIONS()
typeRole = PropertyViewDataRoles.ATTRIBUTE_WITH_CONNNECTIONS
targets = primProperty.GetConnections()
else:
typeContent = PropertyViewIcons.ATTRIBUTE()
typeRole = PropertyViewDataRoles.ATTRIBUTE
elif isinstance(primProperty, ResolvedBoundMaterial):
typeContent = PropertyViewIcons.COMPOSED()
typeRole = PropertyViewDataRoles.RELATIONSHIP_WITH_TARGETS
elif isinstance(primProperty, CustomAttribute):
typeContent = PropertyViewIcons.COMPOSED()
typeRole = PropertyViewDataRoles.COMPOSED
elif isinstance(primProperty, Usd.Relationship):
# Otherwise we have a relationship
targets = primProperty.GetTargets()
if targets:
typeContent = PropertyViewIcons.RELATIONSHIP_WITH_TARGETS()
typeRole = PropertyViewDataRoles.RELATIONSHIP_WITH_TARGETS
else:
typeContent = PropertyViewIcons.RELATIONSHIP()
typeRole = PropertyViewDataRoles.RELATIONSHIP
else:
PrintWarning("Property '%s' has unknown property type <%s>." %
(key, type(primProperty)))
continue
attrText = GetShortString(primProperty, frame)
treeWidget.addTopLevelItem(
QtWidgets.QTreeWidgetItem(["", str(key), attrText]))
treeWidget.topLevelItem(currRow).setIcon(PropertyViewIndex.TYPE,
typeContent)
treeWidget.topLevelItem(currRow).setData(PropertyViewIndex.TYPE,
QtCore.Qt.ItemDataRole.WhatsThisRole,
typeRole)
currItem = treeWidget.topLevelItem(currRow)
valTextFont = GetPropertyTextFont(primProperty, frame)
if valTextFont:
currItem.setFont(PropertyViewIndex.VALUE, valTextFont)
currItem.setFont(PropertyViewIndex.NAME, valTextFont)
else:
currItem.setFont(PropertyViewIndex.NAME, UIFonts.BOLD)
fgColor = GetPropertyColor(primProperty, frame)
# Inherited properties are colored 15% darker, along with the
# addition of "(i)" in the type column.
if isInheritedProperty:
# Add "(i)" to the type column to indicate an inherited
# property.
treeWidget.topLevelItem(currRow).setText(PropertyViewIndex.TYPE,
"(i)")
fgColor = fgColor.darker(115)
currItem.setFont(PropertyViewIndex.TYPE, UIFonts.INHERITED)
currItem.setForeground(PropertyViewIndex.NAME, fgColor)
currItem.setForeground(PropertyViewIndex.VALUE, fgColor)
if targets:
childRow = 0
for t in targets:
valTextFont = GetPropertyTextFont(primProperty, frame) or \
UIFonts.BOLD
# USD does not provide or infer values for relationship or
# connection targets, so we don't display them here.
currItem.addChild(
QtWidgets.QTreeWidgetItem(["", str(t), ""]))
currItem.setFont(PropertyViewIndex.VALUE, valTextFont)
child = currItem.child(childRow)
if typeRole == PropertyViewDataRoles.RELATIONSHIP_WITH_TARGETS:
child.setIcon(PropertyViewIndex.TYPE,
PropertyViewIcons.TARGET())
child.setData(PropertyViewIndex.TYPE,
QtCore.Qt.ItemDataRole.WhatsThisRole,
PropertyViewDataRoles.TARGET)
else:
child.setIcon(PropertyViewIndex.TYPE,
PropertyViewIcons.CONNECTION())
child.setData(PropertyViewIndex.TYPE,
QtCore.Qt.ItemDataRole.WhatsThisRole,
PropertyViewDataRoles.CONNECTION)
childRow += 1
currRow += 1
self._updatePropertyViewSelection()
# For some reason, resetting the scrollbar position here only works on a
# frame change, not when the prim changes. When the prim changes, the
# scrollbar always stays at the top of the list and setValue() has no
# effect.
treeWidget.verticalScrollBar().setValue(scrollPosition)
def _updatePropertyView(self):
""" Sets the contents of the attribute value viewer """
cursorOverride = not self._timer.isActive()
if cursorOverride:
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.BusyCursor)
try:
self._updatePropertyViewInternal()
except Exception as err:
print "Problem encountered updating attribute view: %s" % err
raise
finally:
if cursorOverride:
QtWidgets.QApplication.restoreOverrideCursor()
def _getSelectedObject(self):
focusPrim = self._dataModel.selection.getFocusPrim()
attrs = self._ui.propertyView.selectedItems()
if len(attrs) == 0:
return focusPrim
selectedAttribute = attrs[0]
attrName = str(selectedAttribute.text(PropertyViewIndex.NAME))
if PropTreeWidgetTypeIsRel(selectedAttribute):
return focusPrim.GetRelationship(attrName)
obj = focusPrim.GetAttribute(attrName)
if not obj:
# Check if it is an inherited primvar.
inheritedPrimvar = UsdGeom.PrimvarsAPI(
focusPrim).FindPrimvarWithInheritance(attrName)
if inheritedPrimvar:
obj = inheritedPrimvar.GetAttr()
return obj
def _findIndentPos(self, s):
for index, char in enumerate(s):
if char != ' ':
return index
return len(s) - 1
def _maxToolTipWidth(self):
return 90
def _maxToolTipHeight(self):
return 32
def _trimWidth(self, s, isList=False):
# We special-case the display offset because list
# items will have </li> tags embedded in them.
offset = 10 if isList else 5
if len(s) >= self._maxToolTipWidth():
# For strings, we'll do special ellipsis behavior
# which displays the last 5 chars with an ellipsis
# in between. For other values, we simply display a
# trailing ellipsis to indicate more data.
if s[0] == '\'' and s[-1] == '\'':
return (s[:self._maxToolTipWidth() - offset]
+ '...'
+ s[len(s) - offset:])
else:
return s[:self._maxToolTipWidth()] + '...'
return s
def _limitToolTipSize(self, s, isList=False):
ttStr = ''
lines = s.split('<br>')
for index, line in enumerate(lines):
if index+1 > self._maxToolTipHeight():
break
ttStr += self._trimWidth(line, isList)
if not isList and index != len(lines)-1:
ttStr += '<br>'
if (len(lines) > self._maxToolTipHeight()):
ellipsis = ' '*self._findIndentPos(line) + '...'
if isList:
ellipsis = '<li>' + ellipsis + '</li>'
else:
ellipsis += '<br>'
ttStr += ellipsis
ttStr += self._trimWidth(lines[len(lines)-2], isList)
return ttStr
def _addRichTextIndicators(self, s):
# - We'll need to use html-style spaces to ensure they are respected
# in the toolTip which uses richtext formatting.
# - We wrap the tooltip as a paragraph to ensure &nbsp;'s are
# respected by Qt's rendering engine.
return '<p>' + s.replace(' ', '&nbsp;') + '</p>'
def _limitValueDisplaySize(self, s):
maxValueChars = 300
return s[:maxValueChars]
def _cleanStr(self, s, repl):
# Remove redundant char seqs and strip newlines.
replaced = str(s).replace('\n', repl)
filtered = [u for (u, _) in groupby(replaced.split())]
return ' '.join(filtered)
def _formatMetadataValueView(self, val):
from pprint import pformat, pprint
valStr = self._cleanStr(val, ' ')
ttStr = ''
isList = False
# For iterable things, like VtArrays and lists, we want to print
# a nice numbered list.
if isinstance(val, list) or getattr(val, "_isVtArray", False):
isList = True
# We manually supply the index for our list elements
# because Qt's richtext processor starts the <ol> numbering at 1.
for index, value in enumerate(val):
last = len(val) - 1
trimmed = self._cleanStr(value, ' ')
ttStr += ("<li>" + str(index) + ": " + trimmed + "</li><br>")
elif isinstance(val, dict):
# We stringify all dict elements so they display more nicely.
# For example, by default, the pprint operation would print a
# Vt Array as Vt.Array(N, (E1, ....)). By running it through
# str(..), we'd get [(E1, E2), ....] which is more useful to
# the end user trying to examine their data.
for k, v in val.items():
val[k] = str(v)
# We'll need to strip the quotes generated by the str() operation above
stripQuotes = lambda s: s.replace('\'', '').replace('\"', "")
valStr = stripQuotes(self._cleanStr(val, ' '))
formattedDict = pformat(val)
formattedDictLines = formattedDict.split('\n')
for index, line in enumerate(formattedDictLines):
ttStr += (stripQuotes(line)
+ ('' if index == len(formattedDictLines) - 1 else '<br>'))
else:
ttStr = self._cleanStr(val, '<br>')
valStr = self._limitValueDisplaySize(valStr)
ttStr = self._addRichTextIndicators(
self._limitToolTipSize(ttStr, isList))
return valStr, ttStr
def _updateMetadataView(self, obj=None):
""" Sets the contents of the metadata viewer"""
# XXX: this method gets called multiple times on selection, it
# would be nice to clean that up and ensure we only update as needed.
tableWidget = self._ui.metadataView
self._propertiesDict = self._getPropertiesDict()
# Setup table widget
tableWidget.clearContents()
tableWidget.setRowCount(0)
if obj is None:
obj = self._getSelectedObject()
if not obj:
return
m = obj.GetAllMetadata()
# We have to explicitly add in metadata related to composition arcs
# and value clips here, since GetAllMetadata prunes them out.
#
# XXX: Would be nice to have some official facility to query
# this.
compKeys = [# composition related metadata
"references", "inheritPaths", "specializes",
"payload", "subLayers",
# non-template clip metadata
"clipAssetPaths", "clipTimes", "clipManifestAssetPath",
"clipActive", "clipPrimPath",
# template clip metadata
"clipTemplateAssetPath",
"clipTemplateStartTime", "clipTemplateEndTime",
"clipTemplateStride"]
for k in compKeys:
v = obj.GetMetadata(k)
if not v is None:
m[k] = v
m["[object type]"] = "Attribute" if type(obj) is Usd.Attribute \
else "Prim" if type(obj) is Usd.Prim \
else "Relationship" if type(obj) is Usd.Relationship \
else "Unknown"
m["[path]"] = str(obj.GetPath())
clipMetadata = obj.GetMetadata("clips")
if clipMetadata is None:
clipMetadata = {}
numClipRows = 0
for (clip, data) in clipMetadata.items():
numClipRows += len(data)
m["clips"] = clipMetadata
numMetadataRows = (len(m) - 1) + numClipRows
variantSets = {}
if (isinstance(obj, Usd.Prim)):
variantSetNames = obj.GetVariantSets().GetNames()
for variantSetName in variantSetNames:
variantSet = obj.GetVariantSet(variantSetName)
variantNames = variantSet.GetVariantNames()
variantSelection = variantSet.GetVariantSelection()
combo = VariantComboBox(None, obj, variantSetName, self._mainWindow)
# First index is always empty to indicate no (or invalid)
# variant selection.
combo.addItem('')
for variantName in variantNames:
combo.addItem(variantName)
indexToSelect = combo.findText(variantSelection)
combo.setCurrentIndex(indexToSelect)
variantSets[variantSetName] = combo
tableWidget.setRowCount(numMetadataRows + len(variantSets))
rowIndex = 0
for key in sorted(m.keys()):
if key == "clips":
for (clip, metadataGroup) in m[key].items():
attrName = QtWidgets.QTableWidgetItem(str('clip:' + clip))
tableWidget.setItem(rowIndex, 0, attrName)
for metadata in metadataGroup.keys():
dataPair = (metadata, metadataGroup[metadata])
valStr, ttStr = self._formatMetadataValueView(dataPair)
attrVal = QtWidgets.QTableWidgetItem(valStr)
attrVal.setToolTip(ttStr)
tableWidget.setItem(rowIndex, 1, attrVal)
rowIndex += 1
else:
attrName = QtWidgets.QTableWidgetItem(str(key))
tableWidget.setItem(rowIndex, 0, attrName)
# Get metadata value
if key == "customData":
val = obj.GetCustomData()
else:
val = m[key]
valStr, ttStr = self._formatMetadataValueView(val)
attrVal = QtWidgets.QTableWidgetItem(valStr)
attrVal.setToolTip(ttStr)
tableWidget.setItem(rowIndex, 1, attrVal)
rowIndex += 1
for variantSetName, combo in variantSets.iteritems():
attrName = QtWidgets.QTableWidgetItem(str(variantSetName+ ' variant'))
tableWidget.setItem(rowIndex, 0, attrName)
tableWidget.setCellWidget(rowIndex, 1, combo)
combo.currentIndexChanged.connect(
lambda i, combo=combo: combo.updateVariantSelection(
i, self._printTiming))
rowIndex += 1
tableWidget.resizeColumnToContents(0)
def _updateCompositionView(self, obj=None):
""" Sets the contents of the composition tree view"""
treeWidget = self._ui.compositionTreeWidget
treeWidget.clear()
# Update current spec & current layer, and push those updates
# to the python console
self._onCompositionSelectionChanged()
# If no prim or attribute selected, nothing to show.
if obj is None:
obj = self._getSelectedObject()
if not obj:
return
# For brevity, we display only the basename of layer paths.
def LabelForLayer(l):
return ('~session~' if l == self._dataModel.stage.GetSessionLayer()
else l.GetDisplayName())
# Create treeview items for all sublayers in the layer tree.
def WalkSublayers(parent, node, layerTree, sublayer=False):
layer = layerTree.layer
spec = layer.GetObjectAtPath(node.path)
item = QtWidgets.QTreeWidgetItem(
parent,
[
LabelForLayer(layer),
'sublayer' if sublayer else node.arcType.displayName,
str(node.GetPathAtIntroduction()),
'yes' if bool(spec) else 'no'
] )
# attributes for selection:
item.layer = layer
item.spec = spec
item.identifier = layer.identifier
# attributes for LayerStackContextMenu:
if layer.realPath:
item.layerPath = layer.realPath
if spec:
item.path = node.path
item.setExpanded(True)
item.setToolTip(0, layer.identifier)
if not spec:
for i in range(item.columnCount()):
item.setForeground(i, UIPropertyValueSourceColors.NONE)
for subtree in layerTree.childTrees:
WalkSublayers(item, node, subtree, True)
return item
# Create treeview items for all nodes in the composition index.
def WalkNodes(parent, node):
nodeItem = WalkSublayers(parent, node, node.layerStack.layerTree)
for child in node.children:
WalkNodes(nodeItem, child)
path = obj.GetPath().GetAbsoluteRootOrPrimPath()
prim = self._dataModel.stage.GetPrimAtPath(path)
if not prim:
return
# Populate the treeview with items from the prim index.
index = prim.GetPrimIndex()
if index.IsValid():
WalkNodes(treeWidget, index.rootNode)
def _updateLayerStackView(self, obj=None):
""" Sets the contents of the layer stack viewer"""
tableWidget = self._ui.layerStackView
# Setup table widget
tableWidget.clearContents()
tableWidget.setRowCount(0)
if obj is None:
obj = self._getSelectedObject()
if not obj:
return
path = obj.GetPath()
# The pseudoroot is different enough from prims and properties that
# it makes more sense to process it separately
if path == Sdf.Path.absoluteRootPath:
layers = GetRootLayerStackInfo(
self._dataModel.stage.GetRootLayer())
tableWidget.setColumnCount(2)
tableWidget.horizontalHeaderItem(1).setText('Layer Offset')
tableWidget.setRowCount(len(layers))
for i, layer in enumerate(layers):
layerItem = QtWidgets.QTableWidgetItem(layer.GetHierarchicalDisplayString())
layerItem.layerPath = layer.layer.realPath
layerItem.identifier = layer.layer.identifier
toolTip = "<b>identifier:</b> @%s@ <br> <b>resolved path:</b> %s" % \
(layer.layer.identifier, layerItem.layerPath)
toolTip = self._limitToolTipSize(toolTip)
layerItem.setToolTip(toolTip)
tableWidget.setItem(i, 0, layerItem)
offsetItem = QtWidgets.QTableWidgetItem(layer.GetOffsetString())
offsetItem.layerPath = layer.layer.realPath
offsetItem.identifier = layer.layer.identifier
toolTip = self._limitToolTipSize(str(layer.offset))
offsetItem.setToolTip(toolTip)
tableWidget.setItem(i, 1, offsetItem)
tableWidget.resizeColumnToContents(0)
else:
specs = []
tableWidget.setColumnCount(3)
header = tableWidget.horizontalHeader()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
tableWidget.horizontalHeaderItem(1).setText('Path')
if path.IsPropertyPath():
prop = obj.GetPrim().GetProperty(path.name)
specs = prop.GetPropertyStack(self._dataModel.currentFrame)
c3 = "Value" if (len(specs) == 0 or
isinstance(specs[0], Sdf.AttributeSpec)) else "Target Paths"
tableWidget.setHorizontalHeaderItem(2,
QtWidgets.QTableWidgetItem(c3))
else:
specs = obj.GetPrim().GetPrimStack()
tableWidget.setHorizontalHeaderItem(2,
QtWidgets.QTableWidgetItem('Metadata'))
tableWidget.setRowCount(len(specs))
for i, spec in enumerate(specs):
layerItem = QtWidgets.QTableWidgetItem(spec.layer.GetDisplayName())
layerItem.setToolTip(self._limitToolTipSize(spec.layer.realPath))
tableWidget.setItem(i, 0, layerItem)
pathItem = QtWidgets.QTableWidgetItem(spec.path.pathString)
pathItem.setToolTip(self._limitToolTipSize(spec.path.pathString))
tableWidget.setItem(i, 1, pathItem)
if path.IsPropertyPath():
valStr = GetShortString(
spec, self._dataModel.currentFrame)
ttStr = valStr
valueItem = QtWidgets.QTableWidgetItem(valStr)
sampleBased = (spec.HasInfo('timeSamples') and
spec.layer.GetNumTimeSamplesForPath(path) != -1)
valueItemColor = (UIPropertyValueSourceColors.TIME_SAMPLE if
sampleBased else UIPropertyValueSourceColors.DEFAULT)
valueItem.setForeground(valueItemColor)
valueItem.setToolTip(ttStr)
else:
metadataKeys = spec.GetMetaDataInfoKeys()
metadataDict = {}
for mykey in metadataKeys:
if spec.HasInfo(mykey):
metadataDict[mykey] = spec.GetInfo(mykey)
valStr, ttStr = self._formatMetadataValueView(metadataDict)
valueItem = QtWidgets.QTableWidgetItem(valStr)
valueItem.setToolTip(ttStr)
tableWidget.setItem(i, 2, valueItem)
# Add the data the context menu needs
for j in range(3):
item = tableWidget.item(i, j)
item.layerPath = spec.layer.realPath
item.path = spec.path.pathString
item.identifier = spec.layer.identifier
def _isHUDVisible(self):
"""Checks if the upper HUD is visible by looking at the global HUD
visibility menu as well as the 'Subtree Info' menu"""
return self._dataModel.viewSettings.showHUD and self._dataModel.viewSettings.showHUD_Info
def _updateCameraMaskMenu(self):
if self._ui.actionCameraMask_Full.isChecked():
self._dataModel.viewSettings.cameraMaskMode = CameraMaskModes.FULL
elif self._ui.actionCameraMask_Partial.isChecked():
self._dataModel.viewSettings.cameraMaskMode = CameraMaskModes.PARTIAL
else:
self._dataModel.viewSettings.cameraMaskMode = CameraMaskModes.NONE
def _updateCameraMaskOutlineMenu(self):
self._dataModel.viewSettings.showMask_Outline = (
self._ui.actionCameraMask_Outline.isChecked())
def _pickCameraMaskColor(self):
QtWidgets.QColorDialog.setCustomColor(0, 0xFF000000)
QtWidgets.QColorDialog.setCustomColor(1, 0xFF808080)
color = QtWidgets.QColorDialog.getColor()
color = (
color.redF(),
color.greenF(),
color.blueF(),
color.alphaF()
)
self._dataModel.viewSettings.cameraMaskColor = color
def _updateCameraReticlesInsideMenu(self):
self._dataModel.viewSettings.showReticles_Inside = (
self._ui.actionCameraReticles_Inside.isChecked())
def _updateCameraReticlesOutsideMenu(self):
self._dataModel.viewSettings.showReticles_Outside = (
self._ui.actionCameraReticles_Outside.isChecked())
def _pickCameraReticlesColor(self):
QtWidgets.QColorDialog.setCustomColor(0, 0xFF000000)
QtWidgets.QColorDialog.setCustomColor(1, 0xFF0080FF)
color = QtWidgets.QColorDialog.getColor()
color = (
color.redF(),
color.greenF(),
color.blueF(),
color.alphaF()
)
self._dataModel.viewSettings.cameraReticlesColor = color
def _showHUDChanged(self):
self._dataModel.viewSettings.showHUD = self._ui.actionHUD.isChecked()
def _showHUD_InfoChanged(self):
self._dataModel.viewSettings.showHUD_Info = (
self._ui.actionHUD_Info.isChecked())
def _showHUD_ComplexityChanged(self):
self._dataModel.viewSettings.showHUD_Complexity = (
self._ui.actionHUD_Complexity.isChecked())
def _showHUD_PerformanceChanged(self):
self._dataModel.viewSettings.showHUD_Performance = (
self._ui.actionHUD_Performance.isChecked())
def _showHUD_GPUstatsChanged(self):
self._dataModel.viewSettings.showHUD_GPUstats = (
self._ui.actionHUD_GPUstats.isChecked())
def _getHUDStatKeys(self):
''' returns the keys of the HUD with PRIM and NOTYPE at the top and
CV, VERT, and FACE at the bottom.'''
keys = [k for k in self._upperHUDInfo.keys() if k not in (
HUDEntries.CV, HUDEntries.VERT, HUDEntries.FACE, HUDEntries.PRIM, HUDEntries.NOTYPE)]
keys = [HUDEntries.PRIM, HUDEntries.NOTYPE] + keys + [HUDEntries.CV, HUDEntries.VERT, HUDEntries.FACE]
return keys
def _updateHUDPrimStats(self):
"""update the upper HUD with the proper prim information"""
self._upperHUDInfo = dict()
if self._isHUDVisible():
currentPaths = [n.GetPath()
for n in self._dataModel.selection.getLCDPrims()
if n.IsActive()]
for pth in currentPaths:
count,types = self._tallyPrimStats(
self._dataModel.stage.GetPrimAtPath(pth))
# no entry for Prim counts? initialize it
if not self._upperHUDInfo.has_key(HUDEntries.PRIM):
self._upperHUDInfo[HUDEntries.PRIM] = 0
self._upperHUDInfo[HUDEntries.PRIM] += count
for type in types.iterkeys():
# no entry for this prim type? initialize it
if not self._upperHUDInfo.has_key(type):
self._upperHUDInfo[type] = 0
self._upperHUDInfo[type] += types[type]
if self._stageView:
self._stageView.upperHUDInfo = self._upperHUDInfo
self._stageView.HUDStatKeys = self._getHUDStatKeys()
def _updateHUDGeomCounts(self):
"""updates the upper HUD with the right geom counts
calls _getGeomCounts() to get the info, which means it could be cached"""
if not self._isHUDVisible():
return
# we get multiple geom dicts, if we have multiple prims selected
geomDicts = [self._getGeomCounts(n, self._dataModel.currentFrame)
for n in self._dataModel.selection.getLCDPrims()]
for key in (HUDEntries.CV, HUDEntries.VERT, HUDEntries.FACE):
self._upperHUDInfo[key] = 0
for gDict in geomDicts:
self._upperHUDInfo[key] += gDict[key]
if self._stageView:
self._stageView.upperHUDInfo = self._upperHUDInfo
self._stageView.HUDStatKeys = self._getHUDStatKeys()
def _clearGeomCountsForPrimPath(self, primPath):
entriesToRemove = []
# Clear all entries whose prim is either an ancestor or a descendant
# of the given prim path.
for (p, frame) in self._geomCounts:
if (primPath.HasPrefix(p.GetPath()) or p.GetPath().HasPrefix(primPath)):
entriesToRemove.append((p, frame))
for entry in entriesToRemove:
del self._geomCounts[entry]
def _getGeomCounts( self, prim, frame ):
"""returns cached geom counts if available, or calls _calculateGeomCounts()"""
if not self._geomCounts.has_key((prim,frame)):
self._calculateGeomCounts( prim, frame )
return self._geomCounts[(prim,frame)]
def _accountForFlattening(self,shape):
"""Helper function for computing geomCounts"""
if len(shape) == 1:
return shape[0] / 3
else:
return shape[0]
def _calculateGeomCounts(self, prim, frame):
"""Computes the number of CVs, Verts, and Faces for each prim and each
frame in the stage (for use by the HUD)"""
# This is expensive enough that we should give the user feedback
# that something is happening...
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.BusyCursor)
try:
thisDict = {HUDEntries.CV: 0, HUDEntries.VERT: 0, HUDEntries.FACE: 0}
if prim.IsA(UsdGeom.Curves):
curves = UsdGeom.Curves(prim)
vertexCounts = curves.GetCurveVertexCountsAttr().Get(frame)
if vertexCounts is not None:
for count in vertexCounts:
thisDict[HUDEntries.CV] += count
elif prim.IsA(UsdGeom.Mesh):
mesh = UsdGeom.Mesh(prim)
faceVertexCount = mesh.GetFaceVertexCountsAttr().Get(frame)
faceVertexIndices = mesh.GetFaceVertexIndicesAttr().Get(frame)
if faceVertexCount is not None and faceVertexIndices is not None:
uniqueVerts = set(faceVertexIndices)
thisDict[HUDEntries.VERT] += len(uniqueVerts)
thisDict[HUDEntries.FACE] += len(faceVertexCount)
self._geomCounts[(prim,frame)] = thisDict
for child in prim.GetChildren():
childResult = self._getGeomCounts(child, frame)
for key in (HUDEntries.CV, HUDEntries.VERT, HUDEntries.FACE):
self._geomCounts[(prim,frame)][key] += childResult[key]
except Exception as err:
print "Error encountered while computing prim subtree HUD info: %s" % err
finally:
QtWidgets.QApplication.restoreOverrideCursor()
def _updateNavigationMenu(self):
"""Make the Navigation menu items enabled or disabled depending on the
selected prim."""
anyModels = False
anyBoundPreviewMaterials = False
anyBoundFullMaterials = False
for prim in self._dataModel.selection.getPrims():
if prim.IsA(UsdGeom.Imageable):
imageable = UsdGeom.Imageable(prim)
anyModels = anyModels or GetEnclosingModelPrim(prim) is not None
(previewMat,previewBindingRel) =\
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=UsdShade.Tokens.preview)
anyBoundPreviewMaterials |= bool(previewMat)
(fullMat,fullBindingRel) =\
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=UsdShade.Tokens.full)
anyBoundFullMaterials |= bool(fullMat)
self._ui.actionSelect_Model_Root.setEnabled(anyModels)
self._ui.actionSelect_Bound_Preview_Material.setEnabled(
anyBoundPreviewMaterials)
self._ui.actionSelect_Preview_Binding_Relationship.setEnabled(
anyBoundPreviewMaterials)
self._ui.actionSelect_Bound_Full_Material.setEnabled(
anyBoundFullMaterials)
self._ui.actionSelect_Full_Binding_Relationship.setEnabled(
anyBoundFullMaterials)
def _updateEditMenu(self):
"""Make the Edit Prim menu items enabled or disabled depending on the
selected prim."""
# Use the descendent-pruned selection set to avoid redundant
# traversal of the stage to answer isLoaded...
anyLoadable, unused = GetPrimsLoadability(
self._dataModel.selection.getLCDPrims())
removeEnabled = False
anyImageable = False
anyActive = False
anyInactive = False
for prim in self._dataModel.selection.getPrims():
if prim.IsA(UsdGeom.Imageable):
imageable = UsdGeom.Imageable(prim)
anyImageable = anyImageable or bool(imageable)
removeEnabled = removeEnabled or HasSessionVis(prim)
if prim.IsActive():
anyActive = True
else:
anyInactive = True
self._ui.actionRemove_Session_Visibility.setEnabled(removeEnabled)
self._ui.actionMake_Visible.setEnabled(anyImageable)
self._ui.actionVis_Only.setEnabled(anyImageable)
self._ui.actionMake_Invisible.setEnabled(anyImageable)
self._ui.actionLoad.setEnabled(anyLoadable)
self._ui.actionUnload.setEnabled(anyLoadable)
self._ui.actionActivate.setEnabled(anyInactive)
self._ui.actionDeactivate.setEnabled(anyActive)
def getSelectedItems(self):
return [self._primToItemMap[n]
for n in self._dataModel.selection.getPrims()
if n in self._primToItemMap]
def _getPrimFromPropString(self, p):
return self._dataModel.stage.GetPrimAtPath(p.split('.')[0])
def visSelectedPrims(self):
with BusyContext():
for item in self.getSelectedItems():
item.makeVisible()
self.editComplete('Made selected prims visible')
# makeVisible may cause aunt and uncle prims' authored vis
# to change, so we need to fix up the whole shebang
self._resetPrimViewVis(selItemsOnly=False)
def visOnlySelectedPrims(self):
with BusyContext():
ResetSessionVisibility(self._dataModel.stage)
InvisRootPrims(self._dataModel.stage)
for item in self.getSelectedItems():
item.makeVisible()
self.editComplete('Made ONLY selected prims visible')
# QTreeWidget does not honor setUpdatesEnabled, and updating
# the Vis column for all widgets is pathologically slow.
# It is sadly much much faster to regenerate the entire view
self._resetPrimView()
def invisSelectedPrims(self):
with BusyContext():
for item in self.getSelectedItems():
item.setVisible(False)
self.editComplete('Made selected prims invisible')
self._resetPrimViewVis()
def removeVisSelectedPrims(self):
with BusyContext():
for item in self.getSelectedItems():
item.removeVisibility()
self.editComplete("Removed selected prims' visibility opinions")
self._resetPrimViewVis()
def resetSessionVisibility(self):
with BusyContext():
ResetSessionVisibility(self._dataModel.stage)
self.editComplete('Removed ALL session visibility opinions.')
# QTreeWidget does not honor setUpdatesEnabled, and updating
# the Vis column for all widgets is pathologically slow.
# It is sadly much much faster to regenerate the entire view
self._resetPrimView()
def _setSelectedPrimsActivation(self, active):
"""Activate or deactivate all selected prims."""
with BusyContext():
# We can only activate/deactivate prims which are not in a master.
paths = []
for item in self.getSelectedItems():
if item.prim.IsPseudoRoot():
print("WARNING: Cannot change activation of pseudoroot.")
elif item.isInMaster:
print("WARNING: The prim <" + str(item.prim.GetPrimPath()) +
"> is in a master. Cannot change activation.")
else:
paths.append(item.prim.GetPrimPath())
# If we are deactivating prims, clear the selection so it doesn't
# hold onto paths from inactive prims.
if not active:
self._dataModel.selection.clear()
# If we try to deactivate prims one at a time in Usd, some may have
# become invalid by the time we get to them. Instead, we set the
# active state all at once through Sdf.
layer = self._dataModel.stage.GetEditTarget().GetLayer()
with Sdf.ChangeBlock():
for path in paths:
sdfPrim = Sdf.CreatePrimInLayer(layer, path)
sdfPrim.active = active
pathNames = ", ".join(path.name for path in paths)
if active:
self.editComplete("Activated {}.".format(pathNames))
else:
self.editComplete("Deactivated {}.".format(pathNames))
def activateSelectedPrims(self):
self._setSelectedPrimsActivation(True)
def deactivateSelectedPrims(self):
self._setSelectedPrimsActivation(False)
def loadSelectedPrims(self):
with BusyContext():
primNames=[]
for item in self.getSelectedItems():
item.setLoaded(True)
primNames.append(item.name)
self.editComplete("Loaded %s." % primNames)
def unloadSelectedPrims(self):
with BusyContext():
primNames=[]
for item in self.getSelectedItems():
item.setLoaded(False)
primNames.append(item.name)
self.editComplete("Unloaded %s." % primNames)
def onStageViewMouseDrag(self):
return
def onPrimSelected(self, path, instanceIndex, point, button, modifiers):
# Ignoring middle button until we have something
# meaningfully different for it to do
if button in [QtCore.Qt.LeftButton, QtCore.Qt.RightButton]:
# Expected context-menu behavior is that even with no
# modifiers, if we are activating on something already selected,
# do not change the selection
doContext = (button == QtCore.Qt.RightButton and path
and path != Sdf.Path.emptyPath)
doSelection = True
if doContext:
for selPrim in self._dataModel.selection.getPrims():
selPath = selPrim.GetPath()
if (selPath != Sdf.Path.absoluteRootPath and
path.HasPrefix(selPath)):
doSelection = False
break
if doSelection:
self._dataModel.selection.setPoint(point)
shiftPressed = modifiers & QtCore.Qt.ShiftModifier
ctrlPressed = modifiers & QtCore.Qt.ControlModifier
if path != Sdf.Path.emptyPath:
prim = self._dataModel.stage.GetPrimAtPath(path)
if self._dataModel.viewSettings.pickMode == PickModes.MODELS:
if prim.IsModel():
model = prim
else:
model = GetEnclosingModelPrim(prim)
if model:
prim = model
if self._dataModel.viewSettings.pickMode != PickModes.INSTANCES:
instanceIndex = ALL_INSTANCES
instance = instanceIndex
if instanceIndex != ALL_INSTANCES:
instanceId = GetInstanceIdForIndex(prim, instanceIndex,
self._dataModel.currentFrame)
if instanceId is not None:
instance = instanceId
if shiftPressed:
# Clicking prim while holding shift adds it to the
# selection.
self._dataModel.selection.addPrim(prim, instance)
elif ctrlPressed:
# Clicking prim while holding ctrl toggles it in the
# selection.
self._dataModel.selection.togglePrim(prim, instance)
else:
# Clicking prim with no modifiers sets it as the
# selection.
self._dataModel.selection.switchToPrimPath(
prim.GetPath(), instance)
elif not shiftPressed and not ctrlPressed:
# Clicking the background with no modifiers clears the
# selection.
self._dataModel.selection.clear()
if doContext:
item = self._getItemAtPath(path)
self._showPrimContextMenu(item)
# context menu steals mouse release event from the StageView.
# We need to give it one so it can track its interaction
# mode properly
mrEvent = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonRelease,
QtGui.QCursor.pos(),
QtCore.Qt.RightButton,
QtCore.Qt.MouseButtons(QtCore.Qt.RightButton),
QtCore.Qt.KeyboardModifiers())
QtWidgets.QApplication.sendEvent(self._stageView, mrEvent)
def onRollover(self, path, instanceIndex, modifiers):
prim = self._dataModel.stage.GetPrimAtPath(path)
if prim:
headerStr = ""
propertyStr = ""
materialStr = ""
aiStr = ""
vsStr = ""
model = GetEnclosingModelPrim(prim)
def _MakeModelRelativePath(path, model,
boldPrim=True, boldModel=False):
makeRelative = model and path.HasPrefix(model.GetPath())
if makeRelative:
path = path.MakeRelativePath(model.GetPath().GetParentPath())
pathParts = str(path).split('/')
if boldModel and makeRelative:
pathParts[0] = "<b>%s</b>" % pathParts[0]
if boldPrim:
pathParts[-1] = "<b>%s</b>" % pathParts[-1]
return '/'.join(pathParts)
def _HTMLEscape(s):
return s.replace('&', '&amp;'). \
replace('<', '&lt;'). \
replace('>', '&gt;')
# First add in all model-related data, if present
if model:
groupPath = model.GetPath().GetParentPath()
# Make the model name and prim name bold.
primModelPath = _MakeModelRelativePath(prim.GetPath(),
model, True, True)
headerStr = "%s<br><nobr><small>in group:</small> %s</nobr>" % \
(str(primModelPath),str(groupPath))
# asset info, including computed creation date
mAPI = Usd.ModelAPI(model)
assetInfo = mAPI.GetAssetInfo()
aiStr = "<hr><b>assetInfo</b> for %s:" % model.GetName()
if assetInfo and len(assetInfo) > 0:
specs = model.GetPrimStack()
name, time, owner = GetAssetCreationTime(specs,
mAPI.GetAssetIdentifier())
for key, value in assetInfo.iteritems():
aiStr += "<br> -- <em>%s</em> : %s" % (key, _HTMLEscape(str(value)))
aiStr += "<br><em><small>%s created on %s by %s</small></em>" % \
(_HTMLEscape(name), _HTMLEscape(time),
_HTMLEscape(owner))
else:
aiStr += "<br><small><em>No assetInfo!</em></small>"
# variantSets are by no means required/expected, so if there
# are none, don't bother to declare so.
mVarSets = model.GetVariantSets()
setNames = mVarSets.GetNames()
if len(setNames) > 0:
vsStr = "<hr><b>variantSets</b> on %s:" % model.GetName()
for name in setNames:
sel = mVarSets.GetVariantSelection(name)
vsStr += "<br> -- <em>%s</em> = %s" % (name, sel)
else:
headerStr = _MakeModelRelativePath(path, None)
# Property info: advise about rare visibility and purpose conditions
img = UsdGeom.Imageable(prim)
propertyStr = "<hr><b>Property Summary for %s '%s':</b>" % \
(prim.GetTypeName(), prim.GetName())
# Now cherry pick "important" attrs... could do more, here
if img:
if img.GetVisibilityAttr().ValueMightBeTimeVarying():
propertyStr += "<br> -- <em>visibility</em> varies over time"
purpose = img.GetPurposeAttr().Get()
inheritedPurpose = img.ComputePurpose()
if inheritedPurpose != UsdGeom.Tokens.default_:
propertyStr += "<br> -- <em>purpose</em> is <b>%s</b>%s " %\
(inheritedPurpose, "" if purpose == inheritedPurpose \
else ", <small>(inherited)</small>")
gprim = UsdGeom.Gprim(prim)
if gprim:
ds = gprim.GetDoubleSidedAttr().Get()
orient = gprim.GetOrientationAttr().Get()
propertyStr += "<br> -- <em>doubleSided</em> = %s" % \
( "true" if ds else "false")
propertyStr += "<br> -- <em>orientation</em> = %s" % orient
ptBased = UsdGeom.PointBased(prim)
if ptBased:
# XXX WBN to not have to read points in to get array size
# XXX2 Should try to determine varying topology
points = ptBased.GetPointsAttr().Get(
self._dataModel.currentFrame)
propertyStr += "<br> -- %d points" % len(points)
mesh = UsdGeom.Mesh(prim)
if mesh:
propertyStr += "<br> -- <em>subdivisionScheme</em> = %s" %\
mesh.GetSubdivisionSchemeAttr().Get()
pi = UsdGeom.PointInstancer(prim)
if pi:
indices = pi.GetProtoIndicesAttr().Get(
self._dataModel.currentFrame)
propertyStr += "<br> -- <em>%d instances</em>" % len(indices)
protos = pi.GetPrototypesRel().GetForwardedTargets()
propertyStr += "<br> -- <em>%d unique prototypes</em>" % len(protos)
if instanceIndex >= 0 and instanceIndex < len(indices):
protoIndex = indices[instanceIndex]
if protoIndex < len(protos):
currProtoPath = protos[protoIndex]
# If, as is common, proto is beneath the PI,
# strip the PI's prefix for display
if currProtoPath.HasPrefix(path):
currProtoPath = currProtoPath.MakeRelativePath(path)
propertyStr += "<br> -- <em>instance of prototype <%s></em>" % str(currProtoPath)
# Material info - this IS expected
materialStr = "<hr><b>Material assignment:</b>"
materialAssigns = {}
materialAssigns['generic'] = (genericMat, genericBindingRel) = \
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=UsdShade.Tokens.allPurpose)
materialAssigns[UsdShade.Tokens.preview] = \
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=UsdShade.Tokens.preview)
materialAssigns[UsdShade.Tokens.full] = \
UsdShade.MaterialBindingAPI(prim).ComputeBoundMaterial(
materialPurpose=UsdShade.Tokens.full)
gotValidMaterial = False
for purpose, materialAssign in materialAssigns.iteritems():
(material, bindingRel) = materialAssign
if not material:
continue
gotValidMaterial = True
# skip specific purpose binding display if it is the same
# as the generic binding.
if purpose != 'generic' and bindingRel == genericBindingRel:
continue
# if the material is in the same model, make path
# model-relative
materialStr += "<br><em>%s</em>: %s" % (purpose,
_MakeModelRelativePath(material.GetPath(), model))
bindingRelPath = _MakeModelRelativePath(
bindingRel.GetPath(), model)
materialStr += "<br><small><em>Material binding "\
"relationship: %s</em></small>" % str(bindingRelPath)
if not gotValidMaterial:
materialStr += "<small><em>No assigned Material!</em></small>"
# Instance / master info, if this prim is a native instance, else
# instance index/id if it's from a PointInstancer
instanceStr = ""
if prim.IsInstance():
instanceStr = "<hr><b>Instancing:</b><br>"
instanceStr += "<nobr><small><em>Instance of master:</em></small> %s</nobr>" % \
str(prim.GetMaster().GetPath())
elif instanceIndex != -1:
instanceStr = "<hr><b>Instance Index:</b> %d" % instanceIndex
instanceId = GetInstanceIdForIndex(prim, instanceIndex,
self._dataModel.currentFrame)
if instanceId is not None:
instanceStr += "<br><b>Instance Id:</b> %d" % instanceId
# Then put it all together
tip = headerStr + propertyStr + materialStr + instanceStr + aiStr + vsStr
else:
tip = ""
QtWidgets.QToolTip.showText(QtGui.QCursor.pos(), tip, self._stageView)
def processNavKeyEvent(self, kpEvent):
# This method is a standin for a hotkey processor... for now greatly
# limited in scope, as we mostly use Qt's builtin hotkey dispatch.
# Since we want navigation keys to be hover-context-sensitive, we
# cannot use the native mechanism.
key = kpEvent.key()
if key == QtCore.Qt.Key_Right:
self._advanceFrame()
return True
elif key == QtCore.Qt.Key_Left:
self._retreatFrame()
return True
return False
def _viewSettingChanged(self):
self._refreshViewMenubar()
self._displayPurposeChanged()
self._HUDInfoChanged()
def _refreshViewMenubar(self):
"""Refresh the menubar actions associated with a view setting. This
includes updating checked/unchecked and enabled/disabled states for
actions and submenus to match the values in the ViewSettingsDataModel.
"""
self._refreshRenderModeMenu()
self._refreshColorCorrectionModeMenu()
self._refreshPickModeMenu()
self._refreshComplexityMenu()
self._refreshBBoxMenu()
self._refreshLightsMenu()
self._refreshClearColorsMenu()
self._refreshCameraMenu()
self._refreshCameraGuidesMenu()
self._refreshCameraMaskMenu()
self._refreshCameraReticlesMenu()
self._refreshDisplayPurposesMenu()
self._refreshViewMenu()
self._refreshHUDMenu()
self._refreshShowPrimMenu()
self._refreshRedrawOnScrub()
self._refreshRolloverPrimInfoMenu()
self._refreshSelectionHighlightingMenu()
self._refreshSelectionHighlightColorMenu()
def _refreshRenderModeMenu(self):
for action in self._renderModeActions:
action.setChecked(
str(action.text()) == self._dataModel.viewSettings.renderMode)
def _refreshColorCorrectionModeMenu(self):
for action in self._colorCorrectionActions:
action.setChecked(
str(action.text()) == self._dataModel.viewSettings.colorCorrectionMode)
def _refreshPickModeMenu(self):
for action in self._pickModeActions:
action.setChecked(
str(action.text()) == self._dataModel.viewSettings.pickMode)
def _refreshComplexityMenu(self):
complexityName = self._dataModel.viewSettings.complexity.name
for action in self._complexityActions:
action.setChecked(str(action.text()) == complexityName)
def _refreshBBoxMenu(self):
self._ui.showBBoxes.setChecked(self._dataModel.viewSettings.showBBoxes)
self._ui.showAABBox.setChecked(self._dataModel.viewSettings.showAABBox)
self._ui.showOBBox.setChecked(self._dataModel.viewSettings.showOBBox)
self._ui.showBBoxPlayback.setChecked(
self._dataModel.viewSettings.showBBoxPlayback)
def _refreshLightsMenu(self):
# lighting is not activated until a shaded mode is selected
self._ui.menuLights.setEnabled(self._dataModel.viewSettings.renderMode in ShadedRenderModes)
# three point lights not activated until ambient is deselected
self._ui.threePointLights.setEnabled(
not self._dataModel.viewSettings.ambientLightOnly)
self._ui.actionAmbient_Only.setChecked(
self._dataModel.viewSettings.ambientLightOnly)
self._ui.actionKey.setChecked(
self._dataModel.viewSettings.keyLightEnabled)
self._ui.actionFill.setChecked(
self._dataModel.viewSettings.fillLightEnabled)
self._ui.actionBack.setChecked(
self._dataModel.viewSettings.backLightEnabled)
def _refreshClearColorsMenu(self):
clearColorText = self._dataModel.viewSettings.clearColorText
for action in self._clearColorActions:
action.setChecked(str(action.text()) == clearColorText)
def _refreshCameraMenu(self):
cameraPath = self._dataModel.viewSettings.cameraPath
for action in self._ui.menuCamera.actions():
action.setChecked(action.data() == cameraPath)
def _refreshCameraGuidesMenu(self):
self._ui.actionDisplay_Camera_Oracles.setChecked(
self._dataModel.viewSettings.displayCameraOracles)
self._ui.actionCameraMask_Outline.setChecked(
self._dataModel.viewSettings.showMask_Outline)
def _refreshCameraMaskMenu(self):
viewSettings = self._dataModel.viewSettings
self._ui.actionCameraMask_Full.setChecked(
viewSettings.cameraMaskMode == CameraMaskModes.FULL)
self._ui.actionCameraMask_Partial.setChecked(
viewSettings.cameraMaskMode == CameraMaskModes.PARTIAL)
self._ui.actionCameraMask_None.setChecked(
viewSettings.cameraMaskMode == CameraMaskModes.NONE)
def _refreshCameraReticlesMenu(self):
self._ui.actionCameraReticles_Inside.setChecked(
self._dataModel.viewSettings.showReticles_Inside)
self._ui.actionCameraReticles_Outside.setChecked(
self._dataModel.viewSettings.showReticles_Outside)
def _refreshDisplayPurposesMenu(self):
self._ui.actionDisplay_Guide.setChecked(
self._dataModel.viewSettings.displayGuide)
self._ui.actionDisplay_Proxy.setChecked(
self._dataModel.viewSettings.displayProxy)
self._ui.actionDisplay_Render.setChecked(
self._dataModel.viewSettings.displayRender)
def _refreshViewMenu(self):
self._ui.actionEnable_Scene_Materials.setChecked(
self._dataModel.viewSettings.enableSceneMaterials)
self._ui.actionDisplay_PrimId.setChecked(
self._dataModel.viewSettings.displayPrimId)
self._ui.actionCull_Backfaces.setChecked(
self._dataModel.viewSettings.cullBackfaces)
self._ui.actionAuto_Compute_Clipping_Planes.setChecked(
self._dataModel.viewSettings.autoComputeClippingPlanes)
def _refreshHUDMenu(self):
self._ui.actionHUD.setChecked(self._dataModel.viewSettings.showHUD)
self._ui.actionHUD_Info.setChecked(
self._dataModel.viewSettings.showHUD_Info)
self._ui.actionHUD_Complexity.setChecked(
self._dataModel.viewSettings.showHUD_Complexity)
self._ui.actionHUD_Performance.setChecked(
self._dataModel.viewSettings.showHUD_Performance)
self._ui.actionHUD_GPUstats.setChecked(
self._dataModel.viewSettings.showHUD_GPUstats)
def _refreshShowPrimMenu(self):
self._ui.actionShow_Inactive_Prims.setChecked(
self._dataModel.viewSettings.showInactivePrims)
self._ui.actionShow_All_Master_Prims.setChecked(
self._dataModel.viewSettings.showAllMasterPrims)
self._ui.actionShow_Undefined_Prims.setChecked(
self._dataModel.viewSettings.showUndefinedPrims)
self._ui.actionShow_Abstract_Prims.setChecked(
self._dataModel.viewSettings.showAbstractPrims)
def _refreshRedrawOnScrub(self):
self._ui.redrawOnScrub.setChecked(
self._dataModel.viewSettings.redrawOnScrub)
def _refreshRolloverPrimInfoMenu(self):
self._ui.actionRollover_Prim_Info.setChecked(
self._dataModel.viewSettings.rolloverPrimInfo)
def _refreshSelectionHighlightingMenu(self):
for action in self._selHighlightActions:
action.setChecked(
str(action.text())
== self._dataModel.viewSettings.selHighlightMode)
def _refreshSelectionHighlightColorMenu(self):
for action in self._selHighlightColorActions:
action.setChecked(
str(action.text())
== self._dataModel.viewSettings.highlightColorName)
def _displayPurposeChanged(self):
self._updatePropertyView()
if self._stageView:
self._stageView.updateBboxPurposes()
self._stageView.updateView()
def _HUDInfoChanged(self):
"""Called when a HUD setting that requires info refresh has changed."""
if self._isHUDVisible():
self._updateHUDPrimStats()
self._updateHUDGeomCounts()
def _onPrimsChanged(self, primsChange, propertiesChange):
"""Called when prims in the USD stage have changed."""
from rootDataModel import ChangeNotice
self._updateForStageChanges(
hasPrimResync=(primsChange==ChangeNotice.RESYNC))
|
[] |
[] |
[
"HD_DEFAULT_RENDERER",
"HD_ENABLED",
"USDVIEW_DEBUG",
"HOME"
] |
[]
|
["HD_DEFAULT_RENDERER", "HD_ENABLED", "USDVIEW_DEBUG", "HOME"]
|
python
| 4 | 0 | |
main.go
|
package main
import (
"errors"
"io"
"log"
"net"
"os"
"os/signal"
"syscall"
)
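// optBind and optTarget are read from the environment: BIND is the local
// address to listen on, TARGET is the upstream address to forward traffic to.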
var (
optBind = os.Getenv("BIND")
optTarget = os.Getenv("TARGET")
)
func exit(err *error) {
if *err != nil {
log.Printf("exited with error: %s", (*err).Error())
os.Exit(1)
} else {
log.Println("exited")
}
}
func main() {
var err error
defer exit(&err)
if len(optBind) == 0 {
err = errors.New("$BIND not specified")
return
}
if len(optTarget) == 0 {
err = errors.New("$TARGET not specified")
return
}
if _, err = net.ResolveTCPAddr("tcp", optBind); err != nil {
return
}
if _, err = net.ResolveTCPAddr("tcp", optTarget); err != nil {
return
}
log.Printf("forward started, %s -> %s", optBind, optTarget)
var l net.Listener
if l, err = net.Listen("tcp", optBind); err != nil {
return
}
defer l.Close()
done := make(chan interface{})
go func() {
for {
var err error
var c net.Conn
if c, err = l.Accept(); err != nil {
log.Printf("failed to accept connection: %s", err.Error())
break
}
go handle(c)
}
close(done)
}()
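// Block until either the accept loop exits or a termination signal arrives.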
sigCh := make(chan os.Signal, 1) // buffered so signal.Notify cannot drop the signal
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
select {
case <-done:
err = errors.New("listener closed unexpectedly")
case sig := <-sigCh:
log.Printf("signal received: %s", sig.String())
}
}
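// handle proxies a single client connection: it dials the target, then copies
// bytes in both directions until either side closes; both connections are
// closed via defer when the client-to-target copy returns.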
func handle(c net.Conn) {
var err error
defer c.Close()
var t net.Conn
if t, err = net.Dial("tcp", optTarget); err != nil {
log.Printf("failed to dial %s for %s: %s", optTarget, c.RemoteAddr().String(), err.Error())
return
}
defer t.Close()
go io.Copy(c, t)
io.Copy(t, c)
}
| ["\"BIND\"", "\"TARGET\""] | [] | ["TARGET", "BIND"] | [] | ["TARGET", "BIND"] | go | 2 | 0 | |
ktrain/tests/test_zzz_ner_v1.py |
#!/usr/bin/env python3
"""
Tests of ktrain sequence-tagging (NER) flows
"""
import os
from unittest import TestCase, main, skip
import IPython
import numpy as np
import testenv
os.environ["DISABLE_V2_BEHAVIOR"] = "1"
import ktrain
from ktrain import text as txt
class TestNERClassification(TestCase):
def setUp(self):
TDATA = "conll2003/train.txt"
(trn, val, preproc) = txt.entities_from_txt(TDATA, use_char=True)
self.trn = trn
self.val = val
self.preproc = preproc
def test_ner(self):
wv_url = (
"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz"
)
model = txt.sequence_tagger("bilstm-crf", self.preproc, wv_path_or_url=wv_url)
learner = ktrain.get_learner(
model, train_data=self.trn, val_data=self.val, batch_size=128
)
lr = 0.01
hist = learner.fit(lr, 1)
# test training results
# self.assertAlmostEqual(max(hist.history['lr']), lr)
self.assertGreater(learner.validate(), 0.65)
# test top losses
obs = learner.top_losses(n=1)
self.assertIn(obs[0][0], list(range(len(self.val.x))))
learner.view_top_losses(n=1)
# test weight decay
self.assertEqual(learner.get_weight_decay(), None)
learner.set_weight_decay(1e-2)
self.assertAlmostEqual(learner.get_weight_decay(), 1e-2)
# test load and save model
learner.save_model("/tmp/test_model")
learner.load_model("/tmp/test_model")
# test predictor
SENT = "There is a man named John Smith."
p = ktrain.get_predictor(learner.model, self.preproc)
self.assertEqual(p.predict(SENT)[-2][1], "I-PER")
p.save("/tmp/test_predictor")
p = ktrain.load_predictor("/tmp/test_predictor")
self.assertEqual(p.predict(SENT)[-2][1], "I-PER")
if __name__ == "__main__":
main()
| [] | [] | ["DISABLE_V2_BEHAVIOR"] | [] | ["DISABLE_V2_BEHAVIOR"] | python | 1 | 0 | |
Discovery/Simple/server/simple-server.go |
package main
import (
"fmt"
"net/http"
"os"
"strconv"
consulapi "github.com/hashicorp/consul/api"
)
func main() {
registerServiceWithConsul()
fmt.Println("Starting Simple Server.")
http.HandleFunc("/info", info)
http.ListenAndServe(port(), nil)
}
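// registerServiceWithConsul registers this instance with the local Consul agent and attaches an HTTP health check on the /info endpoint.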
func registerServiceWithConsul() {
config := consulapi.DefaultConfig()
consul, err := consulapi.NewClient(config)
if err != nil {
fmt.Println(err)
}
var registration = new(consulapi.AgentServiceRegistration)
registration.ID = "simple-server"
registration.Name = "simple-server"
address := hostname()
registration.Address = address
	port, _ := strconv.Atoi(port()[1:])
	registration.Port = port
	registration.Check = new(consulapi.AgentServiceCheck)
	registration.Check.HTTP = fmt.Sprintf("http://%s:%v/info", address, port)
	registration.Check.Interval = "5s"
	registration.Check.Timeout = "3s"
	if err := consul.Agent().ServiceRegister(registration); err != nil {
		fmt.Println(err)
	}
}
func info(w http.ResponseWriter, r *http.Request) {
fmt.Println("The /info endpoint is being called...")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Hello Consul Discovery")
}
func port() string {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
return ":" + port
}
func hostname() string {
hostname, _ := os.Hostname()
return hostname
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
mysql/mysql.go |
package mysql
import (
"database/sql"
"fmt"
"os"
"strings"
"github.com/GannettDigital/go-newrelic-plugin/helpers"
"github.com/Sirupsen/logrus"
_ "github.com/go-sql-driver/mysql"
)
const NAME string = "mysql"
const PROVIDER string = "mysql"
const PROTOCOL_VERSION string = "1"
const PLUGIN_VERSION string = "1.0.0"
const STATUS string = "OK"
//mysqlConfig is the keeper of the config
type mysqlConfig struct {
host string
port string
user string
password string
database string
queries string
prefixes string
}
// InventoryData is the data type for inventory data produced by a plugin data
// source and emitted to the agent's inventory data store
type InventoryData map[string]interface{}
// MetricData is the data type for events produced by a plugin data source and
// emitted to the agent's metrics data store
type MetricData map[string]interface{}
// EventData is the data type for single shot events
type EventData map[string]interface{}
// PluginData defines the format of the output JSON that plugins will return
type PluginData struct {
Name string `json:"name"`
ProtocolVersion string `json:"protocol_version"`
PluginVersion string `json:"plugin_version"`
Status string `json:"status"`
Metrics []MetricData `json:"metrics"`
Inventory map[string]InventoryData `json:"inventory"`
Events []EventData `json:"events"`
}
var log *logrus.Logger
var config = mysqlConfig{
host: os.Getenv("HOST"),
port: os.Getenv("PORT"),
user: os.Getenv("USER"),
password: os.Getenv("PASSWORD"),
database: os.Getenv("DATABASE"),
queries: os.Getenv("QUERIES"),
prefixes: os.Getenv("PREFIXES"),
}
func Run(logger *logrus.Logger, prettyPrint bool, version string) {
log = logger
// Initialize the output structure
var data = PluginData{
Name: NAME,
PluginVersion: PLUGIN_VERSION,
ProtocolVersion: PROTOCOL_VERSION,
Status: STATUS,
Metrics: make([]MetricData, 0),
Inventory: make(map[string]InventoryData),
Events: make([]EventData, 0),
}
validateConfig()
db, err := sql.Open("mysql", generateDSN())
if err != nil {
log.WithError(err).Error(fmt.Sprintf("getMetric: Cannot connect to mysql %s:%s", config.host, config.port))
return
}
defer db.Close()
metric, err := getMetrics(db)
if err != nil {
data.Status = err.Error()
}
data.Metrics = append(data.Metrics, metric)
fatalIfErr(helpers.OutputJSON(data, prettyPrint), "OutputJSON error")
}
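// getMetrics runs each configured query and folds two-column (name, value) rows into a single DatastoreSample metric set.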
func getMetrics(db *sql.DB) (map[string]interface{}, error) {
metrics := map[string]interface{}{
"event_type": "DatastoreSample",
"provider": PROVIDER,
}
for _, query := range strings.Split(config.queries, ";") {
query = strings.TrimSpace(query)
if query == "" {
continue
}
rows, err := db.Query(query)
if err != nil {
log.WithError(err).Warn(" query; " + query)
continue
}
defer rows.Close()
cols, _ := rows.Columns()
rawResult := make([][]byte, len(cols))
result := make([]string, len(cols))
dest := make([]interface{}, len(cols)) // A temporary interface{} slice
		for i := range rawResult {
dest[i] = &rawResult[i] // Put pointers to each string in the interface slice
}
for rows.Next() {
err = rows.Scan(dest...)
if err != nil {
log.WithError(err).Warn(fmt.Sprintf("Failed to scan row. Query: %s", query))
continue
}
if len(rawResult) != 2 {
for i, raw := range rawResult {
if raw == nil {
result[i] = "\\N"
} else {
result[i] = string(raw)
}
}
log.Warn(fmt.Sprintf("Unknown query result: query %s result: %#v\n", query, result))
} else {
name := metricName(string(rawResult[0]))
metrics[name] = helpers.AsValue(string(rawResult[1]))
}
}
}
return metrics, nil
}
func metricName(metric string) string {
log.Debug(fmt.Sprintf("metricName: metric: %s", metric))
result := fmt.Sprintf("mysql.%s", helpers.CamelCase(fixPrefix(metric)))
log.Debug(fmt.Sprintf("metricName: result3: %s", result))
return result
}
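// fixPrefix replaces the first underscore with a dot when the metric name starts with one of the configured prefixes.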
func fixPrefix(src string) string {
for _, prefix := range strings.Split(config.prefixes, " ") {
if strings.HasPrefix(src, prefix) {
src = strings.Replace(src, "_", ".", 1)
return src
}
}
return src
}
func generateDSN() string {
dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", config.user, config.password, config.host, config.port, config.database)
log.Debug("generateDSN: %s", dsn)
return dsn
}
func validateConfig() {
if config.host == "" {
log.Fatal("Config Yaml is missing HOST value. Please check the config to continue")
}
if config.port == "" {
log.Fatal("Config Yaml is missing PORT value. Please check the config to continue")
}
if config.user == "" {
log.Fatal("Config Yaml is missing USER value. Please check the config to continue")
}
if config.password == "" {
log.Fatal("Config Yaml is missing PASSWORD value. Please check the config to continue")
}
if config.database == "" {
log.Fatal("Config Yaml is missing DATABASE value. Please check the config to continue")
}
if config.queries == "" {
log.Fatal("Config Yaml is missing QUERIES value. Please check the config to continue")
}
if config.prefixes == "" {
log.Fatal("Config Yaml is missing PREFIXES value. Please check the config to continue")
}
}
func fatalIfErr(err error, msg string) {
if err != nil {
log.WithError(err).Fatal(msg)
}
}
| ["\"HOST\"", "\"PORT\"", "\"USER\"", "\"PASSWORD\"", "\"DATABASE\"", "\"QUERIES\"", "\"PREFIXES\""] | [] | ["PORT", "HOST", "PASSWORD", "PREFIXES", "USER", "DATABASE", "QUERIES"] | [] | ["PORT", "HOST", "PASSWORD", "PREFIXES", "USER", "DATABASE", "QUERIES"] | go | 7 | 0 | |
pandablog/pandablog/wsgi.py |
"""
WSGI config for pandablog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pandablog.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
server/__init__.py |
from flask import Flask, url_for, redirect, render_template, request, make_response, session, jsonify
from flask_session import Session
from flask_caching import Cache
from dotenv import load_dotenv
import os
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from spotipy.cache_handler import CacheFileHandler
import uuid
import re
from .middleware.login_required import login_required
from .lib import caches_folder, session_cache_path
load_dotenv()
app = Flask(__name__)
config = {
"SECRET_KEY": os.getenv('SERVER_SECRET'),
"SESSION_TYPE": 'filesystem',
"SESSION_FILE_DIR": '../.flask_session/',
"DEBUG": True,
"CACHE_TYPE": "FileSystemCache",
"CACHE_DEFAULT_TIMEOUT": 300,
"CACHE_DIR": "../.data-cache",
}
app.config.from_mapping(config)
Session(app)
cache = Cache(app)
if not os.path.exists(caches_folder):
os.makedirs(caches_folder)
@app.route("/")
def index():
if not session.get('uuid'):
# Step 1. Visitor is unknown, give random ID
session['uuid'] = str(uuid.uuid4())
cache_handler = CacheFileHandler(
cache_path=session_cache_path()
)
auth_manager = SpotifyOAuth(
scope='user-read-private,playlist-read-private',
cache_handler=cache_handler,
show_dialog=True
)
if request.args.get("code"):
# Step 3. Being redirected from Spotify auth page
auth_manager.get_access_token(request.args.get("code"))
return redirect(url_for('index'))
if not auth_manager.validate_token(cache_handler.get_cached_token()):
# Step 2. Display sign in link when no token
return render_template('index.html')
# Step 4. Signed in, display data
sp = spotipy.Spotify(auth_manager=auth_manager)
username = sp.me()['display_name']
return render_template('index.html', username=username)
@app.route("/login")
def login():
cache_handler = CacheFileHandler(
cache_path=session_cache_path()
)
auth_manager = SpotifyOAuth(cache_handler=cache_handler)
resp = redirect(auth_manager.get_authorize_url())
return resp
@app.route("/logout")
def logout():
try:
# Remove the CACHE file (.cache-test) so that a new user can authorize.
os.remove(session_cache_path())
session.clear()
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
return redirect(url_for('index'))
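# Playlist tracks are memoized per cache path and playlist ID for 24 hours so repeated searches avoid refetching from Spotify.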
@cache.memoize(60 * 60 * 24) # cache for 24 hours
def user_playlist_tracks(cache_path, playlist_id):
cache_handler = CacheFileHandler(
cache_path=cache_path
)
auth_manager = SpotifyOAuth(cache_handler=cache_handler)
sp = spotipy.Spotify(auth_manager=auth_manager)
print('getting tracks for: ', playlist_id)
return sp.playlist_tracks(playlist_id=playlist_id)['items']
# API endpoints
@app.route('/search-playlists')
@login_required
def playlists(sp):
id = sp.me()['id']
playlists = sp.user_playlists(id)['items']
pattern = re.compile(request.args.get('track_name'), flags=re.IGNORECASE)
matched_tracks = []
for playlist in playlists:
tracks = user_playlist_tracks(session_cache_path(), playlist['id'])
raw_tracks = [track['track'] for track in tracks]
for raw_track in raw_tracks:
            if raw_track is not None and 'name' in raw_track and raw_track['name'] is not None:
track_name = raw_track['name']
if pattern.search(track_name):
track = {
'name': track_name,
'artists': [artist['name'] for artist in raw_track['artists']]
}
matched_tracks.append(track)
return render_template('results.html', tracks=matched_tracks, headings=['track', 'artists'])
if __name__ == '__main__':
print('running in prod!')
app.run(
threaded=True,
port=int(os.environ.get(
"PORT",
os.environ.get("SPOTIPY_REDIRECT_URI", 8080).split(":")[-1])
)
)
| [] | [] | ["PORT", "SERVER_SECRET", "SPOTIPY_REDIRECT_URI"] | [] | ["PORT", "SERVER_SECRET", "SPOTIPY_REDIRECT_URI"] | python | 3 | 0 | |
pkg/jx/cmd/step_tag.go |
package cmd
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/jenkins-x/jx/pkg/jx/cmd/templates"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/spf13/cobra"
"gopkg.in/AlecAivazis/survey.v1/terminal"
"k8s.io/helm/pkg/chartutil"
)
const (
VERSION = "version"
defaultVersionFile = "VERSION"
ValuesYamlRepositoryPrefix = " repository:"
ValuesYamlTagPrefix = " tag:"
)
// CreateClusterOptions the flags for running create cluster
type StepTagOptions struct {
StepOptions
Flags StepTagFlags
}
type StepTagFlags struct {
Version string
VersionFile string
ChartsDir string
ChartValueRepository string
}
var (
stepTagLong = templates.LongDesc(`
This pipeline step command creates a git tag using a version number prefixed with 'v' and pushes it to a
remote origin repo.
This commands effectively runs:
git commit -a -m "release $(VERSION)" --allow-empty
git tag -fa v$(VERSION) -m "Release version $(VERSION)"
git push origin v$(VERSION)
`)
stepTagExample = templates.Examples(`
jx step tag --version 1.0.0
`)
)
func NewCmdStepTag(f Factory, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) *cobra.Command {
options := StepTagOptions{
StepOptions: StepOptions{
CommonOptions: CommonOptions{
Factory: f,
In: in,
Out: out,
Err: errOut,
},
},
}
cmd := &cobra.Command{
Use: "tag",
Short: "Creates a git tag and pushes to remote repo",
Long: stepTagLong,
Example: stepTagExample,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
CheckErr(err)
},
}
options.addCommonFlags(cmd)
cmd.Flags().StringVarP(&options.Flags.Version, VERSION, "v", "", "version number for the tag [required]")
cmd.Flags().StringVarP(&options.Flags.VersionFile, "version-file", "", defaultVersionFile, "The file name used to load the version number from if no '--version' option is specified")
cmd.Flags().StringVarP(&options.Flags.ChartsDir, "charts-dir", "d", "", "the directory of the chart to update the version")
cmd.Flags().StringVarP(&options.Flags.ChartValueRepository, "charts-value-repository", "r", "", "the fully qualified image name without the version tag. e.g. 'dockerregistry/myorg/myapp'")
return cmd
}
func (o *StepTagOptions) Run() error {
if o.Flags.Version == "" {
// lets see if its defined in the VERSION file
path := o.Flags.VersionFile
if path == "" {
path = "VERSION"
}
exists, err := util.FileExists(path)
if exists && err == nil {
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
o.Flags.Version = string(data)
}
}
if o.Flags.Version == "" {
return errors.New("No version flag")
}
if o.Verbose {
log.Infof("looking for charts folder...\n")
}
chartsDir := o.Flags.ChartsDir
if chartsDir == "" {
exists, err := util.FileExists(filepath.Join(chartsDir, "Chart.yaml"))
if !exists && err == nil {
// lets try find the charts/foo dir ignoring the charts/preview dir
chartsDir, err = o.findChartsDir()
if err != nil {
return err
}
}
}
if o.Verbose {
log.Infof("updating chart if it exists\n")
}
err := o.updateChart(o.Flags.Version, chartsDir)
if err != nil {
return err
}
err = o.updateChartValues(o.Flags.Version, chartsDir)
if err != nil {
return err
}
tag := "v" + o.Flags.Version
if o.Verbose {
log.Infof("performing git commit\n")
}
err = o.Git().AddCommit("", fmt.Sprintf("release %s", o.Flags.Version))
if err != nil {
return err
}
err = o.Git().CreateTag("", tag, fmt.Sprintf("release %s", o.Flags.Version))
if err != nil {
return err
}
if o.Verbose {
log.Infof("pushing git tag %s\n", tag)
}
err = o.Git().PushTag("", tag)
if err != nil {
return err
}
log.Successf("Tag %s created and pushed to remote origin", tag)
return nil
}
func (o *StepTagOptions) updateChart(version string, chartsDir string) error {
chartFile := filepath.Join(chartsDir, "Chart.yaml")
exists, err := util.FileExists(chartFile)
if err != nil {
return err
}
if !exists {
return nil
}
chart, err := chartutil.LoadChartfile(chartFile)
if err != nil {
return err
}
if chart.Version == version {
return nil
}
chart.Version = version
err = chartutil.SaveChartfile(chartFile, chart)
if err != nil {
return fmt.Errorf("Failed to save chart %s: %s", chartFile, err)
}
return nil
}
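// updateChartValues rewrites the image repository and tag entries in the chart's values.yaml so they match the new release version.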
func (o *StepTagOptions) updateChartValues(version string, chartsDir string) error {
valuesFile := filepath.Join(chartsDir, "values.yaml")
exists, err := util.FileExists(valuesFile)
if err != nil {
return err
}
if !exists {
return nil
}
	data, err := ioutil.ReadFile(valuesFile)
	if err != nil {
		return err
	}
	lines := strings.Split(string(data), "\n")
chartValueRepository := o.Flags.ChartValueRepository
if chartValueRepository == "" {
chartValueRepository = o.defaultChartValueRepository()
}
updated := false
for idx, line := range lines {
if chartValueRepository != "" && strings.HasPrefix(line, ValuesYamlRepositoryPrefix) {
updated = true
lines[idx] = ValuesYamlRepositoryPrefix + " " + chartValueRepository
} else if strings.HasPrefix(line, ValuesYamlTagPrefix) {
updated = true
lines[idx] = ValuesYamlTagPrefix + " " + version
}
}
if updated {
err = ioutil.WriteFile(valuesFile, []byte(strings.Join(lines, "\n")), DefaultWritePermissions)
if err != nil {
return fmt.Errorf("Failed to save chart file %s: %s", valuesFile, err)
}
}
return nil
}
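// defaultChartValueRepository builds the image repository from the DOCKER_REGISTRY, DOCKER_REGISTRY_ORG (or ORG) and APP_NAME environment variables when no flag is given.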
func (o *StepTagOptions) defaultChartValueRepository() string {
dockerRegistry := os.Getenv("DOCKER_REGISTRY")
dockerRegistryOrg := os.Getenv("DOCKER_REGISTRY_ORG")
if dockerRegistryOrg == "" {
dockerRegistryOrg = os.Getenv("ORG")
}
appName := os.Getenv("APP_NAME")
if dockerRegistry != "" && dockerRegistryOrg != "" && appName != "" {
return dockerRegistry + "/" + dockerRegistryOrg + "/" + appName
}
return ""
}
// lets try find the charts dir
func (o *StepTagOptions) findChartsDir() (string, error) {
files, err := filepath.Glob("*/*/Chart.yaml")
if err != nil {
return "", fmt.Errorf("failed to find Chart.yaml file: %s", err)
}
if len(files) > 0 {
for _, file := range files {
paths := strings.Split(file, string(os.PathSeparator))
if len(paths) > 2 && paths[len(paths)-2] != "preview" {
dir, _ := filepath.Split(file)
return dir, nil
}
}
}
return "", nil
}
| ["\"DOCKER_REGISTRY\"", "\"DOCKER_REGISTRY_ORG\"", "\"ORG\"", "\"APP_NAME\""] | [] | ["DOCKER_REGISTRY", "ORG", "DOCKER_REGISTRY_ORG", "APP_NAME"] | [] | ["DOCKER_REGISTRY", "ORG", "DOCKER_REGISTRY_ORG", "APP_NAME"] | go | 4 | 0 | |
security/pkg/nodeagent/cache/secretcache_test.go |
// Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"reflect"
"strconv"
"sync/atomic"
"testing"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"istio.io/istio/security/pkg/nodeagent/model"
"istio.io/istio/security/pkg/nodeagent/secretfetcher"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
var (
certchain, _ = ioutil.ReadFile("./testdata/cert-chain.pem")
mockCertChain1st = []string{"foo", "rootcert"}
mockCertChainRemain = []string{string(certchain)}
testResourceName = "default"
k8sKey = []byte("fake private k8sKey")
k8sCertChain = []byte("fake cert chain")
k8sCaCert = []byte("fake ca cert")
k8sGenericSecretName = "test-generic-scrt"
k8sTestGenericSecret = &v1.Secret{
Data: map[string][]byte{
"cert": k8sCertChain,
"key": k8sKey,
"cacert": k8sCaCert,
},
ObjectMeta: metav1.ObjectMeta{
Name: k8sGenericSecretName,
Namespace: "test-namespace",
},
Type: "test-generic-secret",
}
k8sTLSSecretName = "test-tls-scrt"
k8sTestTLSSecret = &v1.Secret{
Data: map[string][]byte{
"tls.crt": k8sCertChain,
"tls.key": k8sKey,
},
ObjectMeta: metav1.ObjectMeta{
Name: k8sTLSSecretName,
Namespace: "test-namespace",
},
Type: "test-tls-secret",
}
k8sTLSFallbackSecretName = "fallback-scrt"
k8sTLSFallbackSecretKey = []byte("fallback fake private key")
k8sTLSFallbackSecretCertChain = []byte("fallback fake cert chain")
k8sTestTLSFallbackSecret = &v1.Secret{
Data: map[string][]byte{
"tls.crt": k8sTLSFallbackSecretCertChain,
"tls.key": k8sTLSFallbackSecretKey,
},
ObjectMeta: metav1.ObjectMeta{
Name: k8sTLSFallbackSecretName,
Namespace: "test-namespace",
},
Type: "test-tls-secret",
}
)
func TestWorkloadAgentGenerateSecret(t *testing.T) {
fakeCACli := newMockCAClient()
opt := Options{
SecretTTL: time.Minute,
RotationInterval: 300 * time.Microsecond,
EvictionDuration: 2 * time.Second,
InitialBackoff: 10,
SkipValidateCert: true,
}
fetcher := &secretfetcher.SecretFetcher{
UseCaClient: true,
CaClient: fakeCACli,
}
sc := NewSecretCache(fetcher, notifyCb, opt)
atomic.StoreUint32(&sc.skipTokenExpireCheck, 0)
defer func() {
sc.Close()
atomic.StoreUint32(&sc.skipTokenExpireCheck, 1)
}()
checkBool(t, "opt.AlwaysValidTokenFlag default", opt.AlwaysValidTokenFlag, false)
conID := "proxy1-id"
ctx := context.Background()
gotSecret, err := sc.GenerateSecret(ctx, conID, testResourceName, "jwtToken1")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
if got, want := gotSecret.CertificateChain, convertToBytes(mockCertChain1st); !bytes.Equal(got, want) {
t.Errorf("CertificateChain: got: %v, want: %v", got, want)
}
checkBool(t, "SecretExist", sc.SecretExist(conID, testResourceName, "jwtToken1", gotSecret.Version), true)
checkBool(t, "SecretExist", sc.SecretExist(conID, testResourceName, "nonexisttoken", gotSecret.Version), false)
gotSecretRoot, err := sc.GenerateSecret(ctx, conID, RootCertReqResourceName, "jwtToken1")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
if got, want := gotSecretRoot.RootCert, []byte("rootcert"); !bytes.Equal(got, want) {
t.Errorf("CertificateChain: got: %v, want: %v", got, want)
}
checkBool(t, "SecretExist", sc.SecretExist(conID, RootCertReqResourceName, "jwtToken1", gotSecretRoot.Version), true)
checkBool(t, "SecretExist", sc.SecretExist(conID, RootCertReqResourceName, "nonexisttoken", gotSecretRoot.Version), false)
if got, want := atomic.LoadUint64(&sc.rootCertChangedCount), uint64(0); got != want {
t.Errorf("rootCertChangedCount: got: %v, want: %v", got, want)
}
key := ConnKey{
ConnectionID: conID,
ResourceName: testResourceName,
}
cachedSecret, found := sc.secrets.Load(key)
if !found {
t.Errorf("Failed to find secret for proxy %q from secret store: %v", conID, err)
}
if !reflect.DeepEqual(*gotSecret, cachedSecret) {
t.Errorf("Secret key: got %+v, want %+v", *gotSecret, cachedSecret)
}
sc.configOptions.SkipValidateCert = false
// Try to get secret again using different jwt token, verify secret is re-generated.
gotSecret, err = sc.GenerateSecret(ctx, conID, testResourceName, "newToken")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
if got, want := gotSecret.CertificateChain, convertToBytes(mockCertChainRemain); !bytes.Equal(got, want) {
t.Errorf("CertificateChain: got: %v, want: %v", got, want)
}
// Root cert is parsed from CSR response, it's updated since 2nd CSR is different from 1st.
if got, want := atomic.LoadUint64(&sc.rootCertChangedCount), uint64(1); got != want {
t.Errorf("rootCertChangedCount: got: %v, want: %v", got, want)
}
// Wait until unused secrets are evicted.
wait := 500 * time.Millisecond
retries := 0
for ; retries < 3; retries++ {
time.Sleep(wait)
if _, found := sc.secrets.Load(conID); found {
// Retry after some sleep.
wait *= 2
continue
}
break
}
if retries == 3 {
t.Errorf("Unused secrets failed to be evicted from cache")
}
}
func TestWorkloadAgentRefreshSecret(t *testing.T) {
fakeCACli := newMockCAClient()
opt := Options{
SecretTTL: 200 * time.Microsecond,
RotationInterval: 200 * time.Microsecond,
EvictionDuration: 10 * time.Second,
InitialBackoff: 10,
SkipValidateCert: true,
}
fetcher := &secretfetcher.SecretFetcher{
UseCaClient: true,
CaClient: fakeCACli,
}
sc := NewSecretCache(fetcher, notifyCb, opt)
atomic.StoreUint32(&sc.skipTokenExpireCheck, 0)
defer func() {
sc.Close()
atomic.StoreUint32(&sc.skipTokenExpireCheck, 1)
}()
testConnID := "proxy1-id"
_, err := sc.GenerateSecret(context.Background(), testConnID, testResourceName, "jwtToken1")
if err != nil {
t.Fatalf("Failed to get secrets for %q: %v", testConnID, err)
}
for i := 0; i < 10; i++ {
id := "proxy-id" + strconv.Itoa(i)
sc.GenerateSecret(context.Background(), id, testResourceName, "jwtToken1")
}
// Wait until key rotation job run to update cached secret.
wait := 200 * time.Millisecond
retries := 0
for ; retries < 5; retries++ {
time.Sleep(wait)
if atomic.LoadUint64(&sc.secretChangedCount) == uint64(0) {
// Retry after some sleep.
wait *= 2
continue
}
break
}
if retries == 5 {
t.Errorf("Cached secret failed to get refreshed, %d", atomic.LoadUint64(&sc.secretChangedCount))
}
key := ConnKey{
ConnectionID: testConnID,
ResourceName: testResourceName,
}
if _, found := sc.secrets.Load(key); !found {
t.Errorf("Failed to find secret for %+v from cache", key)
}
sc.DeleteSecret(testConnID, testResourceName)
if _, found := sc.secrets.Load(key); found {
t.Errorf("Found deleted secret for %+v from cache", key)
}
}
// TestGatewayAgentGenerateSecret verifies that ingress gateway agent manages secret cache correctly.
func TestGatewayAgentGenerateSecret(t *testing.T) {
sc := createSecretCache()
fetcher := sc.fetcher
atomic.StoreUint32(&sc.skipTokenExpireCheck, 0)
defer func() {
sc.Close()
atomic.StoreUint32(&sc.skipTokenExpireCheck, 1)
}()
connID1 := "proxy1-id"
connID2 := "proxy2-id"
ctx := context.Background()
type expectedSecret struct {
exist bool
secret *model.SecretItem
}
cases := []struct {
addSecret *v1.Secret
connID string
expectedSecrets []expectedSecret
}{
{
addSecret: k8sTestGenericSecret,
connID: connID1,
expectedSecrets: []expectedSecret{
{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sGenericSecretName,
CertificateChain: k8sCertChain,
PrivateKey: k8sKey,
},
},
{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sGenericSecretName + "-cacert",
RootCert: k8sCaCert,
},
},
},
},
{
addSecret: k8sTestTLSSecret,
connID: connID2,
expectedSecrets: []expectedSecret{
{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sTLSSecretName,
CertificateChain: k8sCertChain,
PrivateKey: k8sKey,
},
},
{
exist: false,
secret: &model.SecretItem{
ResourceName: k8sTLSSecretName + "-cacert",
},
},
},
},
}
for _, c := range cases {
fetcher.AddSecret(c.addSecret)
for _, es := range c.expectedSecrets {
gotSecret, err := sc.GenerateSecret(ctx, c.connID, es.secret.ResourceName, "")
if es.exist {
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
if err := verifySecret(gotSecret, es.secret); err != nil {
t.Errorf("Secret verification failed: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(c.connID, es.secret.ResourceName, "", gotSecret.Version), true)
checkBool(t, "SecretExist", sc.SecretExist(c.connID, "nonexistsecret", "", gotSecret.Version), false)
}
key := ConnKey{
ConnectionID: c.connID,
ResourceName: es.secret.ResourceName,
}
cachedSecret, found := sc.secrets.Load(key)
if es.exist {
if !found {
t.Errorf("Failed to find secret for proxy %q from secret store: %v", c.connID, err)
}
if !reflect.DeepEqual(*gotSecret, cachedSecret) {
t.Errorf("Secret key: got %+v, want %+v", *gotSecret, cachedSecret)
}
}
if _, err := sc.GenerateSecret(ctx, c.connID, "nonexistk8ssecret", ""); err == nil {
t.Error("Generating secret using a non existing kubernetes secret should fail")
}
}
}
// Wait until unused secrets are evicted.
wait := 500 * time.Millisecond
retries := 0
for ; retries < 3; retries++ {
time.Sleep(wait)
if _, found := sc.secrets.Load(connID1); found {
// Retry after some sleep.
wait *= 2
continue
}
if _, found := sc.secrets.Load(connID2); found {
// Retry after some sleep.
wait *= 2
continue
}
break
}
if retries == 3 {
t.Errorf("Unused secrets failed to be evicted from cache")
}
}
// TestGatewayAgentGenerateSecretUsingFallbackSecret verifies that ingress gateway agent picks
// fallback secret for ingress gateway and serves real secret when that secret is ready.
func TestGatewayAgentGenerateSecretUsingFallbackSecret(t *testing.T) {
os.Setenv("INGRESS_GATEWAY_FALLBACK_SECRET", k8sTLSFallbackSecretName)
sc := createSecretCache()
fetcher := sc.fetcher
if fetcher.FallbackSecretName != k8sTLSFallbackSecretName {
t.Errorf("Fallback secret name does not match. Expected %v but got %v",
k8sTLSFallbackSecretName, fetcher.FallbackSecretName)
}
atomic.StoreUint32(&sc.skipTokenExpireCheck, 0)
defer func() {
sc.Close()
atomic.StoreUint32(&sc.skipTokenExpireCheck, 1)
}()
connID1 := "proxy1-id"
connID2 := "proxy2-id"
ctx := context.Background()
type expectedSecret struct {
exist bool
secret *model.SecretItem
}
cases := []struct {
addSecret *v1.Secret
connID string
expectedFbSecret expectedSecret
expectedSecrets []expectedSecret
}{
{
addSecret: k8sTestGenericSecret,
connID: connID1,
expectedFbSecret: expectedSecret{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sGenericSecretName,
CertificateChain: k8sTLSFallbackSecretCertChain,
PrivateKey: k8sTLSFallbackSecretKey,
},
},
expectedSecrets: []expectedSecret{
{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sGenericSecretName,
CertificateChain: k8sCertChain,
PrivateKey: k8sKey,
},
},
{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sGenericSecretName + "-cacert",
RootCert: k8sCaCert,
},
},
},
},
{
addSecret: k8sTestTLSSecret,
connID: connID2,
expectedFbSecret: expectedSecret{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sTLSSecretName,
CertificateChain: k8sTLSFallbackSecretCertChain,
PrivateKey: k8sTLSFallbackSecretKey,
},
},
expectedSecrets: []expectedSecret{
{
exist: true,
secret: &model.SecretItem{
ResourceName: k8sTLSSecretName,
CertificateChain: k8sCertChain,
PrivateKey: k8sKey,
},
},
{
exist: false,
secret: &model.SecretItem{
ResourceName: k8sTLSSecretName + "-cacert",
},
},
},
},
}
fetcher.AddSecret(k8sTestTLSFallbackSecret)
for _, c := range cases {
if sc.ShouldWaitForIngressGatewaySecret(c.connID, c.expectedFbSecret.secret.ResourceName, "") {
t.Fatal("When fallback secret is enabled, node agent should not wait for gateway secret")
}
// Verify that fallback secret is returned
gotSecret, err := sc.GenerateSecret(ctx, c.connID, c.expectedFbSecret.secret.ResourceName, "")
if err != nil {
t.Fatalf("Failed to get fallback secrets: %v", err)
}
if err := verifySecret(gotSecret, c.expectedFbSecret.secret); err != nil {
t.Errorf("Secret verification failed: %v", err)
}
if got, want := sc.SecretExist(c.connID, c.expectedFbSecret.secret.ResourceName, "", gotSecret.Version), true; got != want {
t.Errorf("SecretExist: got: %v, want: %v", got, want)
}
if got, want := sc.SecretExist(c.connID, "nonexistsecret", "", gotSecret.Version), false; got != want {
t.Errorf("SecretExist: got: %v, want: %v", got, want)
}
key := ConnKey{
ConnectionID: c.connID,
ResourceName: c.expectedFbSecret.secret.ResourceName,
}
cachedSecret, found := sc.secrets.Load(key)
if !found {
t.Errorf("Failed to find secret for proxy %q from secret store: %v", c.connID, err)
}
if !reflect.DeepEqual(*gotSecret, cachedSecret) {
t.Errorf("Secret key: got %+v, want %+v", *gotSecret, cachedSecret)
}
// When real secret is added, verify that real secret is returned.
fetcher.AddSecret(c.addSecret)
for _, es := range c.expectedSecrets {
gotSecret, err := sc.GenerateSecret(ctx, c.connID, es.secret.ResourceName, "")
if es.exist {
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
if err := verifySecret(gotSecret, es.secret); err != nil {
t.Errorf("Secret verification failed: %v", err)
}
if got, want := sc.SecretExist(c.connID, es.secret.ResourceName, "", gotSecret.Version), true; got != want {
t.Errorf("SecretExist: got: %v, want: %v", got, want)
}
if got, want := sc.SecretExist(c.connID, "nonexistsecret", "", gotSecret.Version), false; got != want {
t.Errorf("SecretExist: got: %v, want: %v", got, want)
}
}
key := ConnKey{
ConnectionID: c.connID,
ResourceName: es.secret.ResourceName,
}
cachedSecret, found := sc.secrets.Load(key)
if es.exist {
if !found {
t.Errorf("Failed to find secret for proxy %q from secret store: %v", c.connID, err)
}
if !reflect.DeepEqual(*gotSecret, cachedSecret) {
t.Errorf("Secret key: got %+v, want %+v", *gotSecret, cachedSecret)
}
}
}
// When secret is deleted, node agent should not wait for ingress gateway secret.
fetcher.DeleteSecret(c.addSecret)
if sc.ShouldWaitForIngressGatewaySecret(c.connID, c.expectedFbSecret.secret.ResourceName, "") {
t.Fatal("When fallback secret is enabled, node agent should not wait for gateway secret")
}
}
// Wait until unused secrets are evicted.
wait := 500 * time.Millisecond
retries := 0
for ; retries < 3; retries++ {
time.Sleep(wait)
if _, found := sc.secrets.Load(connID1); found {
// Retry after some sleep.
wait *= 2
continue
}
if _, found := sc.secrets.Load(connID2); found {
// Retry after some sleep.
wait *= 2
continue
}
break
}
if retries == 3 {
t.Errorf("Unused secrets failed to be evicted from cache")
}
}
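// createSecretCache builds a SecretCache backed by a fake Kubernetes client; the fallback secret name defaults to "gateway-fallback" and can be overridden via INGRESS_GATEWAY_FALLBACK_SECRET.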
func createSecretCache() *SecretCache {
fetcher := &secretfetcher.SecretFetcher{
UseCaClient: false,
}
fetcher.FallbackSecretName = "gateway-fallback"
if fallbackSecret := os.Getenv("INGRESS_GATEWAY_FALLBACK_SECRET"); fallbackSecret != "" {
fetcher.FallbackSecretName = fallbackSecret
}
fetcher.InitWithKubeClient(fake.NewSimpleClientset().CoreV1())
ch := make(chan struct{})
fetcher.Run(ch)
opt := Options{
SecretTTL: time.Minute,
RotationInterval: 300 * time.Microsecond,
EvictionDuration: 2 * time.Second,
InitialBackoff: 10,
SkipValidateCert: true,
}
return NewSecretCache(fetcher, notifyCb, opt)
}
// TestGatewayAgentDeleteSecret verifies that ingress gateway agent deletes secret cache correctly.
func TestGatewayAgentDeleteSecret(t *testing.T) {
sc := createSecretCache()
fetcher := sc.fetcher
atomic.StoreUint32(&sc.skipTokenExpireCheck, 0)
defer func() {
sc.Close()
atomic.StoreUint32(&sc.skipTokenExpireCheck, 1)
}()
fetcher.AddSecret(k8sTestGenericSecret)
fetcher.AddSecret(k8sTestTLSSecret)
connID := "proxy1-id"
ctx := context.Background()
gotSecret, err := sc.GenerateSecret(ctx, connID, k8sGenericSecretName, "")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName, "", gotSecret.Version), true)
gotSecret, err = sc.GenerateSecret(ctx, connID, k8sGenericSecretName+"-cacert", "")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName+"-cacert", "", gotSecret.Version), true)
gotSecret, err = sc.GenerateSecret(ctx, connID, k8sTLSSecretName, "")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sTLSSecretName, "", gotSecret.Version), true)
_, err = sc.GenerateSecret(ctx, connID, k8sTLSSecretName+"-cacert", "")
if err == nil {
t.Fatalf("Get unexpected secrets: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sTLSSecretName+"-cacert", "", gotSecret.Version), false)
sc.DeleteK8sSecret(k8sGenericSecretName)
sc.DeleteK8sSecret(k8sGenericSecretName + "-cacert")
sc.DeleteK8sSecret(k8sTLSSecretName)
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName, "", gotSecret.Version), false)
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName+"-cacert", "", gotSecret.Version), false)
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sTLSSecretName, "", gotSecret.Version), false)
}
// TestGatewayAgentUpdateSecret verifies that ingress gateway agent updates secret cache correctly.
func TestGatewayAgentUpdateSecret(t *testing.T) {
sc := createSecretCache()
fetcher := sc.fetcher
atomic.StoreUint32(&sc.skipTokenExpireCheck, 0)
defer func() {
sc.Close()
atomic.StoreUint32(&sc.skipTokenExpireCheck, 1)
}()
fetcher.AddSecret(k8sTestGenericSecret)
connID := "proxy1-id"
ctx := context.Background()
gotSecret, err := sc.GenerateSecret(ctx, connID, k8sGenericSecretName, "")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName, "", gotSecret.Version), true)
gotSecret, err = sc.GenerateSecret(ctx, connID, k8sGenericSecretName+"-cacert", "")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName+"-cacert", "", gotSecret.Version), true)
newTime := gotSecret.CreatedTime.Add(time.Duration(10) * time.Second)
newK8sTestSecret := model.SecretItem{
CertificateChain: []byte("new cert chain"),
PrivateKey: []byte("new private key"),
RootCert: []byte("new root cert"),
ResourceName: k8sGenericSecretName,
Token: gotSecret.Token,
CreatedTime: newTime,
Version: newTime.String(),
}
sc.UpdateK8sSecret(k8sGenericSecretName, newK8sTestSecret)
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName, "", gotSecret.Version), false)
sc.UpdateK8sSecret(k8sGenericSecretName+"-cacert", newK8sTestSecret)
checkBool(t, "SecretExist", sc.SecretExist(connID, k8sGenericSecretName+"-cacert", "", gotSecret.Version), false)
}
func TestConstructCSRHostName(t *testing.T) {
data, err := ioutil.ReadFile("./testdata/testjwt")
if err != nil {
t.Errorf("failed to read test jwt file %v", err)
}
testJwt := string(data)
cases := []struct {
trustDomain string
token string
expected string
errFlag bool
}{
{
token: testJwt,
expected: "spiffe://cluster.local/ns/default/sa/sleep",
errFlag: false,
},
{
trustDomain: "fooDomain",
token: testJwt,
expected: "spiffe://fooDomain/ns/default/sa/sleep",
errFlag: false,
},
{
token: "faketoken",
expected: "",
errFlag: true,
},
}
for _, c := range cases {
got, err := constructCSRHostName(c.trustDomain, c.token)
if err != nil {
if c.errFlag == false {
t.Errorf("constructCSRHostName no error, but got %v", err)
}
continue
}
if c.errFlag == true {
t.Error("constructCSRHostName error")
}
if got != c.expected {
t.Errorf("constructCSRHostName got %q, want %q", got, c.expected)
}
}
}
func checkBool(t *testing.T, name string, got bool, want bool) {
if got != want {
t.Errorf("%s: got: %v, want: %v", name, got, want)
}
}
func TestSetAlwaysValidTokenFlag(t *testing.T) {
fakeCACli := newMockCAClient()
opt := Options{
SecretTTL: 200 * time.Microsecond,
RotationInterval: 200 * time.Microsecond,
EvictionDuration: 10 * time.Second,
InitialBackoff: 10,
AlwaysValidTokenFlag: true,
SkipValidateCert: true,
}
fetcher := &secretfetcher.SecretFetcher{
UseCaClient: true,
CaClient: fakeCACli,
}
sc := NewSecretCache(fetcher, notifyCb, opt)
defer func() {
sc.Close()
}()
checkBool(t, "isTokenExpired", sc.isTokenExpired(), false)
_, err := sc.GenerateSecret(context.Background(), "proxy1-id", testResourceName, "jwtToken1")
if err != nil {
t.Fatalf("Failed to get secrets: %v", err)
}
// Wait until key rotation job run to update cached secret.
wait := 200 * time.Millisecond
retries := 0
for ; retries < 5; retries++ {
time.Sleep(wait)
if atomic.LoadUint64(&sc.secretChangedCount) == uint64(0) {
// Retry after some sleep.
wait *= 2
continue
}
break
}
if retries == 5 {
t.Errorf("Cached secret failed to get refreshed, %d", atomic.LoadUint64(&sc.secretChangedCount))
}
}
func verifySecret(gotSecret *model.SecretItem, expectedSecret *model.SecretItem) error {
if expectedSecret.ResourceName != gotSecret.ResourceName {
return fmt.Errorf("resource name verification error: expected %s but got %s", expectedSecret.ResourceName,
gotSecret.ResourceName)
}
if !bytes.Equal(expectedSecret.CertificateChain, gotSecret.CertificateChain) {
return fmt.Errorf("cert chain verification error: expected %v but got %v", expectedSecret.CertificateChain,
gotSecret.CertificateChain)
}
if !bytes.Equal(expectedSecret.PrivateKey, gotSecret.PrivateKey) {
return fmt.Errorf("k8sKey verification error: expected %v but got %v", expectedSecret.PrivateKey,
gotSecret.PrivateKey)
}
return nil
}
func notifyCb(_ string, _ string, _ *model.SecretItem) error {
return nil
}
type mockCAClient struct {
signInvokeCount uint64
}
func newMockCAClient() *mockCAClient {
cl := mockCAClient{}
atomic.StoreUint64(&cl.signInvokeCount, 0)
return &cl
}
func (c *mockCAClient) CSRSign(ctx context.Context, csrPEM []byte, subjectID string,
certValidTTLInSec int64) ([]string /*PEM-encoded certificate chain*/, error) {
if atomic.LoadUint64(&c.signInvokeCount) == 0 {
atomic.AddUint64(&c.signInvokeCount, 1)
return nil, status.Error(codes.Internal, "some internal error")
}
if atomic.LoadUint64(&c.signInvokeCount) == 1 {
atomic.AddUint64(&c.signInvokeCount, 1)
return mockCertChain1st, nil
}
return mockCertChainRemain, nil
}
func convertToBytes(ss []string) []byte {
res := []byte{}
for _, s := range ss {
res = append(res, []byte(s)...)
}
return res
}
| ["\"INGRESS_GATEWAY_FALLBACK_SECRET\""] | [] | ["INGRESS_GATEWAY_FALLBACK_SECRET"] | [] | ["INGRESS_GATEWAY_FALLBACK_SECRET"] | go | 1 | 0 | |
openface/torch_neural_net.py |
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for Torch-based neural network usage."""
import atexit
import binascii
from subprocess import Popen, PIPE
import os
import os.path
import sys
import numpy as np
import cv2
myDir = os.path.dirname(os.path.realpath(__file__))
# Workaround for non-standard terminals, originally reported in
# https://github.com/cmusatyalab/openface/issues/66
os.environ['TERM'] = 'linux'
class TorchNeuralNet:
"""Use a `Torch <http://torch.ch>`_ subprocess for feature extraction."""
#: The default Torch model to use.
defaultModel = os.path.join(myDir, '..', 'models', 'openface', 'nn4.small2.v1.t7')
def __init__(self, model=defaultModel, imgDim=96, cuda=False):
"""__init__(self, model=defaultModel, imgDim=96, cuda=False)
Instantiate a 'TorchNeuralNet' object.
Starts `openface_server.lua
<https://github.com/cmusatyalab/openface/blob/master/openface/openface_server.lua>`_
as a subprocess.
:param model: The path to the Torch model to use.
:type model: str
:param imgDim: The edge length of the square input image.
:type imgDim: int
:param cuda: Flag to use CUDA in the subprocess.
:type cuda: bool
"""
assert model is not None
assert imgDim is not None
assert cuda is not None
self.cmd = ['/usr/bin/env', 'th', os.path.join(myDir, 'openface_server.lua'),
'-model', model, '-imgDim', str(imgDim)]
if cuda:
self.cmd.append('-cuda')
self.p = Popen(self.cmd, stdin=PIPE, stdout=PIPE, bufsize=0)
def exitHandler():
if self.p.poll() is None:
self.p.kill()
atexit.register(exitHandler)
def forwardPath(self, imgPath):
"""
Perform a forward network pass of an image on disk.
:param imgPath: The path to the image.
:type imgPath: str
:return: Vector of features extracted with the neural network.
:rtype: numpy.ndarray
"""
assert imgPath is not None
rc = self.p.poll()
if rc is not None and rc != 0:
raise Exception("""
OpenFace: `openface_server.lua` subprocess has died.
+ Is the Torch command `th` on your PATH? Check with `which th`.
+ If `th` is on your PATH, try running `./util/profile-network.lua`
to see if Torch can correctly load and run the network.
If this gives illegal instruction errors, see the section on
this in our FAQ at http://cmusatyalab.github.io/openface/faq/
+ See this GitHub issue if you are running on
a non-64-bit machine:
https://github.com/cmusatyalab/openface/issues/42
+ Please post further issues to our mailing list at
https://groups.google.com/forum/#!forum/cmu-openface
Diagnostic information:
cmd: {}
============
stdout: {}
""".format(self.cmd, self.p.stdout.read()))
self.p.stdin.write(imgPath + "\n")
output = self.p.stdout.readline()
try:
rep = [float(x) for x in output.strip().split(',')]
rep = np.array(rep)
return rep
except Exception as e:
self.p.kill()
stdout, stderr = self.p.communicate()
print("""
Error getting result from Torch subprocess.
Line read: {}
Exception:
{}
============
stdout: {}
""".format(output, str(e), stdout))
sys.exit(-1)
def forward(self, rgbImg):
"""
Perform a forward network pass of an RGB image.
:param rgbImg: RGB image to process. Shape: (imgDim, imgDim, 3)
:type rgbImg: numpy.ndarray
:return: Vector of features extracted from the neural network.
:rtype: numpy.ndarray
"""
assert rgbImg is not None
t = '/tmp/openface-torchwrap-{}.png'.format(
binascii.b2a_hex(os.urandom(8)))
bgrImg = cv2.cvtColor(rgbImg, cv2.COLOR_RGB2BGR)
cv2.imwrite(t, bgrImg)
rep = self.forwardPath(t)
os.remove(t)
return rep
| [] | [] | ["TERM"] | [] | ["TERM"] | python | 1 | 0 | |
app/auth/router.go |
package auth
import (
"fmt"
"os"
"github.com/gofiber/fiber/v2"
)
var (
databaseURL string
r Repository
s Service
)
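// NewRouter wires the auth repository and service from DATABASE_URL and registers the health check, login/register, user-role and role CRUD endpoints on the Fiber app.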
func NewRouter(app *fiber.App) *fiber.App {
	// Get DATABASE_URL (left empty when the variable is not set)
	databaseURL = os.Getenv("DATABASE_URL")
	// Set UserRepository
r = NewRepo(databaseURL)
// Set UserService
s = NewService(r)
// Health Check
app.Get("/health", func(c *fiber.Ctx) error {
return c.Status(200).JSON(&fiber.Map{
"health": "UP",
})
})
app.Post("/login", func(c *fiber.Ctx) error {
var body = new(Login)
if err := c.BodyParser(body); err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": "Something went wrong",
})
}
res, err := s.Login(*body)
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": err.Error(),
})
}
cookie := new(fiber.Cookie)
cookie.Name = "refresh_token"
cookie.Value = (*res).RefreshToken
c.Cookie(cookie)
return c.Status(200).JSON(&fiber.Map{
"message": "Login successfully",
"data": *res,
})
})
app.Post("/register", func(c *fiber.Ctx) error {
var body = new(Register)
if err := c.BodyParser(body); err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": "Something went wrong",
})
}
res, err := s.Register(*body)
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": err.Error(),
})
}
cookie := new(fiber.Cookie)
cookie.Name = "refresh_token"
cookie.Value = (*res).RefreshToken
c.Cookie(cookie)
return c.Status(200).JSON(&fiber.Map{
"message": "Register successfully",
"data": *res,
})
})
app.Post("/user/role", func(c *fiber.Ctx) error {
userrole := new(UserRoleInput)
if err := c.BodyParser(userrole); err != nil {
return err
}
return c.Status(200).JSON(&fiber.Map{
"message": "User role added is running 🔥",
"data": &userrole,
})
})
app.Delete("/user/role", func(c *fiber.Ctx) error {
userrole := new(UserRoleInput)
if err := c.BodyParser(userrole); err != nil {
return err
}
return c.Status(200).JSON(&fiber.Map{
"message": "User role deleted is running 🔥",
"data": &userrole,
})
})
// Role
app.Get("/role", func(c *fiber.Ctx) error {
res, err := s.GetRoles()
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": fmt.Sprintf("Something wrong : %s", err.Error()),
})
}
return c.Status(200).JSON(&fiber.Map{
"message": "Get all Role",
"data": *res,
})
})
app.Get("/role/:id", func(c *fiber.Ctx) error {
res, err := s.GetRoleByID(c.Params("id"))
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": fmt.Sprintf("Something wrong : %s", err.Error()),
})
}
return c.Status(200).JSON(&fiber.Map{
"message": fmt.Sprintf("Role id : %s", c.Params("id")),
"data": *res,
})
})
app.Post("/role", func(c *fiber.Ctx) error {
role := new(RoleInput)
if err := c.BodyParser(role); err != nil {
return err
}
res, err := s.AddRole(*role)
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": fmt.Sprintf("Something wrong : %s", err.Error()),
})
}
return c.Status(200).JSON(&fiber.Map{
"message": "Role created",
"data": *res,
})
})
app.Put("/role", func(c *fiber.Ctx) error {
role := new(RoleOutput)
if err := c.BodyParser(role); err != nil {
return err
}
res, err := s.UpdateRole(*role)
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": fmt.Sprintf("Something wrong : %s", err.Error()),
})
}
return c.Status(200).JSON(&fiber.Map{
"message": "Role updated",
"data": *res,
})
})
app.Delete("/role/:id", func(c *fiber.Ctx) error {
err := s.DeleteRoleByID(c.Params("id"))
if err != nil {
return c.Status(500).JSON(&fiber.Map{
"message": fmt.Sprintf("Something wrong : %s", err.Error()),
})
}
return c.Status(200).JSON(&fiber.Map{
"message": fmt.Sprintf("Role id : %s deleted", c.Params("id")),
})
})
return app
}
| ["\"DATABASE_URL\""] | [] | ["DATABASE_URL"] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
yatube/manage.py |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yatube.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
hugolib/testhelpers_test.go |
package hugolib
import (
"bytes"
"fmt"
"image/jpeg"
"io"
"io/fs"
"math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"text/template"
"time"
"unicode/utf8"
"github.com/gohugoio/hugo/config/security"
"github.com/gohugoio/hugo/htesting"
"github.com/gohugoio/hugo/output"
"github.com/gohugoio/hugo/parser/metadecoders"
"github.com/google/go-cmp/cmp"
"github.com/gohugoio/hugo/parser"
"github.com/fsnotify/fsnotify"
"github.com/gohugoio/hugo/common/hexec"
"github.com/gohugoio/hugo/common/maps"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/deps"
"github.com/gohugoio/hugo/resources/page"
"github.com/sanity-io/litter"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/tpl"
"github.com/gohugoio/hugo/resources/resource"
qt "github.com/frankban/quicktest"
"github.com/gohugoio/hugo/common/loggers"
"github.com/gohugoio/hugo/hugofs"
)
var (
deepEqualsPages = qt.CmpEquals(cmp.Comparer(func(p1, p2 *pageState) bool { return p1 == p2 }))
deepEqualsOutputFormats = qt.CmpEquals(cmp.Comparer(func(o1, o2 output.Format) bool {
return o1.Name == o2.Name && o1.MediaType.Type() == o2.MediaType.Type()
}))
)
type sitesBuilder struct {
Cfg config.Provider
environ []string
Fs *hugofs.Fs
T testing.TB
depsCfg deps.DepsCfg
*qt.C
logger loggers.Logger
rnd *rand.Rand
dumper litter.Options
// Used to test partial rebuilds.
changedFiles []string
removedFiles []string
// Aka the Hugo server mode.
running bool
H *HugoSites
theme string
// Default toml
configFormat string
configFileSet bool
configSet bool
// Default is empty.
// TODO(bep) revisit this and consider always setting it to something.
// Consider this in relation to using the BaseFs.PublishFs to all publishing.
workingDir string
addNothing bool
// Base data/content
contentFilePairs []filenameContent
templateFilePairs []filenameContent
i18nFilePairs []filenameContent
dataFilePairs []filenameContent
// Additional data/content.
// As in "use the base, but add these on top".
contentFilePairsAdded []filenameContent
templateFilePairsAdded []filenameContent
i18nFilePairsAdded []filenameContent
dataFilePairsAdded []filenameContent
}
type filenameContent struct {
filename string
content string
}
func newTestSitesBuilder(t testing.TB) *sitesBuilder {
v := config.NewWithTestDefaults()
fs := hugofs.NewMem(v)
litterOptions := litter.Options{
HidePrivateFields: true,
StripPackageNames: true,
Separator: " ",
}
return &sitesBuilder{
T: t, C: qt.New(t), Fs: fs, configFormat: "toml",
dumper: litterOptions, rnd: rand.New(rand.NewSource(time.Now().Unix())),
}
}
func newTestSitesBuilderFromDepsCfg(t testing.TB, d deps.DepsCfg) *sitesBuilder {
c := qt.New(t)
litterOptions := litter.Options{
HidePrivateFields: true,
StripPackageNames: true,
Separator: " ",
}
b := &sitesBuilder{T: t, C: c, depsCfg: d, Fs: d.Fs, dumper: litterOptions, rnd: rand.New(rand.NewSource(time.Now().Unix()))}
workingDir := d.Cfg.GetString("workingDir")
b.WithWorkingDir(workingDir)
return b.WithViper(d.Cfg.(config.Provider))
}
func (s *sitesBuilder) Running() *sitesBuilder {
s.running = true
return s
}
func (s *sitesBuilder) WithNothingAdded() *sitesBuilder {
s.addNothing = true
return s
}
func (s *sitesBuilder) WithLogger(logger loggers.Logger) *sitesBuilder {
s.logger = logger
return s
}
func (s *sitesBuilder) WithWorkingDir(dir string) *sitesBuilder {
s.workingDir = filepath.FromSlash(dir)
return s
}
func (s *sitesBuilder) WithEnviron(env ...string) *sitesBuilder {
for i := 0; i < len(env); i += 2 {
s.environ = append(s.environ, fmt.Sprintf("%s=%s", env[i], env[i+1]))
}
return s
}
func (s *sitesBuilder) WithConfigTemplate(data any, format, configTemplate string) *sitesBuilder {
s.T.Helper()
if format == "" {
format = "toml"
}
templ, err := template.New("test").Parse(configTemplate)
if err != nil {
s.Fatalf("Template parse failed: %s", err)
}
var b bytes.Buffer
templ.Execute(&b, data)
return s.WithConfigFile(format, b.String())
}
func (s *sitesBuilder) WithViper(v config.Provider) *sitesBuilder {
s.T.Helper()
if s.configFileSet {
s.T.Fatal("WithViper: use Viper or config.toml, not both")
}
defer func() {
s.configSet = true
}()
// Write to a config file to make sure the tests follow the same code path.
var buff bytes.Buffer
m := v.Get("").(maps.Params)
s.Assert(parser.InterfaceToConfig(m, metadecoders.TOML, &buff), qt.IsNil)
return s.WithConfigFile("toml", buff.String())
}
func (s *sitesBuilder) WithConfigFile(format, conf string) *sitesBuilder {
s.T.Helper()
if s.configSet {
s.T.Fatal("WithConfigFile: use config.Config or config.toml, not both")
}
s.configFileSet = true
filename := s.absFilename("config." + format)
writeSource(s.T, s.Fs, filename, conf)
s.configFormat = format
return s
}
func (s *sitesBuilder) WithThemeConfigFile(format, conf string) *sitesBuilder {
s.T.Helper()
if s.theme == "" {
s.theme = "test-theme"
}
filename := filepath.Join("themes", s.theme, "config."+format)
writeSource(s.T, s.Fs, s.absFilename(filename), conf)
return s
}
func (s *sitesBuilder) WithSourceFile(filenameContent ...string) *sitesBuilder {
s.T.Helper()
for i := 0; i < len(filenameContent); i += 2 {
writeSource(s.T, s.Fs, s.absFilename(filenameContent[i]), filenameContent[i+1])
}
return s
}
func (s *sitesBuilder) absFilename(filename string) string {
filename = filepath.FromSlash(filename)
if filepath.IsAbs(filename) {
return filename
}
if s.workingDir != "" && !strings.HasPrefix(filename, s.workingDir) {
filename = filepath.Join(s.workingDir, filename)
}
return filename
}
const commonConfigSections = `
[services]
[services.disqus]
shortname = "disqus_shortname"
[services.googleAnalytics]
id = "UA-ga_id"
[privacy]
[privacy.disqus]
disable = false
[privacy.googleAnalytics]
respectDoNotTrack = true
anonymizeIP = true
[privacy.instagram]
simple = true
[privacy.twitter]
enableDNT = true
[privacy.vimeo]
disable = false
[privacy.youtube]
disable = false
privacyEnhanced = true
`
func (s *sitesBuilder) WithSimpleConfigFile() *sitesBuilder {
s.T.Helper()
return s.WithSimpleConfigFileAndBaseURL("http://example.com/")
}
func (s *sitesBuilder) WithSimpleConfigFileAndBaseURL(baseURL string) *sitesBuilder {
s.T.Helper()
return s.WithSimpleConfigFileAndSettings(map[string]any{"baseURL": baseURL})
}
func (s *sitesBuilder) WithSimpleConfigFileAndSettings(settings any) *sitesBuilder {
s.T.Helper()
var buf bytes.Buffer
parser.InterfaceToConfig(settings, metadecoders.TOML, &buf)
config := buf.String() + commonConfigSections
return s.WithConfigFile("toml", config)
}
func (s *sitesBuilder) WithDefaultMultiSiteConfig() *sitesBuilder {
defaultMultiSiteConfig := `
baseURL = "http://example.com/blog"
paginate = 1
disablePathToLower = true
defaultContentLanguage = "en"
defaultContentLanguageInSubdir = true
[permalinks]
other = "/somewhere/else/:filename"
[Taxonomies]
tag = "tags"
[Languages]
[Languages.en]
weight = 10
title = "In English"
languageName = "English"
[[Languages.en.menu.main]]
url = "/"
name = "Home"
weight = 0
[Languages.fr]
weight = 20
title = "Le Français"
languageName = "Français"
[Languages.fr.Taxonomies]
plaque = "plaques"
[Languages.nn]
weight = 30
title = "På nynorsk"
languageName = "Nynorsk"
paginatePath = "side"
[Languages.nn.Taxonomies]
lag = "lag"
[[Languages.nn.menu.main]]
url = "/"
name = "Heim"
weight = 1
[Languages.nb]
weight = 40
title = "På bokmål"
languageName = "Bokmål"
paginatePath = "side"
[Languages.nb.Taxonomies]
lag = "lag"
` + commonConfigSections
return s.WithConfigFile("toml", defaultMultiSiteConfig)
}
func (s *sitesBuilder) WithSunset(in string) {
// Write a real image into one of the bundles above.
src, err := os.Open(filepath.FromSlash("testdata/sunset.jpg"))
s.Assert(err, qt.IsNil)
out, err := s.Fs.Source.Create(filepath.FromSlash(filepath.Join(s.workingDir, in)))
s.Assert(err, qt.IsNil)
_, err = io.Copy(out, src)
s.Assert(err, qt.IsNil)
out.Close()
src.Close()
}
func (s *sitesBuilder) createFilenameContent(pairs []string) []filenameContent {
var slice []filenameContent
s.appendFilenameContent(&slice, pairs...)
return slice
}
func (s *sitesBuilder) appendFilenameContent(slice *[]filenameContent, pairs ...string) {
if len(pairs)%2 != 0 {
panic("file content mismatch")
}
for i := 0; i < len(pairs); i += 2 {
c := filenameContent{
filename: pairs[i],
content: pairs[i+1],
}
*slice = append(*slice, c)
}
}
func (s *sitesBuilder) WithContent(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.contentFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithContentAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.contentFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithTemplates(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.templateFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithTemplatesAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.templateFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithData(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.dataFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithDataAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.dataFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithI18n(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.i18nFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithI18nAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.i18nFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) EditFiles(filenameContent ...string) *sitesBuilder {
for i := 0; i < len(filenameContent); i += 2 {
filename, content := filepath.FromSlash(filenameContent[i]), filenameContent[i+1]
absFilename := s.absFilename(filename)
s.changedFiles = append(s.changedFiles, absFilename)
writeSource(s.T, s.Fs, absFilename, content)
}
return s
}
func (s *sitesBuilder) RemoveFiles(filenames ...string) *sitesBuilder {
for _, filename := range filenames {
absFilename := s.absFilename(filename)
s.removedFiles = append(s.removedFiles, absFilename)
s.Assert(s.Fs.Source.Remove(absFilename), qt.IsNil)
}
return s
}
func (s *sitesBuilder) writeFilePairs(folder string, files []filenameContent) *sitesBuilder {
// We have had some "filesystem ordering" bugs that we have not discovered in
// our tests running with the in-memory filesystem.
// That filesystem is backed by a map, so it is unclear how much this helps,
// but some randomness in tests doesn't hurt.
// TODO(bep) this turns out to be more confusing than helpful.
// s.rnd.Shuffle(len(files), func(i, j int) { files[i], files[j] = files[j], files[i] })
for _, fc := range files {
target := folder
// TODO(bep) clean up this magic.
if strings.HasPrefix(fc.filename, folder) {
target = ""
}
if s.workingDir != "" {
target = filepath.Join(s.workingDir, target)
}
writeSource(s.T, s.Fs, filepath.Join(target, fc.filename), fc.content)
}
return s
}
func (s *sitesBuilder) CreateSites() *sitesBuilder {
if err := s.CreateSitesE(); err != nil {
s.Fatalf("Failed to create sites: %s", err)
}
s.Assert(s.Fs.PublishDir, qt.IsNotNil)
s.Assert(s.Fs.WorkingDirReadOnly, qt.IsNotNil)
return s
}
func (s *sitesBuilder) LoadConfig() error {
if !s.configFileSet {
s.WithSimpleConfigFile()
}
cfg, _, err := LoadConfig(ConfigSourceDescriptor{
WorkingDir: s.workingDir,
Fs: s.Fs.Source,
Logger: s.logger,
Environ: s.environ,
Filename: "config." + s.configFormat,
}, func(cfg config.Provider) error {
return nil
})
if err != nil {
return err
}
s.Cfg = cfg
return nil
}
func (s *sitesBuilder) CreateSitesE() error {
if !s.addNothing {
if _, ok := s.Fs.Source.(*afero.OsFs); ok {
for _, dir := range []string{
"content/sect",
"layouts/_default",
"layouts/_default/_markup",
"layouts/partials",
"layouts/shortcodes",
"data",
"i18n",
} {
if err := os.MkdirAll(filepath.Join(s.workingDir, dir), 0777); err != nil {
return fmt.Errorf("failed to create %q: %w", dir, err)
}
}
}
s.addDefaults()
s.writeFilePairs("content", s.contentFilePairsAdded)
s.writeFilePairs("layouts", s.templateFilePairsAdded)
s.writeFilePairs("data", s.dataFilePairsAdded)
s.writeFilePairs("i18n", s.i18nFilePairsAdded)
s.writeFilePairs("i18n", s.i18nFilePairs)
s.writeFilePairs("data", s.dataFilePairs)
s.writeFilePairs("content", s.contentFilePairs)
s.writeFilePairs("layouts", s.templateFilePairs)
}
if err := s.LoadConfig(); err != nil {
return fmt.Errorf("failed to load config: %w", err)
}
s.Fs.PublishDir = hugofs.NewCreateCountingFs(s.Fs.PublishDir)
depsCfg := s.depsCfg
depsCfg.Fs = s.Fs
depsCfg.Cfg = s.Cfg
depsCfg.Logger = s.logger
depsCfg.Running = s.running
sites, err := NewHugoSites(depsCfg)
if err != nil {
return fmt.Errorf("failed to create sites: %w", err)
}
s.H = sites
return nil
}
func (s *sitesBuilder) BuildE(cfg BuildCfg) error {
if s.H == nil {
s.CreateSites()
}
return s.H.Build(cfg)
}
func (s *sitesBuilder) Build(cfg BuildCfg) *sitesBuilder {
s.T.Helper()
return s.build(cfg, false)
}
func (s *sitesBuilder) BuildFail(cfg BuildCfg) *sitesBuilder {
s.T.Helper()
return s.build(cfg, true)
}
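// changeEvents turns the files edited or removed since the last build into
// fsnotify events, so a rebuild can exercise the same code path as watch mode.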
func (s *sitesBuilder) changeEvents() []fsnotify.Event {
var events []fsnotify.Event
for _, v := range s.changedFiles {
events = append(events, fsnotify.Event{
Name: v,
Op: fsnotify.Write,
})
}
for _, v := range s.removedFiles {
events = append(events, fsnotify.Event{
Name: v,
Op: fsnotify.Remove,
})
}
return events
}
func (s *sitesBuilder) build(cfg BuildCfg, shouldFail bool) *sitesBuilder {
s.Helper()
defer func() {
s.changedFiles = nil
}()
if s.H == nil {
s.CreateSites()
}
err := s.H.Build(cfg, s.changeEvents()...)
if err == nil {
logErrorCount := s.H.NumLogErrors()
if logErrorCount > 0 {
err = fmt.Errorf("logged %d errors", logErrorCount)
}
}
if err != nil && !shouldFail {
s.Fatalf("Build failed: %s", err)
} else if err == nil && shouldFail {
s.Fatalf("Expected error")
}
return s
}
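// addDefaults provides default content, templates, data and i18n files for any
// of those groups the test did not supply itself.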
func (s *sitesBuilder) addDefaults() {
var (
contentTemplate = `---
title: doc1
weight: 1
tags:
- tag1
date: "2018-02-28"
---
# doc1
*some "content"*
{{< shortcode >}}
{{< lingo >}}
`
defaultContent = []string{
"content/sect/doc1.en.md", contentTemplate,
"content/sect/doc1.fr.md", contentTemplate,
"content/sect/doc1.nb.md", contentTemplate,
"content/sect/doc1.nn.md", contentTemplate,
}
listTemplateCommon = "{{ $p := .Paginator }}{{ $p.PageNumber }}|{{ .Title }}|{{ i18n \"hello\" }}|{{ .Permalink }}|Pager: {{ template \"_internal/pagination.html\" . }}|Kind: {{ .Kind }}|Content: {{ .Content }}|Len Pages: {{ len .Pages }}|Len RegularPages: {{ len .RegularPages }}| HasParent: {{ if .Parent }}YES{{ else }}NO{{ end }}"
defaultTemplates = []string{
"_default/single.html", "Single: {{ .Title }}|{{ i18n \"hello\" }}|{{.Language.Lang}}|RelPermalink: {{ .RelPermalink }}|Permalink: {{ .Permalink }}|{{ .Content }}|Resources: {{ range .Resources }}{{ .MediaType }}: {{ .RelPermalink}} -- {{ end }}|Summary: {{ .Summary }}|Truncated: {{ .Truncated }}|Parent: {{ .Parent.Title }}",
"_default/list.html", "List Page " + listTemplateCommon,
"index.html", "{{ $p := .Paginator }}Default Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}|String Resource: {{ ( \"Hugo Pipes\" | resources.FromString \"text/pipes.txt\").RelPermalink }}",
"index.fr.html", "{{ $p := .Paginator }}French Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}|String Resource: {{ ( \"Hugo Pipes\" | resources.FromString \"text/pipes.txt\").RelPermalink }}",
"_default/terms.html", "Taxonomy Term Page " + listTemplateCommon,
"_default/taxonomy.html", "Taxonomy List Page " + listTemplateCommon,
// Shortcodes
"shortcodes/shortcode.html", "Shortcode: {{ i18n \"hello\" }}",
// A shortcode in multiple languages
"shortcodes/lingo.html", "LingoDefault",
"shortcodes/lingo.fr.html", "LingoFrench",
// Special templates
"404.html", "404|{{ .Lang }}|{{ .Title }}",
"robots.txt", "robots|{{ .Lang }}|{{ .Title }}",
}
defaultI18n = []string{
"en.yaml", `
hello:
other: "Hello"
`,
"fr.yaml", `
hello:
other: "Bonjour"
`,
}
defaultData = []string{
"hugo.toml", "slogan = \"Hugo Rocks!\"",
}
)
if len(s.contentFilePairs) == 0 {
s.writeFilePairs("content", s.createFilenameContent(defaultContent))
}
if len(s.templateFilePairs) == 0 {
s.writeFilePairs("layouts", s.createFilenameContent(defaultTemplates))
}
if len(s.dataFilePairs) == 0 {
s.writeFilePairs("data", s.createFilenameContent(defaultData))
}
if len(s.i18nFilePairs) == 0 {
s.writeFilePairs("i18n", s.createFilenameContent(defaultI18n))
}
}
func (s *sitesBuilder) Fatalf(format string, args ...any) {
s.T.Helper()
s.T.Fatalf(format, args...)
}
func (s *sitesBuilder) AssertFileContentFn(filename string, f func(s string) bool) {
s.T.Helper()
content := s.FileContent(filename)
if !f(content) {
s.Fatalf("Assert failed for %q in content\n%s", filename, content)
}
}
// Helper to migrate tests to new format.
func (s *sitesBuilder) DumpTxtar() string {
var sb strings.Builder
skipRe := regexp.MustCompile(`^(public|resources|package-lock.json|go.sum)`)
afero.Walk(s.Fs.Source, s.workingDir, func(path string, info fs.FileInfo, err error) error {
rel := strings.TrimPrefix(path, s.workingDir+"/")
if skipRe.MatchString(rel) {
if info.IsDir() {
return filepath.SkipDir
}
return nil
}
if info == nil || info.IsDir() {
return nil
}
sb.WriteString(fmt.Sprintf("-- %s --\n", rel))
b, err := afero.ReadFile(s.Fs.Source, path)
s.Assert(err, qt.IsNil)
sb.WriteString(strings.TrimSpace(string(b)))
sb.WriteString("\n")
return nil
})
return sb.String()
}
func (s *sitesBuilder) AssertHome(matches ...string) {
s.AssertFileContent("public/index.html", matches...)
}
func (s *sitesBuilder) AssertFileContent(filename string, matches ...string) {
s.T.Helper()
content := s.FileContent(filename)
for _, m := range matches {
lines := strings.Split(m, "\n")
for _, match := range lines {
match = strings.TrimSpace(match)
if match == "" {
continue
}
if !strings.Contains(content, match) {
s.Fatalf("No match for %q in content for %s\n%s\n%q", match, filename, content, content)
}
}
}
}
func (s *sitesBuilder) AssertFileDoesNotExist(filename string) {
if s.CheckExists(filename) {
s.Fatalf("File %q exists but must not exist.", filename)
}
}
func (s *sitesBuilder) AssertImage(width, height int, filename string) {
f, err := s.Fs.WorkingDirReadOnly.Open(filename)
s.Assert(err, qt.IsNil)
defer f.Close()
cfg, err := jpeg.DecodeConfig(f)
s.Assert(err, qt.IsNil)
s.Assert(cfg.Width, qt.Equals, width)
s.Assert(cfg.Height, qt.Equals, height)
}
func (s *sitesBuilder) AssertNoDuplicateWrites() {
s.Helper()
d := s.Fs.PublishDir.(hugofs.DuplicatesReporter)
s.Assert(d.ReportDuplicates(), qt.Equals, "")
}
func (s *sitesBuilder) FileContent(filename string) string {
s.Helper()
filename = filepath.FromSlash(filename)
return readWorkingDir(s.T, s.Fs, filename)
}
func (s *sitesBuilder) AssertObject(expected string, object any) {
s.T.Helper()
got := s.dumper.Sdump(object)
expected = strings.TrimSpace(expected)
if expected != got {
fmt.Println(got)
diff := htesting.DiffStrings(expected, got)
s.Fatalf("diff:\n%s\nexpected\n%s\ngot\n%s", diff, expected, got)
}
}
func (s *sitesBuilder) AssertFileContentRe(filename string, matches ...string) {
content := readWorkingDir(s.T, s.Fs, filename)
for _, match := range matches {
r := regexp.MustCompile("(?s)" + match)
if !r.MatchString(content) {
s.Fatalf("No match for %q in content for %s\n%q", match, filename, content)
}
}
}
func (s *sitesBuilder) CheckExists(filename string) bool {
return workingDirExists(s.Fs, filepath.Clean(filename))
}
func (s *sitesBuilder) GetPage(ref string) page.Page {
p, err := s.H.Sites[0].getPageNew(nil, ref)
s.Assert(err, qt.IsNil)
return p
}
func (s *sitesBuilder) GetPageRel(p page.Page, ref string) page.Page {
p, err := s.H.Sites[0].getPageNew(p, ref)
s.Assert(err, qt.IsNil)
return p
}
func (s *sitesBuilder) NpmInstall() hexec.Runner {
sc := security.DefaultConfig
sc.Exec.Allow = security.NewWhitelist("npm")
ex := hexec.New(sc)
command, err := ex.New("npm", "install")
s.Assert(err, qt.IsNil)
return command
}
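// newTestHelper wraps the given config and filesystem in a testHelper used by
// the older-style assertion helpers below.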
func newTestHelper(cfg config.Provider, fs *hugofs.Fs, t testing.TB) testHelper {
return testHelper{
Cfg: cfg,
Fs: fs,
C: qt.New(t),
}
}
type testHelper struct {
Cfg config.Provider
Fs *hugofs.Fs
*qt.C
}
func (th testHelper) assertFileContent(filename string, matches ...string) {
th.Helper()
filename = th.replaceDefaultContentLanguageValue(filename)
content := readWorkingDir(th, th.Fs, filename)
for _, match := range matches {
match = th.replaceDefaultContentLanguageValue(match)
th.Assert(strings.Contains(content, match), qt.Equals, true, qt.Commentf(match+" not in: \n"+content))
}
}
func (th testHelper) assertFileContentRegexp(filename string, matches ...string) {
filename = th.replaceDefaultContentLanguageValue(filename)
content := readWorkingDir(th, th.Fs, filename)
for _, match := range matches {
match = th.replaceDefaultContentLanguageValue(match)
r := regexp.MustCompile(match)
matches := r.MatchString(content)
if !matches {
fmt.Println(match+":\n", content)
}
th.Assert(matches, qt.Equals, true)
}
}
func (th testHelper) assertFileNotExist(filename string) {
exists, err := helpers.Exists(filename, th.Fs.PublishDir)
th.Assert(err, qt.IsNil)
th.Assert(exists, qt.Equals, false)
}
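// replaceDefaultContentLanguageValue drops the default content language path
// segment (e.g. "en/") from value when the site is configured without a
// language subdirectory.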
func (th testHelper) replaceDefaultContentLanguageValue(value string) string {
defaultInSubDir := th.Cfg.GetBool("defaultContentLanguageInSubDir")
replace := th.Cfg.GetString("defaultContentLanguage") + "/"
if !defaultInSubDir {
value = strings.Replace(value, replace, "", 1)
}
return value
}
func loadTestConfig(fs afero.Fs, withConfig ...func(cfg config.Provider) error) (config.Provider, error) {
v, _, err := LoadConfig(ConfigSourceDescriptor{Fs: fs}, withConfig...)
return v, err
}
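// newTestCfgBasic returns a test config and filesystem backed by an in-memory
// afero filesystem, without going through the full config loading code path.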
func newTestCfgBasic() (config.Provider, *hugofs.Fs) {
mm := afero.NewMemMapFs()
v := config.NewWithTestDefaults()
v.Set("defaultContentLanguageInSubdir", true)
fs := hugofs.NewFrom(hugofs.NewBaseFileDecorator(mm), v)
return v, fs
}
func newTestCfg(withConfig ...func(cfg config.Provider) error) (config.Provider, *hugofs.Fs) {
mm := afero.NewMemMapFs()
v, err := loadTestConfig(mm, func(cfg config.Provider) error {
// Default is false, but true is easier to use as default in tests
cfg.Set("defaultContentLanguageInSubdir", true)
for _, w := range withConfig {
if err := w(cfg); err != nil {
return err
}
}
return nil
})
if err != nil && err != ErrNoConfigFile {
panic(err)
}
fs := hugofs.NewFrom(hugofs.NewBaseFileDecorator(mm), v)
return v, fs
}
func newTestSitesFromConfig(t testing.TB, afs afero.Fs, tomlConfig string, layoutPathContentPairs ...string) (testHelper, *HugoSites) {
if len(layoutPathContentPairs)%2 != 0 {
t.Fatalf("Layouts must be provided in pairs")
}
c := qt.New(t)
writeToFs(t, afs, filepath.Join("content", ".gitkeep"), "")
writeToFs(t, afs, "config.toml", tomlConfig)
cfg, err := LoadConfigDefault(afs)
c.Assert(err, qt.IsNil)
fs := hugofs.NewFrom(afs, cfg)
th := newTestHelper(cfg, fs, t)
for i := 0; i < len(layoutPathContentPairs); i += 2 {
writeSource(t, fs, layoutPathContentPairs[i], layoutPathContentPairs[i+1])
}
h, err := NewHugoSites(deps.DepsCfg{Fs: fs, Cfg: cfg})
c.Assert(err, qt.IsNil)
return th, h
}
func createWithTemplateFromNameValues(additionalTemplates ...string) func(templ tpl.TemplateManager) error {
return func(templ tpl.TemplateManager) error {
for i := 0; i < len(additionalTemplates); i += 2 {
err := templ.AddTemplate(additionalTemplates[i], additionalTemplates[i+1])
if err != nil {
return err
}
}
return nil
}
}
// TODO(bep) replace these with the builder
func buildSingleSite(t testing.TB, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site {
t.Helper()
return buildSingleSiteExpected(t, false, false, depsCfg, buildCfg)
}
func buildSingleSiteExpected(t testing.TB, expectSiteInitError, expectBuildError bool, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site {
t.Helper()
b := newTestSitesBuilderFromDepsCfg(t, depsCfg).WithNothingAdded()
err := b.CreateSitesE()
if expectSiteInitError {
b.Assert(err, qt.Not(qt.IsNil))
return nil
} else {
b.Assert(err, qt.IsNil)
}
h := b.H
b.Assert(len(h.Sites), qt.Equals, 1)
if expectBuildError {
b.Assert(h.Build(buildCfg), qt.Not(qt.IsNil))
return nil
}
b.Assert(h.Build(buildCfg), qt.IsNil)
return h.Sites[0]
}
func writeSourcesToSource(t *testing.T, base string, fs *hugofs.Fs, sources ...[2]string) {
for _, src := range sources {
writeSource(t, fs, filepath.Join(base, src[0]), src[1])
}
}
func getPage(in page.Page, ref string) page.Page {
p, err := in.GetPage(ref)
if err != nil {
panic(err)
}
return p
}
func content(c resource.ContentProvider) string {
cc, err := c.Content()
if err != nil {
panic(err)
}
ccs, err := cast.ToStringE(cc)
if err != nil {
panic(err)
}
return ccs
}
func pagesToString(pages ...page.Page) string {
var paths []string
for _, p := range pages {
paths = append(paths, p.Pathc())
}
sort.Strings(paths)
return strings.Join(paths, "|")
}
func dumpPagesLinks(pages ...page.Page) {
var links []string
for _, p := range pages {
links = append(links, p.RelPermalink())
}
sort.Strings(links)
for _, link := range links {
fmt.Println(link)
}
}
func dumpPages(pages ...page.Page) {
fmt.Println("---------")
for _, p := range pages {
fmt.Printf("Kind: %s Title: %-10s RelPermalink: %-10s Path: %-10s sections: %s Lang: %s\n",
p.Kind(), p.Title(), p.RelPermalink(), p.Pathc(), p.SectionsPath(), p.Lang())
}
}
func dumpSPages(pages ...*pageState) {
for i, p := range pages {
fmt.Printf("%d: Kind: %s Title: %-10s RelPermalink: %-10s Path: %-10s sections: %s\n",
i+1,
p.Kind(), p.Title(), p.RelPermalink(), p.Pathc(), p.SectionsPath())
}
}
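// printStringIndexes prints, for every line in s, a row with the byte offset
// of each rune followed by a row with the runes themselves; handy when
// debugging position-sensitive output.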
func printStringIndexes(s string) {
lines := strings.Split(s, "\n")
i := 0
for _, line := range lines {
for _, r := range line {
fmt.Printf("%-3s", strconv.Itoa(i))
i += utf8.RuneLen(r)
}
i++
fmt.Println()
for _, r := range line {
fmt.Printf("%-3s", string(r))
}
fmt.Println()
}
}
// See https://github.com/golang/go/issues/19280
// Not in use.
var parallelEnabled = true
func parallel(t *testing.T) {
if parallelEnabled {
t.Parallel()
}
}
func skipSymlink(t *testing.T) {
if runtime.GOOS == "windows" && os.Getenv("CI") == "" {
t.Skip("skip symlink test on local Windows (needs admin)")
}
}
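// captureStderr redirects os.Stderr to a pipe for the duration of f and
// returns whatever f wrote to it, together with f's error. captureStdout
// below does the same for os.Stdout.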
func captureStderr(f func() error) (string, error) {
old := os.Stderr
r, w, _ := os.Pipe()
os.Stderr = w
err := f()
w.Close()
os.Stderr = old
var buf bytes.Buffer
io.Copy(&buf, r)
return buf.String(), err
}
func captureStdout(f func() error) (string, error) {
old := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
err := f()
w.Close()
os.Stdout = old
var buf bytes.Buffer
io.Copy(&buf, r)
return buf.String(), err
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
client_local.go
|
package wishlist
import (
"context"
"fmt"
"log"
"os"
"os/user"
"path/filepath"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/term"
)
// NewLocalSSHClient returns a SSH Client for local usage.
func NewLocalSSHClient() SSHClient {
return &localClient{}
}
type localClient struct{}
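// Connect opens an SSH connection to the endpoint from the local machine,
// optionally forwarding the local agent and requesting a TTY, and then either
// starts an interactive shell or runs the endpoint's remote command.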
func (c *localClient) Connect(e *Endpoint) error {
user, err := user.Current()
if err != nil {
return fmt.Errorf("failed to get current username: %w", err)
}
methods, err := localBestAuthMethod(e)
if err != nil {
return fmt.Errorf("failed to setup a authentication method: %w", err)
}
conf := &ssh.ClientConfig{
User: firstNonEmpty(e.User, user.Username),
Auth: methods,
HostKeyCallback: hostKeyCallback(e, filepath.Join(user.HomeDir, ".ssh/known_hosts")),
}
session, client, cls, err := createSession(conf, e)
defer cls.close()
if err != nil {
return fmt.Errorf("failed to create session: %w", err)
}
session.Stdout = os.Stdout
session.Stderr = os.Stderr
session.Stdin = os.Stdin
if e.ForwardAgent {
log.Println("forwarding SSH agent")
agt, err := getLocalAgent()
if err != nil {
return err
}
if agt == nil {
return fmt.Errorf("requested ForwardAgent, but no agent is available")
}
if err := agent.RequestAgentForwarding(session); err != nil {
return fmt.Errorf("failed to forward agent: %w", err)
}
if err := agent.ForwardToAgent(client, agt); err != nil {
return fmt.Errorf("failed to forward agent: %w", err)
}
}
if e.RequestTTY || e.RemoteCommand == "" {
fd := int(os.Stdout.Fd())
if !term.IsTerminal(fd) {
return fmt.Errorf("requested a TTY, but current session is not TTY, aborting")
}
log.Println("requesting tty")
originalState, err := term.MakeRaw(fd)
if err != nil {
return fmt.Errorf("failed get terminal state: %w", err)
}
defer func() {
if err := term.Restore(fd, originalState); err != nil {
log.Println("couldn't restore terminal state:", err)
}
}()
w, h, err := term.GetSize(fd)
if err != nil {
return fmt.Errorf("failed to get term size: %w", err)
}
if err := session.RequestPty(os.Getenv("TERM"), h, w, nil); err != nil {
return fmt.Errorf("failed to request a pty: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go c.notifyWindowChanges(ctx, session)
} else {
log.Println("did not request a tty")
}
if e.RemoteCommand == "" {
return shellAndWait(session)
}
return runAndWait(session, e.RemoteCommand)
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
tests/func/test_repro.py
|
import filecmp
import getpass
import os
import posixpath
import re
import shutil
import uuid
from pathlib import Path
from subprocess import PIPE
from subprocess import Popen
from urllib.parse import urljoin
from unittest import SkipTest
import boto3
import paramiko
import pytest
from flaky.flaky_decorator import flaky
from google.cloud import storage as gc
from mock import patch
from dvc.compat import fspath
from dvc.exceptions import CyclicGraphError
from dvc.exceptions import ReproductionError
from dvc.exceptions import StagePathAsOutputError
from dvc.main import main
from dvc.output.base import OutputBase
from dvc.path_info import URLInfo
from dvc.remote.local import RemoteLOCAL
from dvc.repo import Repo as DvcRepo
from dvc.stage import Stage
from dvc.stage import StageFileDoesNotExistError
from dvc.system import System
from dvc.utils import file_md5
from dvc.utils import relpath
from dvc.utils.stage import dump_stage_file
from dvc.utils.stage import load_stage_file
from tests.basic_env import TestDvc
from tests.remotes import (
GCP,
HDFS,
Local,
S3,
SSH,
SSHMocked,
TEST_AWS_REPO_BUCKET,
TEST_GCP_REPO_BUCKET,
)
from tests.utils.httpd import StaticFileServer, ContentMD5Handler
class TestRepro(TestDvc):
def setUp(self):
super().setUp()
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
self.foo_stage = stages[0]
self.assertTrue(self.foo_stage is not None)
self.file1 = "file1"
self.file1_stage = self.file1 + ".dvc"
self.dvc.run(
fname=self.file1_stage,
outs=[self.file1],
deps=[self.FOO, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.FOO, self.file1),
)
class TestReproFail(TestRepro):
def test(self):
os.unlink(self.CODE)
ret = main(["repro", self.file1_stage])
self.assertNotEqual(ret, 0)
class TestReproCyclicGraph(TestDvc):
def test(self):
self.dvc.run(
deps=[self.FOO], outs=["bar.txt"], cmd="echo bar > bar.txt"
)
self.dvc.run(
deps=["bar.txt"], outs=["baz.txt"], cmd="echo baz > baz.txt"
)
stage_dump = {
"cmd": "echo baz > foo",
"deps": [{"path": "baz.txt"}],
"outs": [{"path": self.FOO}],
}
dump_stage_file("cycle.dvc", stage_dump)
with self.assertRaises(CyclicGraphError):
self.dvc.reproduce("cycle.dvc")
class TestReproWorkingDirectoryAsOutput(TestDvc):
"""
| stage.cwd | out.path | cwd as output |
|:-----------:|:---------:|:-------------:|
| dir | dir | True |
| dir/subdir/ | dir | True |
| dir | dir-1 | False |
| . | something | False |
"""
def test(self):
# File structure:
# .
# |-- dir1
# | |__ dir2.dvc (out.path == ../dir2)
# |__ dir2
# |__ something.dvc (stage.cwd == ./dir2)
os.mkdir(os.path.join(self.dvc.root_dir, "dir1"))
self.dvc.run(
cwd="dir1",
outs=["../dir2"],
cmd="mkdir {path}".format(path=os.path.join("..", "dir2")),
)
faulty_stage_path = os.path.join("dir2", "something.dvc")
output = os.path.join("..", "something")
stage_dump = {
"cmd": "echo something > {}".format(output),
"outs": [{"path": output}],
}
dump_stage_file(faulty_stage_path, stage_dump)
with self.assertRaises(StagePathAsOutputError):
self.dvc.reproduce(faulty_stage_path)
def test_nested(self):
# .
# |-- a
# | |__ nested
# | |__ dir
# | |__ error.dvc (stage.cwd == 'a/nested/dir')
# |__ b
# |__ nested.dvc (stage.out == 'a/nested')
dir1 = "b"
dir2 = "a"
os.mkdir(dir1)
os.mkdir(dir2)
nested_dir = os.path.join(dir2, "nested")
out_dir = relpath(nested_dir, dir1)
nested_stage = self.dvc.run(
cwd=dir1, # b
outs=[out_dir], # ../a/nested
cmd="mkdir {path}".format(path=out_dir),
)
os.mkdir(os.path.join(nested_dir, "dir"))
error_stage_path = os.path.join(nested_dir, "dir", "error.dvc")
output = os.path.join("..", "..", "something")
stage_dump = {
"cmd": "echo something > {}".format(output),
"outs": [{"path": output}],
}
dump_stage_file(error_stage_path, stage_dump)
# NOTE: os.walk() walks in a sorted order and we need dir2 subdirs to
# be processed before dir1 to load error.dvc first.
self.dvc.stages = [
nested_stage,
Stage.load(self.dvc, error_stage_path),
]
with self.assertRaises(StagePathAsOutputError):
self.dvc.reproduce(error_stage_path)
def test_similar_paths(self):
# File structure:
#
# .
# |-- something.dvc (out.path == something)
# |-- something
# |__ something-1
# |-- a
# |__ a.dvc (stage.cwd == something-1)
self.dvc.run(outs=["something"], cmd="mkdir something")
os.mkdir("something-1")
stage = os.path.join("something-1", "a.dvc")
stage_dump = {"cmd": "echo a > a", "outs": [{"path": "a"}]}
dump_stage_file(stage, stage_dump)
try:
self.dvc.reproduce(stage)
except StagePathAsOutputError:
self.fail("should not raise StagePathAsOutputError")
class TestReproDepUnderDir(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.dir_stage = stages[0]
self.assertTrue(self.dir_stage is not None)
self.file1 = "file1"
self.file1_stage = self.file1 + ".dvc"
self.dvc.run(
fname=self.file1_stage,
outs=[self.file1],
deps=[self.DATA, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.DATA, self.file1),
)
self.assertTrue(filecmp.cmp(self.file1, self.DATA, shallow=False))
os.unlink(self.DATA)
shutil.copyfile(self.FOO, self.DATA)
stages = self.dvc.reproduce(self.file1_stage)
self.assertEqual(len(stages), 2)
self.assertTrue(filecmp.cmp(self.file1, self.FOO, shallow=False))
class TestReproDepDirWithOutputsUnderIt(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
stages = self.dvc.add(self.DATA_SUB)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
stage = self.dvc.run(fname="Dvcfile", deps=[self.DATA, self.DATA_SUB])
self.assertTrue(stage is not None)
file1 = "file1"
file1_stage = file1 + ".dvc"
stage = self.dvc.run(
fname=file1_stage,
deps=[self.DATA_DIR],
outs=[file1],
cmd="python {} {} {}".format(self.CODE, self.DATA, file1),
)
self.assertTrue(stage is not None)
os.unlink(self.DATA)
shutil.copyfile(self.FOO, self.DATA)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 2)
class TestReproNoDeps(TestRepro):
def test(self):
out = "out"
code_file = "out.py"
stage_file = "out.dvc"
code = (
'import uuid\nwith open("{}", "w+") as fd:\n'
"\tfd.write(str(uuid.uuid4()))\n".format(out)
)
with open(code_file, "w+") as fd:
fd.write(code)
self.dvc.run(
fname=stage_file, outs=[out], cmd="python {}".format(code_file)
)
stages = self.dvc.reproduce(stage_file)
self.assertEqual(len(stages), 1)
class TestReproForce(TestRepro):
def test(self):
stages = self.dvc.reproduce(self.file1_stage, force=True)
self.assertEqual(len(stages), 2)
class TestReproChangedCode(TestRepro):
def test(self):
self.swap_code()
stages = self.dvc.reproduce(self.file1_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False))
self.assertEqual(len(stages), 1)
def swap_code(self):
os.unlink(self.CODE)
new_contents = self.CODE_CONTENTS
new_contents += "\nshutil.copyfile('{}', " "sys.argv[2])\n".format(
self.BAR
)
self.create(self.CODE, new_contents)
class TestReproChangedData(TestRepro):
def test(self):
self.swap_foo_with_bar()
stages = self.dvc.reproduce(self.file1_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False))
self.assertEqual(len(stages), 2)
def swap_foo_with_bar(self):
os.unlink(self.FOO)
shutil.copyfile(self.BAR, self.FOO)
class TestReproDry(TestReproChangedData):
def test(self):
self.swap_foo_with_bar()
stages = self.dvc.reproduce(self.file1_stage, dry=True)
self.assertEqual(len(stages), 2)
self.assertFalse(filecmp.cmp(self.file1, self.BAR, shallow=False))
ret = main(["repro", "--dry", self.file1_stage])
self.assertEqual(ret, 0)
self.assertFalse(filecmp.cmp(self.file1, self.BAR, shallow=False))
class TestReproUpToDate(TestRepro):
def test(self):
ret = main(["repro", self.file1_stage])
self.assertEqual(ret, 0)
class TestReproDryNoExec(TestDvc):
def test(self):
deps = []
for d in range(3):
idir = "idir{}".format(d)
odir = "odir{}".format(d)
deps.append("-d")
deps.append(odir)
os.mkdir(idir)
f = os.path.join(idir, "file")
with open(f, "w+") as fobj:
fobj.write(str(d))
ret = main(
[
"run",
"--no-exec",
"-d",
idir,
"-o",
odir,
"python -c 'import shutil; "
'shutil.copytree("{}", "{}")\''.format(idir, odir),
]
)
self.assertEqual(ret, 0)
ret = main(["run", "--no-exec", "-f", "Dvcfile"] + deps)
self.assertEqual(ret, 0)
ret = main(["repro", "--dry"])
self.assertEqual(ret, 0)
class TestReproChangedDeepData(TestReproChangedData):
def setUp(self):
super().setUp()
self.file2 = "file2"
self.file2_stage = self.file2 + ".dvc"
self.dvc.run(
fname=self.file2_stage,
outs=[self.file2],
deps=[self.file1, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.file1, self.file2),
)
def test(self):
self.swap_foo_with_bar()
stages = self.dvc.reproduce(self.file2_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False))
self.assertTrue(filecmp.cmp(self.file2, self.BAR, shallow=False))
self.assertEqual(len(stages), 3)
class TestReproIgnoreBuildCache(TestDvc):
def test(self):
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
foo_stage = stages[0]
self.assertTrue(foo_stage is not None)
code1 = "code1.py"
shutil.copyfile(self.CODE, code1)
file1 = "file1"
file1_stage = self.dvc.run(
outs=[file1],
deps=[self.FOO, code1],
cmd="python {} {} {}".format(code1, self.FOO, file1),
)
self.assertTrue(file1_stage is not None)
code2 = "code2.py"
shutil.copyfile(self.CODE, code2)
file2 = "file2"
file2_stage = self.dvc.run(
outs=[file2],
deps=[file1, code2],
cmd="python {} {} {}".format(code2, file1, file2),
)
self.assertTrue(file2_stage is not None)
code3 = "code3.py"
shutil.copyfile(self.CODE, code3)
file3 = "file3"
file3_stage = self.dvc.run(
outs=[file3],
deps=[file2, code3],
cmd="python {} {} {}".format(code3, file2, file3),
)
self.assertTrue(file3_stage is not None)
with open(code2, "a") as fobj:
fobj.write("\n\n")
stages = self.dvc.reproduce(file3_stage.path, ignore_build_cache=True)
self.assertEqual(len(stages), 2)
self.assertEqual(stages[0].path, file2_stage.path)
self.assertEqual(stages[1].path, file3_stage.path)
class TestReproPipeline(TestReproChangedDeepData):
def test(self):
stages = self.dvc.reproduce(
self.file1_stage, force=True, pipeline=True
)
self.assertEqual(len(stages), 3)
def test_cli(self):
ret = main(["repro", "--pipeline", "-f", self.file1_stage])
self.assertEqual(ret, 0)
class TestReproPipelines(TestDvc):
def setUp(self):
super().setUp()
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
self.foo_stage = stages[0]
self.assertTrue(self.foo_stage is not None)
stages = self.dvc.add(self.BAR)
self.assertEqual(len(stages), 1)
self.bar_stage = stages[0]
self.assertTrue(self.bar_stage is not None)
self.file1 = "file1"
self.file1_stage = self.file1 + ".dvc"
self.dvc.run(
fname=self.file1_stage,
outs=[self.file1],
deps=[self.FOO, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.FOO, self.file1),
)
self.file2 = "file2"
self.file2_stage = self.file2 + ".dvc"
self.dvc.run(
fname=self.file2_stage,
outs=[self.file2],
deps=[self.BAR, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.BAR, self.file2),
)
def test(self):
stages = self.dvc.reproduce(all_pipelines=True, force=True)
self.assertEqual(len(stages), 4)
names = [stage.relpath for stage in stages]
self.assertTrue(self.foo_stage.relpath in names)
self.assertTrue(self.bar_stage.relpath in names)
self.assertTrue(self.file1_stage in names)
self.assertTrue(self.file2_stage in names)
def test_cli(self):
ret = main(["repro", "-f", "-P"])
self.assertEqual(ret, 0)
class TestReproLocked(TestReproChangedData):
def test(self):
file2 = "file2"
file2_stage = file2 + ".dvc"
self.dvc.run(
fname=file2_stage,
outs=[file2],
deps=[self.file1, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.file1, file2),
)
self.swap_foo_with_bar()
ret = main(["lock", file2_stage])
self.assertEqual(ret, 0)
stages = self.dvc.reproduce(file2_stage)
self.assertEqual(len(stages), 0)
ret = main(["unlock", file2_stage])
self.assertEqual(ret, 0)
stages = self.dvc.reproduce(file2_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False))
self.assertTrue(filecmp.cmp(file2, self.BAR, shallow=False))
self.assertEqual(len(stages), 3)
def test_non_existing(self):
with self.assertRaises(StageFileDoesNotExistError):
self.dvc.lock_stage("non-existing-stage")
ret = main(["lock", "non-existing-stage"])
self.assertNotEqual(ret, 0)
class TestReproLockedCallback(TestDvc):
def test(self):
file1 = "file1"
file1_stage = file1 + ".dvc"
# NOTE: purposefully not specifying dependencies
# to create a callback stage.
stage = self.dvc.run(
fname=file1_stage,
outs=[file1],
cmd="python {} {} {}".format(self.CODE, self.FOO, file1),
)
self.assertTrue(stage is not None)
self.assertEqual(stage.relpath, file1_stage)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 1)
self.dvc.lock_stage(file1_stage)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 0)
self.dvc.lock_stage(file1_stage, unlock=True)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 1)
class TestReproLockedUnchanged(TestRepro):
def test(self):
"""
Check that locking/unlocking doesn't affect stage state
"""
self.dvc.lock_stage(self.file1_stage)
stages = self.dvc.reproduce(self.file1_stage)
self.assertEqual(len(stages), 0)
self.dvc.lock_stage(self.file1_stage, unlock=True)
stages = self.dvc.reproduce(self.file1_stage)
self.assertEqual(len(stages), 0)
class TestReproMetricsAddUnchanged(TestDvc):
def test(self):
"""
Check that adding/removing metrics doesn't affect stage state
"""
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
file1 = "file1"
file1_stage = file1 + ".dvc"
self.dvc.run(
fname=file1_stage,
outs_no_cache=[file1],
deps=[self.FOO, self.CODE],
cmd="python {} {} {}".format(self.CODE, self.FOO, file1),
)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 0)
self.dvc.metrics.add(file1)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 0)
self.dvc.metrics.remove(file1)
stages = self.dvc.reproduce(file1_stage)
self.assertEqual(len(stages), 0)
class TestReproPhony(TestReproChangedData):
def test(self):
stage = self.dvc.run(deps=[self.file1])
self.swap_foo_with_bar()
self.dvc.reproduce(stage.path)
self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False))
class TestNonExistingOutput(TestRepro):
def test(self):
os.unlink(self.FOO)
with self.assertRaises(ReproductionError):
self.dvc.reproduce(self.file1_stage)
class TestReproDataSource(TestReproChangedData):
def test(self):
self.swap_foo_with_bar()
stages = self.dvc.reproduce(self.foo_stage.path)
self.assertTrue(filecmp.cmp(self.FOO, self.BAR, shallow=False))
self.assertEqual(stages[0].outs[0].checksum, file_md5(self.BAR)[0])
class TestReproChangedDir(TestDvc):
def test(self):
file_name = "file"
shutil.copyfile(self.FOO, file_name)
stage_name = "dir.dvc"
dir_name = "dir"
dir_code = "dir.py"
code = (
'import os; import shutil; os.mkdir("{}"); '
'shutil.copyfile("{}", os.path.join("{}", "{}"))'
)
with open(dir_code, "w+") as fd:
fd.write(code.format(dir_name, file_name, dir_name, file_name))
self.dvc.run(
fname=stage_name,
outs=[dir_name],
deps=[file_name, dir_code],
cmd="python {}".format(dir_code),
)
stages = self.dvc.reproduce(stage_name)
self.assertEqual(len(stages), 0)
os.unlink(file_name)
shutil.copyfile(self.BAR, file_name)
stages = self.dvc.reproduce(stage_name)
self.assertEqual(len(stages), 1)
class TestReproChangedDirData(TestDvc):
def test(self):
dir_name = "dir"
dir_code = "dir_code.py"
with open(dir_code, "w+") as fd:
fd.write(
"import os; import sys; import shutil; "
"shutil.copytree(sys.argv[1], sys.argv[2])"
)
stage = self.dvc.run(
outs=[dir_name],
deps=[self.DATA_DIR, dir_code],
cmd="python {} {} {}".format(dir_code, self.DATA_DIR, dir_name),
)
self.assertTrue(stage is not None)
stages = self.dvc.reproduce(stage.path)
self.assertEqual(len(stages), 0)
with open(self.DATA_SUB, "a") as fd:
fd.write("add")
stages = self.dvc.reproduce(stage.path)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
# Check that dvc indeed registers changed output dir
shutil.move(self.BAR, dir_name)
stages = self.dvc.reproduce(stage.path)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
# Check that dvc registers mtime change for the directory.
System.hardlink(self.DATA_SUB, self.DATA_SUB + ".lnk")
stages = self.dvc.reproduce(stage.path)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
class TestReproMissingMd5InStageFile(TestRepro):
def test(self):
d = load_stage_file(self.file1_stage)
del d[Stage.PARAM_OUTS][0][RemoteLOCAL.PARAM_CHECKSUM]
del d[Stage.PARAM_DEPS][0][RemoteLOCAL.PARAM_CHECKSUM]
dump_stage_file(self.file1_stage, d)
stages = self.dvc.reproduce(self.file1_stage)
self.assertEqual(len(stages), 1)
class TestCmdRepro(TestReproChangedData):
def test(self):
self.swap_foo_with_bar()
ret = main(["status"])
self.assertEqual(ret, 0)
ret = main(["repro", self.file1_stage])
self.assertEqual(ret, 0)
ret = main(["repro", "non-existing-file"])
self.assertNotEqual(ret, 0)
class TestCmdReproChdirCwdBackwardCompatible(TestDvc):
def test(self):
dname = "dir"
os.mkdir(dname)
foo = os.path.join(dname, self.FOO)
bar = os.path.join(dname, self.BAR)
code = os.path.join(dname, self.CODE)
shutil.copyfile(self.FOO, foo)
shutil.copyfile(self.CODE, code)
ret = main(
[
"run",
"-f",
"Dvcfile",
"-c",
dname,
"-d",
self.FOO,
"-o",
self.BAR,
"python {} {} {}".format(self.CODE, self.FOO, self.BAR),
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(foo))
self.assertTrue(os.path.isfile(bar))
self.assertTrue(filecmp.cmp(foo, bar, shallow=False))
os.unlink(bar)
ret = main(["repro", "-c", dname])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(foo))
self.assertTrue(os.path.isfile(bar))
self.assertTrue(filecmp.cmp(foo, bar, shallow=False))
class TestCmdReproChdir(TestDvc):
def test(self):
dname = "dir"
os.mkdir(dname)
foo = os.path.join(dname, self.FOO)
bar = os.path.join(dname, self.BAR)
code = os.path.join(dname, self.CODE)
shutil.copyfile(self.FOO, foo)
shutil.copyfile(self.CODE, code)
ret = main(
[
"run",
"-f",
"{}/Dvcfile".format(dname),
"-w",
"{}".format(dname),
"-d",
self.FOO,
"-o",
self.BAR,
"python {} {} {}".format(self.CODE, self.FOO, self.BAR),
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(foo))
self.assertTrue(os.path.isfile(bar))
self.assertTrue(filecmp.cmp(foo, bar, shallow=False))
os.unlink(bar)
ret = main(["repro", "-c", dname])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(foo))
self.assertTrue(os.path.isfile(bar))
self.assertTrue(filecmp.cmp(foo, bar, shallow=False))
class TestReproExternalBase(TestDvc):
@staticmethod
def should_test():
return False
@property
def cache_scheme(self):
return self.scheme
@property
def cache_type(self):
return "copy"
@property
def scheme(self):
return None
@property
def scheme_sep(self):
return "://"
@property
def sep(self):
return "/"
def check_already_cached(self, stage):
stage.outs[0].remove()
patch_download = patch.object(
stage.deps[0], "download", wraps=stage.deps[0].download
)
patch_checkout = patch.object(
stage.outs[0], "checkout", wraps=stage.outs[0].checkout
)
patch_run = patch.object(stage, "_run", wraps=stage._run)
with self.dvc.lock, self.dvc.state:
with patch_download as mock_download:
with patch_checkout as mock_checkout:
with patch_run as mock_run:
stage.locked = False
stage.run()
stage.locked = True
mock_run.assert_not_called()
mock_download.assert_not_called()
mock_checkout.assert_called_once()
@patch("dvc.prompt.confirm", return_value=True)
def test(self, mock_prompt):
if not self.should_test():
raise SkipTest(
"Test {} is disabled".format(self.__class__.__name__)
)
cache = (
self.scheme
+ self.scheme_sep
+ self.bucket
+ self.sep
+ str(uuid.uuid4())
)
ret = main(["config", "cache." + self.cache_scheme, "myrepo"])
self.assertEqual(ret, 0)
ret = main(["remote", "add", "myrepo", cache])
self.assertEqual(ret, 0)
ret = main(["remote", "modify", "myrepo", "type", self.cache_type])
self.assertEqual(ret, 0)
remote_name = "myremote"
remote_key = str(uuid.uuid4())
remote = (
self.scheme + self.scheme_sep + self.bucket + self.sep + remote_key
)
ret = main(["remote", "add", remote_name, remote])
self.assertEqual(ret, 0)
ret = main(["remote", "modify", remote_name, "type", self.cache_type])
self.assertEqual(ret, 0)
self.dvc = DvcRepo(".")
foo_key = remote_key + self.sep + self.FOO
bar_key = remote_key + self.sep + self.BAR
foo_path = (
self.scheme + self.scheme_sep + self.bucket + self.sep + foo_key
)
bar_path = (
self.scheme + self.scheme_sep + self.bucket + self.sep + bar_key
)
# Using both plain and remote notation
out_foo_path = "remote://" + remote_name + "/" + self.FOO
out_bar_path = bar_path
self.write(self.bucket, foo_key, self.FOO_CONTENTS)
import_stage = self.dvc.imp_url(out_foo_path, "import")
self.assertTrue(os.path.exists("import"))
self.assertTrue(filecmp.cmp("import", self.FOO, shallow=False))
self.assertEqual(self.dvc.status([import_stage.path]), {})
self.check_already_cached(import_stage)
import_remote_stage = self.dvc.imp_url(
out_foo_path, out_foo_path + "_imported"
)
self.assertEqual(self.dvc.status([import_remote_stage.path]), {})
cmd_stage = self.dvc.run(
outs=[out_bar_path],
deps=[out_foo_path],
cmd=self.cmd(foo_path, bar_path),
)
self.assertEqual(self.dvc.status([cmd_stage.path]), {})
self.assertEqual(self.dvc.status(), {})
self.check_already_cached(cmd_stage)
self.write(self.bucket, foo_key, self.BAR_CONTENTS)
self.assertNotEqual(self.dvc.status(), {})
self.dvc.update(import_stage.path)
self.assertTrue(os.path.exists("import"))
self.assertTrue(filecmp.cmp("import", self.BAR, shallow=False))
self.assertEqual(self.dvc.status([import_stage.path]), {})
self.dvc.update(import_remote_stage.path)
self.assertEqual(self.dvc.status([import_remote_stage.path]), {})
stages = self.dvc.reproduce(cmd_stage.path)
self.assertEqual(len(stages), 1)
self.assertEqual(self.dvc.status([cmd_stage.path]), {})
self.assertEqual(self.dvc.status(), {})
self.dvc.gc()
self.assertEqual(self.dvc.status(), {})
self.dvc.remove(cmd_stage.path, outs_only=True)
self.assertNotEqual(self.dvc.status([cmd_stage.path]), {})
self.dvc.checkout([cmd_stage.path], force=True)
self.assertEqual(self.dvc.status([cmd_stage.path]), {})
@pytest.mark.skipif(os.name == "nt", reason="temporarily disabled on windows")
class TestReproExternalS3(S3, TestReproExternalBase):
@property
def scheme(self):
return "s3"
@property
def bucket(self):
return TEST_AWS_REPO_BUCKET
def cmd(self, i, o):
return "aws s3 cp {} {}".format(i, o)
def write(self, bucket, key, body):
s3 = boto3.client("s3")
s3.put_object(Bucket=bucket, Key=key, Body=body)
class TestReproExternalGS(GCP, TestReproExternalBase):
@property
def scheme(self):
return "gs"
@property
def bucket(self):
return TEST_GCP_REPO_BUCKET
def cmd(self, i, o):
return "gsutil cp {} {}".format(i, o)
def write(self, bucket, key, body):
client = gc.Client()
bucket = client.bucket(bucket)
bucket.blob(key).upload_from_string(body)
class TestReproExternalHDFS(HDFS, TestReproExternalBase):
@property
def scheme(self):
return "hdfs"
@property
def bucket(self):
return "{}@127.0.0.1".format(getpass.getuser())
def cmd(self, i, o):
return "hadoop fs -cp {} {}".format(i, o)
def write(self, bucket, key, body):
url = self.scheme + "://" + bucket + "/" + key
p = Popen(
"hadoop fs -rm -f {}".format(url),
shell=True,
executable=os.getenv("SHELL"),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
p.communicate()
p = Popen(
"hadoop fs -mkdir -p {}".format(posixpath.dirname(url)),
shell=True,
executable=os.getenv("SHELL"),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
print(out)
print(err)
self.assertEqual(p.returncode, 0)
with open("tmp", "w+") as fd:
fd.write(body)
p = Popen(
"hadoop fs -copyFromLocal {} {}".format("tmp", url),
shell=True,
executable=os.getenv("SHELL"),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
print(out)
print(err)
self.assertEqual(p.returncode, 0)
@flaky(max_runs=3, min_passes=1)
class TestReproExternalSSH(SSH, TestReproExternalBase):
_dir = None
@property
def scheme(self):
return "ssh"
@property
def bucket(self):
if not self._dir:
self._dir = self.mkdtemp()
return "{}@127.0.0.1:{}".format(getpass.getuser(), self._dir)
def cmd(self, i, o):
prefix = "ssh://"
assert i.startswith(prefix) and o.startswith(prefix)
i = i[len(prefix) :]
o = o[len(prefix) :]
return "scp {} {}".format(i, o)
def write(self, bucket, key, body):
path = posixpath.join(self._dir, key)
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("127.0.0.1")
sftp = ssh.open_sftp()
try:
sftp.stat(path)
sftp.remove(path)
except IOError:
pass
stdin, stdout, stderr = ssh.exec_command(
"mkdir -p $(dirname {})".format(path)
)
self.assertEqual(stdout.channel.recv_exit_status(), 0)
with sftp.open(path, "w+") as fobj:
fobj.write(body)
class TestReproExternalLOCAL(Local, TestReproExternalBase):
def setUp(self):
super().setUp()
self.tmpdir = self.mkdtemp()
ret = main(["config", "cache.type", "hardlink"])
self.assertEqual(ret, 0)
self.dvc = DvcRepo(".")
@property
def cache_type(self):
return "hardlink"
@property
def cache_scheme(self):
return "local"
@property
def scheme(self):
return ""
@property
def scheme_sep(self):
return ""
@property
def sep(self):
return os.sep
@property
def bucket(self):
return self.tmpdir
def cmd(self, i, o):
if os.name == "nt":
return "copy {} {}".format(i, o)
return "cp {} {}".format(i, o)
def write(self, bucket, key, body):
path = os.path.join(bucket, key)
dname = os.path.dirname(path)
if not os.path.exists(dname):
os.makedirs(dname)
with open(path, "w+") as fd:
fd.write(body)
class TestReproExternalHTTP(TestReproExternalBase):
_external_cache_id = None
@staticmethod
def get_remote(port):
return "http://localhost:{}/".format(port)
@property
def local_cache(self):
return os.path.join(self.dvc.dvc_dir, "cache")
def test(self):
# Import
with StaticFileServer() as httpd:
import_url = urljoin(self.get_remote(httpd.server_port), self.FOO)
import_output = "imported_file"
import_stage = self.dvc.imp_url(import_url, import_output)
self.assertTrue(os.path.exists(import_output))
self.assertTrue(filecmp.cmp(import_output, self.FOO, shallow=False))
self.dvc.remove("imported_file.dvc")
with StaticFileServer(handler_class=ContentMD5Handler) as httpd:
import_url = urljoin(self.get_remote(httpd.server_port), self.FOO)
import_output = "imported_file"
import_stage = self.dvc.imp_url(import_url, import_output)
self.assertTrue(os.path.exists(import_output))
self.assertTrue(filecmp.cmp(import_output, self.FOO, shallow=False))
# Run --deps
with StaticFileServer() as httpd:
remote = self.get_remote(httpd.server_port)
cache_id = str(uuid.uuid4())
cache = urljoin(remote, cache_id)
ret1 = main(["remote", "add", "mycache", cache])
ret2 = main(["remote", "add", "myremote", remote])
self.assertEqual(ret1, 0)
self.assertEqual(ret2, 0)
self.dvc = DvcRepo(".")
run_dependency = urljoin(remote, self.BAR)
run_output = "remote_file"
cmd = 'open("{}", "w+")'.format(run_output)
with open("create-output.py", "w") as fd:
fd.write(cmd)
run_stage = self.dvc.run(
deps=[run_dependency],
outs=[run_output],
cmd="python create-output.py",
)
self.assertTrue(run_stage is not None)
self.assertTrue(os.path.exists(run_output))
# Pull
self.dvc.remove(import_stage.path, outs_only=True)
self.assertFalse(os.path.exists(import_output))
shutil.move(self.local_cache, cache_id)
self.assertFalse(os.path.exists(self.local_cache))
self.dvc.pull([import_stage.path], remote="mycache")
self.assertTrue(os.path.exists(import_output))
class TestReproShell(TestDvc):
def test(self):
if os.name == "nt":
return
fname = "shell.txt"
stage = fname + ".dvc"
self.dvc.run(
fname=stage, outs=[fname], cmd="echo $SHELL > {}".format(fname)
)
with open(fname, "r") as fd:
self.assertEqual(os.getenv("SHELL"), fd.read().strip())
os.unlink(fname)
self.dvc.reproduce(stage)
with open(fname, "r") as fd:
self.assertEqual(os.getenv("SHELL"), fd.read().strip())
class TestReproAllPipelines(TestDvc):
def test(self):
self.dvc.run(
fname="start.dvc", outs=["start.txt"], cmd="echo start > start.txt"
)
self.dvc.run(
fname="middle.dvc",
deps=["start.txt"],
outs=["middle.txt"],
cmd="echo middle > middle.txt",
)
self.dvc.run(
fname="final.dvc",
deps=["middle.txt"],
outs=["final.txt"],
cmd="echo final > final.txt",
)
self.dvc.run(
fname="disconnected.dvc",
outs=["disconnected.txt"],
cmd="echo other > disconnected.txt",
)
with patch.object(Stage, "reproduce") as mock_reproduce:
ret = main(["repro", "--all-pipelines"])
self.assertEqual(ret, 0)
self.assertEqual(mock_reproduce.call_count, 4)
class TestReproNoCommit(TestRepro):
def test(self):
shutil.rmtree(self.dvc.cache.local.cache_dir)
ret = main(["repro", self.file1_stage, "--no-commit"])
self.assertEqual(ret, 0)
self.assertFalse(os.path.exists(self.dvc.cache.local.cache_dir))
class TestReproAlreadyCached(TestRepro):
def test(self):
run_out = self.dvc.run(
fname="datetime.dvc",
deps=[],
outs=["datetime.txt"],
cmd='python -c "import time; print(time.time())" > datetime.txt',
).outs[0]
repro_out = self.dvc.reproduce(target="datetime.dvc")[0].outs[0]
self.assertNotEqual(run_out.checksum, repro_out.checksum)
def test_force_with_dependencies(self):
run_out = self.dvc.run(
fname="datetime.dvc",
deps=[self.FOO],
outs=["datetime.txt"],
cmd='python -c "import time; print(time.time())" > datetime.txt',
).outs[0]
ret = main(["repro", "--force", "datetime.dvc"])
self.assertEqual(ret, 0)
repro_out = Stage.load(self.dvc, "datetime.dvc").outs[0]
self.assertNotEqual(run_out.checksum, repro_out.checksum)
def test_force_import(self):
ret = main(["import-url", self.FOO, self.BAR])
self.assertEqual(ret, 0)
patch_download = patch.object(
RemoteLOCAL,
"download",
side_effect=RemoteLOCAL.download,
autospec=True,
)
patch_checkout = patch.object(
OutputBase,
"checkout",
side_effect=OutputBase.checkout,
autospec=True,
)
with patch_download as mock_download:
with patch_checkout as mock_checkout:
assert main(["unlock", "bar.dvc"]) == 0
ret = main(["repro", "--force", "bar.dvc"])
self.assertEqual(ret, 0)
self.assertEqual(mock_download.call_count, 1)
self.assertEqual(mock_checkout.call_count, 0)
class TestShouldDisplayMetricsOnReproWithMetricsOption(TestDvc):
def test(self):
metrics_file = "metrics_file"
metrics_value = 0.123489015
ret = main(
[
"run",
"-m",
metrics_file,
"echo {} >> {}".format(metrics_value, metrics_file),
]
)
self.assertEqual(0, ret)
self._caplog.clear()
ret = main(
[
"repro",
"--force",
"--metrics",
metrics_file + Stage.STAGE_FILE_SUFFIX,
]
)
self.assertEqual(0, ret)
expected_metrics_display = "{}: {}".format(metrics_file, metrics_value)
self.assertIn(expected_metrics_display, self._caplog.text)
@pytest.fixture
def repro_dir(tmp_dir, dvc, run_copy):
# Creates a repo with the following structure:
# data_dir/dir_file origin_data
# | | |
# | | origin_copy.dvc
# unrelated2.dvc | | |
# | | unrelated1.dvc
# dir/subdir/dir_file_copy.dvc |
# | |
# | dir/origin_copy_2.dvc
# | |
# \ /
# \ /
# dir/Dvcfile
tmp_dir.gen(
{
"origin_data": "origin data content",
"data_dir": {"dir_file": "dir file content"},
"dir": {"subdir": {}},
}
)
stages = {}
origin_copy = tmp_dir / "origin_copy"
stage = run_copy("origin_data", fspath(origin_copy))
assert stage is not None
assert origin_copy.read_text() == "origin data content"
stages["origin_copy"] = stage
origin_copy_2 = tmp_dir / "dir" / "origin_copy_2"
stage = run_copy(
fspath(origin_copy),
fspath(origin_copy_2),
fname=fspath(origin_copy_2) + ".dvc",
)
assert stage is not None
assert origin_copy_2.read_text() == "origin data content"
stages["origin_copy_2"] = stage
dir_file_path = tmp_dir / "data_dir" / "dir_file"
dir_file_copy = tmp_dir / "dir" / "subdir" / "dir_file_copy"
stage = run_copy(
fspath(dir_file_path),
fspath(dir_file_copy),
fname=fspath(dir_file_copy) + ".dvc",
)
assert stage is not None
assert dir_file_copy.read_text() == "dir file content"
stages["dir_file_copy"] = stage
last_stage = tmp_dir / "dir" / "Dvcfile"
stage = dvc.run(
fname=fspath(last_stage),
deps=[fspath(origin_copy_2), fspath(dir_file_copy)],
)
assert stage is not None
stages["last_stage"] = stage
# The unrelated stages verify that reproducing `dir` does not trigger them too
assert run_copy(fspath(origin_copy), "unrelated1") is not None
assert run_copy(fspath(dir_file_path), "unrelated2") is not None
yield stages
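# Small helpers: _rewrite_file replaces a file's content in place and _read_out
# returns the text of a stage's first output.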
def _rewrite_file(path_elements, new_content):
if isinstance(path_elements, str):
path_elements = [path_elements]
file = Path(os.sep.join(path_elements))
file.unlink()
file.write_text(new_content)
def _read_out(stage):
return Path(stage.outs[0].fspath).read_text()
def test_recursive_repro_default(dvc, repro_dir):
"""
Test recursive repro on dir after a dep outside this dir has changed.
"""
_rewrite_file("origin_data", "new origin data content")
stages = dvc.reproduce("dir", recursive=True)
# Check that the dependency ("origin_copy") and the dependent stages
# inside the folder have been reproduced ("origin_copy_2", "last_stage")
assert stages == [
repro_dir["origin_copy"],
repro_dir["origin_copy_2"],
repro_dir["last_stage"],
]
assert _read_out(repro_dir["origin_copy"]) == "new origin data content"
assert _read_out(repro_dir["origin_copy_2"]) == "new origin data content"
def test_recursive_repro_single(dvc, repro_dir):
"""
Test recursive single-item repro on dir
after a dep outside this dir has changed.
"""
_rewrite_file("origin_data", "new origin content")
_rewrite_file(["data_dir", "dir_file"], "new dir file content")
stages = dvc.reproduce("dir", recursive=True, single_item=True)
# Check that just stages inside given dir
# with changed direct deps have been reproduced.
# This means that "origin_copy_2" stage should not be reproduced
# since it depends on "origin_copy".
# Also check that "dir_file_copy" stage was reproduced before "last_stage"
assert stages == [repro_dir["dir_file_copy"], repro_dir["last_stage"]]
assert _read_out(repro_dir["dir_file_copy"]) == "new dir file content"
def test_recursive_repro_single_force(dvc, repro_dir):
"""
Test recursive single-item force repro on dir
without any dependencies changing.
"""
stages = dvc.reproduce("dir", recursive=True, single_item=True, force=True)
# Check that all stages inside given dir have been reproduced
# Also check that "dir_file_copy" stage was reproduced before "last_stage"
# and that "origin_copy" stage was reproduced before "last_stage" stage
assert len(stages) == 3
assert set(stages) == {
repro_dir["origin_copy_2"],
repro_dir["dir_file_copy"],
repro_dir["last_stage"],
}
assert stages.index(repro_dir["origin_copy_2"]) < stages.index(
repro_dir["last_stage"]
)
assert stages.index(repro_dir["dir_file_copy"]) < stages.index(
repro_dir["last_stage"]
)
def test_recursive_repro_empty_dir(tmp_dir, dvc):
"""
Test recursive repro on an empty directory
"""
(tmp_dir / "emptydir").mkdir()
stages = dvc.reproduce("emptydir", recursive=True, force=True)
assert stages == []
def test_recursive_repro_recursive_missing_file(dvc):
"""
Test recursive repro on a missing file
"""
with pytest.raises(StageFileDoesNotExistError):
dvc.reproduce("notExistingStage.dvc", recursive=True)
with pytest.raises(StageFileDoesNotExistError):
dvc.reproduce("notExistingDir/", recursive=True)
def test_recursive_repro_on_stage_file(dvc, repro_dir):
"""
Test recursive repro on a stage file instead of directory
"""
stages = dvc.reproduce(
repro_dir["origin_copy_2"].relpath, recursive=True, force=True
)
assert stages == [repro_dir["origin_copy"], repro_dir["origin_copy_2"]]
def test_dvc_formatting_retained(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo content")
stage = run_copy("foo", "foo_copy", fname="foo_copy.dvc")
stage_path = tmp_dir / stage.relpath
# Add comments and custom formatting to DVC-file
lines = list(map(_format_dvc_line, stage_path.read_text().splitlines()))
lines.insert(0, "# Starting comment")
stage_text = "".join(l + "\n" for l in lines)
stage_path.write_text(stage_text)
# Rewrite data source and repro
(tmp_dir / "foo").write_text("new foo")
dvc.reproduce("foo_copy.dvc", force=True)
assert _hide_md5(stage_text) == _hide_md5(stage_path.read_text())
def _format_dvc_line(line):
# Add line comment for all cache and md5 keys
if "cache:" in line or "md5:" in line:
return line + " # line comment"
# Format command as one word per line
elif line.startswith("cmd: "):
pre, command = line.split(None, 1)
return pre + " >\n" + "\n".join(" " + s for s in command.split())
else:
return line
def _hide_md5(text):
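    # e.g. (illustrative) "md5: 8c7dd922ad47494fc02c388e12c00eac" becomes "md5: <md5>"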
return re.sub(r"\b[a-f0-9]{32}\b", "<md5>", text)
class TestReproDownstream(TestDvc):
def test(self):
# The dependency graph should look like this:
#
# E
# / \
# D F
# / \ \
# B C G
# \ /
# A
#
assert main(["run", "-o", "A", "echo A>A"]) == 0
assert main(["run", "-d", "A", "-o", "B", "echo B>B"]) == 0
assert main(["run", "-d", "A", "-o", "C", "echo C>C"]) == 0
assert main(["run", "-d", "B", "-d", "C", "-o", "D", "echo D>D"]) == 0
assert main(["run", "-o", "G", "echo G>G"]) == 0
assert main(["run", "-d", "G", "-o", "F", "echo F>F"]) == 0
assert main(["run", "-d", "D", "-d", "F", "-o", "E", "echo E>E"]) == 0
# We want the evaluation to move from B to E
#
# E
# /
# D
# /
# B
#
evaluation = self.dvc.reproduce("B.dvc", downstream=True, force=True)
assert len(evaluation) == 3
assert evaluation[0].relpath == "B.dvc"
assert evaluation[1].relpath == "D.dvc"
assert evaluation[2].relpath == "E.dvc"
@pytest.mark.skipif(
os.name == "nt",
reason="external output scenario is not supported on Windows",
)
def test_ssh_dir_out(tmp_dir, dvc, ssh_server):
tmp_dir.gen({"foo": "foo content"})
# Set up remote and cache
user = ssh_server.test_creds["username"]
port = ssh_server.port
keyfile = ssh_server.test_creds["key_filename"]
remote_url = SSHMocked.get_url(user, port)
assert main(["remote", "add", "upstream", remote_url]) == 0
assert main(["remote", "modify", "upstream", "keyfile", keyfile]) == 0
cache_url = SSHMocked.get_url(user, port)
assert main(["remote", "add", "sshcache", cache_url]) == 0
assert main(["config", "cache.ssh", "sshcache"]) == 0
assert main(["remote", "modify", "sshcache", "keyfile", keyfile]) == 0
# Recreating to reread configs
repo = DvcRepo(dvc.root_dir)
# To avoid "WARNING: UNPROTECTED PRIVATE KEY FILE" from ssh
os.chmod(keyfile, 0o600)
(tmp_dir / "script.py").write_text(
"import sys, pathlib\n"
"path = pathlib.Path(sys.argv[1])\n"
"dir_out = path / 'dir-out'\n"
"dir_out.mkdir()\n"
"(dir_out / '1.txt').write_text('1')\n"
"(dir_out / '2.txt').write_text('2')\n"
)
url_info = URLInfo(remote_url)
repo.run(
cmd="python {} {}".format(tmp_dir / "script.py", url_info.path),
outs=["remote://upstream/dir-out"],
deps=["foo"], # add a fake dep to not consider this a callback
)
repo.reproduce("dir-out.dvc")
repo.reproduce("dir-out.dvc", force=True)
|
[] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xuanke.settings')
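    # setdefault only applies when DJANGO_SETTINGS_MODULE is not already set,
    # so the settings module can still be overridden from the environment.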
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
dbbudget/settings.py
|
"""
Django settings for dbbudget project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '^x8(5ll-#gjb3+8&oe22s4kjyo9u)bp$qipw3by01u7l^!v!p+'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '^x8(5ll-#gjb3+8&oe22s4kjyo9u)bp$qipw3by01u7l^!v!p')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# Only the literal string 'True' (or an unset variable) enables debug mode;
# bool() of a non-empty string such as 'False' would otherwise always be truthy.
DEBUG = os.environ.get('DJANGO_DEBUG', 'True') == 'True'
ALLOWED_HOSTS = ['db-budget.herokuapp.com','127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'budget.apps.BudgetConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dbbudget.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dbbudget.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Heroku: Update database configuration from $DATABASE_URL.
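# e.g. (illustrative) DATABASE_URL=postgres://user:password@host:5432/dbname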
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'de-CH'
TIME_ZONE = 'Europe/Zurich'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/budget/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "__shared__"),
]
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
[] |
[] |
[
"DJANGO_DEBUG",
"DJANGO_SECRET_KEY"
] |
[]
|
["DJANGO_DEBUG", "DJANGO_SECRET_KEY"]
|
python
| 2 | 0 | |
tools/bisect-perf-regression.py
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Performance Test Bisect Tool
This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.
An example usage (using svn cl's):
./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit
Be aware that if you're using the git workflow and specify an svn revision,
the script will attempt to find the git SHA1 where svn changes up to that
revision were merged in.
An example usage (using git hashes):
./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit
"""
import errno
import imp
import math
import optparse
import os
import re
import shlex
import shutil
import subprocess
import sys
import threading
import time
import bisect_utils
# The additional repositories that might need to be bisected.
# If the repository has any dependant repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
# repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
DEPOT_DEPS_NAME = {
'chromium' : {
"src" : "src/",
"recurse" : True,
"depends" : None,
"from" : 'cros'
},
'webkit' : {
"src" : "src/third_party/WebKit",
"recurse" : True,
"depends" : None,
"from" : 'chromium'
},
'v8' : {
"src" : "src/v8",
"recurse" : True,
"depends" : None,
"build_with": 'v8_bleeding_edge',
"from" : 'chromium'
},
'v8_bleeding_edge' : {
"src" : "src/v8_bleeding_edge",
"recurse" : False,
"depends" : None,
"svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
"from" : 'chromium'
},
'skia/src' : {
"src" : "src/third_party/skia/src",
"recurse" : True,
"svn" : "http://skia.googlecode.com/svn/trunk/src",
"depends" : ['skia/include', 'skia/gyp'],
"from" : 'chromium'
},
'skia/include' : {
"src" : "src/third_party/skia/include",
"recurse" : False,
"svn" : "http://skia.googlecode.com/svn/trunk/include",
"depends" : None,
"from" : 'chromium'
},
'skia/gyp' : {
"src" : "src/third_party/skia/gyp",
"recurse" : False,
"svn" : "http://skia.googlecode.com/svn/trunk/gyp",
"depends" : None,
"from" : 'chromium'
}
}
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
'mod_for_test_scripts', 'ssh_keys',
'testing_rsa')
def CalculateTruncatedMean(data_set, truncate_percent):
"""Calculates the truncated mean of a set of values.
Args:
data_set: Set of values to use in calculation.
truncate_percent: The % from the upper/lower portions of the data set to
discard, expressed as a value in [0, 1].
Returns:
The truncated mean as a float.
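  Example (illustrative):
    data_set=[1, 2, 3, 4, 100] with truncate_percent=0.2 discards one value
    from each end, leaving [2, 3, 4] and a truncated mean of 3.0.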
"""
if len(data_set) > 2:
data_set = sorted(data_set)
discard_num_float = len(data_set) * truncate_percent
discard_num_int = int(math.floor(discard_num_float))
kept_weight = len(data_set) - discard_num_float * 2
data_set = data_set[discard_num_int:len(data_set)-discard_num_int]
weight_left = 1.0 - (discard_num_float - discard_num_int)
if weight_left < 1:
# If the % to discard leaves a fractional portion, need to weight those
# values.
unweighted_vals = data_set[1:len(data_set)-1]
weighted_vals = [data_set[0], data_set[len(data_set)-1]]
weighted_vals = [w * weight_left for w in weighted_vals]
data_set = weighted_vals + unweighted_vals
else:
kept_weight = len(data_set)
truncated_mean = reduce(lambda x, y: float(x) + float(y),
data_set) / kept_weight
return truncated_mean
def CalculateStandardDeviation(v):
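  # Sample standard deviation: the variance uses an (n - 1) denominator,
  # so the input must contain at least two values.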
mean = CalculateTruncatedMean(v, 0.0)
variances = [float(x) - mean for x in v]
variances = [x * x for x in variances]
variance = reduce(lambda x, y: float(x) + float(y), variances) / (len(v) - 1)
std_dev = math.sqrt(variance)
return std_dev
def IsStringFloat(string_to_check):
"""Checks whether or not the given string can be converted to a floating
point number.
Args:
string_to_check: Input string to check if it can be converted to a float.
Returns:
True if the string can be converted to a float.
"""
try:
float(string_to_check)
return True
except ValueError:
return False
def IsStringInt(string_to_check):
"""Checks whether or not the given string can be converted to a integer.
Args:
string_to_check: Input string to check if it can be converted to an int.
Returns:
True if the string can be converted to an int.
"""
try:
int(string_to_check)
return True
except ValueError:
return False
def IsWindows():
"""Checks whether or not the script is running on Windows.
Returns:
True if running on Windows.
"""
return os.name == 'nt'
def RunProcess(command, print_output=False):
"""Run an arbitrary command, returning its output and return code.
Args:
command: A list containing the command and args to execute.
print_output: Optional parameter to write output to stdout as it's
being collected.
Returns:
A tuple of the output and return code.
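  Example (illustrative):
    RunProcess(['git', 'rev-parse', 'HEAD']) returns the commit hash (with a
    trailing newline) and a return code of 0 on success.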
"""
if print_output:
print 'Running: [%s]' % ' '.join(command)
# On Windows, use shell=True to get PATH interpretation.
shell = IsWindows()
proc = subprocess.Popen(command,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0)
out = ['']
def ReadOutputWhileProcessRuns(stdout, print_output, out):
while True:
line = stdout.readline()
out[0] += line
if line == '':
break
if print_output:
sys.stdout.write(line)
thread = threading.Thread(target=ReadOutputWhileProcessRuns,
args=(proc.stdout, print_output, out))
thread.start()
proc.wait()
thread.join()
return (out[0], proc.returncode)
def RunGit(command):
"""Run a git subcommand, returning its output and return code.
Args:
command: A list containing the args to git.
Returns:
A tuple of the output and return code.
"""
command = ['git'] + command
return RunProcess(command)
def CheckRunGit(command):
"""Run a git subcommand, returning its output and return code. Asserts if
the return code of the call is non-zero.
Args:
command: A list containing the args to git.
Returns:
A tuple of the output and return code.
"""
(output, return_code) = RunGit(command)
assert not return_code, 'An error occurred while running'\
' "git %s"' % ' '.join(command)
return output
def BuildWithMake(threads, targets, print_output):
cmd = ['make', 'BUILDTYPE=Release', '-j%d' % threads] + targets
(output, return_code) = RunProcess(cmd, print_output)
return not return_code
def BuildWithNinja(threads, targets, print_output):
cmd = ['ninja', '-C', os.path.join('out', 'Release'),
'-j%d' % threads] + targets
(output, return_code) = RunProcess(cmd, print_output)
return not return_code
def BuildWithVisualStudio(targets, print_output):
path_to_devenv = os.path.abspath(
os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
cmd = [path_to_devenv, '/build', 'Release', path_to_sln]
for t in targets:
cmd.extend(['/Project', t])
(output, return_code) = RunProcess(cmd, print_output)
return not return_code
class Builder(object):
"""Builder is used by the bisect script to build relevant targets and deploy.
"""
def Build(self, depot, opts):
raise NotImplementedError()
class DesktopBuilder(Builder):
"""DesktopBuilder is used to build Chromium on linux/mac/windows."""
def Build(self, depot, opts):
"""Builds chrome and performance_ui_tests using options passed into
the script.
Args:
depot: Current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
targets = ['chrome', 'performance_ui_tests']
threads = 16
if opts.use_goma:
threads = 64
build_success = False
if opts.build_preference == 'make':
build_success = BuildWithMake(threads, targets,
opts.output_buildbot_annotations)
elif opts.build_preference == 'ninja':
if IsWindows():
targets = [t + '.exe' for t in targets]
build_success = BuildWithNinja(threads, targets,
opts.output_buildbot_annotations)
elif opts.build_preference == 'msvs':
assert IsWindows(), 'msvs is only supported on Windows.'
build_success = BuildWithVisualStudio(targets,
opts.output_buildbot_annotations)
else:
assert False, 'No build system defined.'
return build_success
class AndroidBuilder(Builder):
"""AndroidBuilder is used to build on android."""
def InstallAPK(self, opts):
"""Installs apk to device.
Args:
opts: The options parsed from the command line.
Returns:
True if successful.
"""
path_to_tool = os.path.join('build', 'android', 'adb_install_apk.py')
cmd = [path_to_tool, '--apk', 'ContentShell.apk', '--apk_package',
'org.chromium.content_shell_apk', '--release']
(_, return_code) = RunProcess(cmd, opts.output_buildbot_annotations)
return not return_code
def Build(self, depot, opts):
"""Builds the android content shell and other necessary tools using options
passed into the script.
Args:
depot: Current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
targets = ['content_shell_apk', 'forwarder2', 'md5sum']
threads = 16
if opts.use_goma:
threads = 64
build_success = False
if opts.build_preference == 'ninja':
build_success = BuildWithNinja(threads, targets,
opts.output_buildbot_annotations)
else:
assert False, 'No build system defined.'
if build_success:
build_success = self.InstallAPK(opts)
return build_success
class CrosBuilder(Builder):
"""CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
target platform."""
def ImageToTarget(self, opts):
"""Installs latest image to target specified by opts.cros_remote_ip.
Args:
opts: Program options containing cros_board and cros_remote_ip.
Returns:
True if successful.
"""
try:
# Keys will most likely be set to 0640 after wiping the chroot.
os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
os.chmod(CROS_TEST_KEY_PATH, 0600)
cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
'--remote=%s' % opts.cros_remote_ip,
'--board=%s' % opts.cros_board, '--test', '--verbose']
(_, return_code) = RunProcess(cmd, opts.output_buildbot_annotations)
return not return_code
except OSError, e:
return False
def BuildPackages(self, opts, depot):
"""Builds packages for cros.
Args:
opts: Program options containing cros_board.
depot: The depot being bisected.
Returns:
True if successful.
"""
cmd = [CROS_SDK_PATH]
if depot != 'cros':
path_to_chrome = os.path.join(os.getcwd(), '..')
cmd += ['--chrome_root=%s' % path_to_chrome]
cmd += ['--']
if depot != 'cros':
cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
cmd += ['BUILDTYPE=Release', './build_packages',
'--board=%s' % opts.cros_board]
(_, return_code) = RunProcess(cmd, True)
return not return_code
def BuildImage(self, opts, depot):
"""Builds test image for cros.
Args:
opts: Program options containing cros_board.
depot: The depot being bisected.
Returns:
True if successful.
"""
cmd = [CROS_SDK_PATH]
if depot != 'cros':
path_to_chrome = os.path.join(os.getcwd(), '..')
cmd += ['--chrome_root=%s' % path_to_chrome]
cmd += ['--']
if depot != 'cros':
cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
cmd += ['BUILDTYPE=Release', '--', './build_image',
'--board=%s' % opts.cros_board, 'test']
(_, return_code) = RunProcess(cmd, opts.output_buildbot_annotations)
return not return_code
def Build(self, depot, opts):
"""Builds targets using options passed into the script.
Args:
depot: Current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
if self.BuildPackages(opts, depot):
if self.BuildImage(opts, depot):
return self.ImageToTarget(opts)
return False
class SourceControl(object):
"""SourceControl is an abstraction over the underlying source control
system used for chromium. For now only git is supported, but in the
future, the svn workflow could be added as well."""
def __init__(self):
super(SourceControl, self).__init__()
def SyncToRevisionWithGClient(self, revision):
"""Uses gclient to sync to the specified revision.
ie. gclient sync --revision <revision>
Args:
revision: The git SHA1 or svn CL (depending on workflow).
Returns:
The return code of the call.
"""
return bisect_utils.RunGClient(['sync', '--revision',
revision, '--verbose'])
def SyncToRevisionWithRepo(self, timestamp):
"""Uses repo to sync all the underlying git depots to the specified
time.
Args:
timestamp: The unix timestamp to sync to.
Returns:
The return code of the call.
"""
return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
"""GitSourceControl is used to query the underlying source control. """
def __init__(self, opts):
super(GitSourceControl, self).__init__()
self.opts = opts
def IsGit(self):
return True
def GetRevisionList(self, revision_range_end, revision_range_start):
"""Retrieves a list of revisions between |revision_range_start| and
|revision_range_end|.
Args:
revision_range_end: The SHA1 for the end of the range.
revision_range_start: The SHA1 for the beginning of the range.
Returns:
A list of the revisions between |revision_range_start| and
|revision_range_end| (inclusive).
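      The list is ordered newest-first, and |revision_range_start| itself is
      appended so that both endpoints are included.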
"""
revision_range = '%s..%s' % (revision_range_start, revision_range_end)
cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
log_output = CheckRunGit(cmd)
revision_hash_list = log_output.split()
revision_hash_list.append(revision_range_start)
return revision_hash_list
def SyncToRevision(self, revision, sync_client=None):
"""Syncs to the specified revision.
Args:
revision: The revision to sync to.
      sync_client: Specifies which client to use for syncing ('gclient' or
        'repo'); if None, source control is used directly.
Returns:
True if successful.
"""
if not sync_client:
results = RunGit(['checkout', revision])[1]
elif sync_client == 'gclient':
results = self.SyncToRevisionWithGClient(revision)
elif sync_client == 'repo':
results = self.SyncToRevisionWithRepo(revision)
return not results
def ResolveToRevision(self, revision_to_check, depot, search):
"""If an SVN revision is supplied, try to resolve it to a git SHA1.
Args:
revision_to_check: The user supplied revision string that may need to be
resolved to a git SHA1.
depot: The depot the revision_to_check is from.
search: The number of changelists to try if the first fails to resolve
to a git hash. If the value is negative, the function will search
backwards chronologically, otherwise it will search forward.
Returns:
A string containing a git SHA1 hash, otherwise None.
"""
if depot != 'cros':
if not IsStringInt(revision_to_check):
return revision_to_check
depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
if depot != 'chromium':
depot_svn = DEPOT_DEPS_NAME[depot]['svn']
svn_revision = int(revision_to_check)
git_revision = None
if search > 0:
search_range = xrange(svn_revision, svn_revision + search, 1)
else:
search_range = xrange(svn_revision, svn_revision + search, -1)
for i in search_range:
svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
'origin/master']
(log_output, return_code) = RunGit(cmd)
assert not return_code, 'An error occurred while running'\
' "git %s"' % ' '.join(cmd)
if not return_code:
log_output = log_output.strip()
if log_output:
git_revision = log_output
break
return git_revision
else:
if IsStringInt(revision_to_check):
return int(revision_to_check)
else:
cwd = os.getcwd()
os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
'chromiumos-overlay'))
pattern = CROS_VERSION_PATTERN % revision_to_check
cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
git_revision = None
log_output = CheckRunGit(cmd)
if log_output:
git_revision = log_output
git_revision = int(log_output.strip())
os.chdir(cwd)
return git_revision
def IsInProperBranch(self):
"""Confirms they're in the master branch for performing the bisection.
This is needed or gclient will fail to sync properly.
Returns:
True if the current branch on src is 'master'
"""
cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
log_output = CheckRunGit(cmd)
log_output = log_output.strip()
return log_output == "master"
def SVNFindRev(self, revision):
"""Maps directly to the 'git svn find-rev' command.
Args:
revision: The git SHA1 to use.
Returns:
An integer changelist #, otherwise None.
"""
cmd = ['svn', 'find-rev', revision]
output = CheckRunGit(cmd)
svn_revision = output.strip()
if IsStringInt(svn_revision):
return int(svn_revision)
return None
def QueryRevisionInfo(self, revision):
"""Gathers information on a particular revision, such as author's name,
email, subject, and date.
Args:
revision: Revision you want to gather information on.
Returns:
A dict in the following format:
{
'author': %s,
'email': %s,
'date': %s,
'subject': %s,
}
"""
commit_info = {}
formats = ['%cN', '%cE', '%s', '%cD']
targets = ['author', 'email', 'subject', 'date']
for i in xrange(len(formats)):
cmd = ['log', '--format=%s' % formats[i], '-1', revision]
output = CheckRunGit(cmd)
commit_info[targets[i]] = output.rstrip()
return commit_info
def CheckoutFileAtRevision(self, file_name, revision):
"""Performs a checkout on a file at the given revision.
Returns:
True if successful.
"""
return not RunGit(['checkout', revision, file_name])[1]
def RevertFileToHead(self, file_name):
"""Unstages a file and returns it to HEAD.
Returns:
True if successful.
"""
# Reset doesn't seem to return 0 on success.
RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])
return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
"""Returns a list of commits that modified this file.
Args:
filename: Name of file.
revision_start: Start of revision range.
revision_end: End of revision range.
Returns:
Returns a list of commits that touched this file.
"""
cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
filename]
output = CheckRunGit(cmd)
return [o for o in output.split('\n') if o]
class BisectPerformanceMetrics(object):
"""BisectPerformanceMetrics performs a bisection against a list of range
of revisions to narrow down where performance regressions may have
occurred."""
def __init__(self, source_control, opts):
super(BisectPerformanceMetrics, self).__init__()
self.opts = opts
self.source_control = source_control
self.src_cwd = os.getcwd()
self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
self.depot_cwd = {}
self.cleanup_commands = []
self.warnings = []
self.builder = None
if opts.target_platform == 'cros':
self.builder = CrosBuilder()
elif opts.target_platform == 'android':
self.builder = AndroidBuilder()
else:
self.builder = DesktopBuilder()
# This always starts true since the script grabs latest first.
self.was_blink = True
for d in DEPOT_NAMES:
# The working directory of each depot is just the path to the depot, but
# since we're already in 'src', we can skip that part.
self.depot_cwd[d] = self.src_cwd + DEPOT_DEPS_NAME[d]['src'][3:]
def PerformCleanup(self):
"""Performs cleanup when script is finished."""
os.chdir(self.src_cwd)
for c in self.cleanup_commands:
if c[0] == 'mv':
shutil.move(c[1], c[2])
else:
assert False, 'Invalid cleanup command.'
def GetRevisionList(self, depot, bad_revision, good_revision):
"""Retrieves a list of all the commits between the bad revision and
last known good revision."""
revision_work_list = []
if depot == 'cros':
revision_range_start = good_revision
revision_range_end = bad_revision
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory('cros')
# Print the commit timestamps for every commit in the revision time
# range. We'll sort them and bisect by that. There is a remote chance that
# 2 (or more) commits will share the exact same timestamp, but it's
# probably safe to ignore that case.
cmd = ['repo', 'forall', '-c',
'git log --format=%%ct --before=%d --after=%d' % (
revision_range_end, revision_range_start)]
(output, return_code) = RunProcess(cmd)
assert not return_code, 'An error occurred while running'\
' "%s"' % ' '.join(cmd)
os.chdir(cwd)
revision_work_list = list(set(
[int(o) for o in output.split('\n') if IsStringInt(o)]))
revision_work_list = sorted(revision_work_list, reverse=True)
else:
revision_work_list = self.source_control.GetRevisionList(bad_revision,
good_revision)
return revision_work_list
def Get3rdPartyRevisionsFromCurrentRevision(self, depot):
"""Parses the DEPS file to determine WebKit/v8/etc... versions.
Returns:
A dict in the format {depot:revision} if successful, otherwise None.
"""
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory(depot)
results = {}
if depot == 'chromium':
locals = {'Var': lambda _: locals["vars"][_],
'From': lambda *args: None}
execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)
os.chdir(cwd)
rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
for d in DEPOT_NAMES:
if DEPOT_DEPS_NAME[d]['recurse'] and\
DEPOT_DEPS_NAME[d]['from'] == depot:
if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])
if re_results:
results[d] = re_results.group('revision')
else:
return None
else:
return None
elif depot == 'cros':
cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
CROS_CHROMEOS_PATTERN]
(output, return_code) = RunProcess(cmd)
assert not return_code, 'An error occurred while running'\
' "%s"' % ' '.join(cmd)
if len(output) > CROS_CHROMEOS_PATTERN:
output = output[len(CROS_CHROMEOS_PATTERN):]
if len(output) > 1:
output = output.split('_')[0]
if len(output) > 3:
contents = output.split('.')
version = contents[2]
if contents[3] != '0':
warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' %\
(version, contents[3], version)
if not warningText in self.warnings:
self.warnings.append(warningText)
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory('chromium')
return_code = CheckRunGit(['log', '-1', '--format=%H',
'[email protected]', '--grep=to %s' % version,
'origin/master'])
os.chdir(cwd)
results['chromium'] = output.strip()
return results
def BuildCurrentRevision(self, depot):
"""Builds chrome and performance_ui_tests on the current revision.
Returns:
True if the build was successful.
"""
if self.opts.debug_ignore_build:
return True
cwd = os.getcwd()
os.chdir(self.src_cwd)
build_success = self.builder.Build(depot, self.opts)
os.chdir(cwd)
return build_success
def RunGClientHooks(self):
"""Runs gclient with runhooks command.
Returns:
True if gclient reports no errors.
"""
if self.opts.debug_ignore_build:
return True
return not bisect_utils.RunGClient(['runhooks'])
def ParseMetricValuesFromOutput(self, metric, text):
"""Parses output from performance_ui_tests and retrieves the results for
a given metric.
Args:
metric: The metric as a list of [<trace>, <value>] strings.
text: The text to parse the metric values from.
Returns:
A list of floating point numbers found.
"""
# Format is: RESULT <graph>: <trace>= <value> <units>
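    # e.g. (illustrative) with metric=['times', 't'], a line such as
    # "RESULT times: t= 123.4 ms" yields the single value 123.4.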
metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
text_lines = text.split('\n')
values_list = []
for current_line in text_lines:
# Parse the output from the performance test for the metric we're
# interested in.
metric_re = metric_formatted +\
"(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
metric_re = re.compile(metric_re)
regex_results = metric_re.search(current_line)
if not regex_results is None:
values_list += [regex_results.group('values')]
else:
metric_re = metric_formatted +\
"(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
metric_re = re.compile(metric_re)
regex_results = metric_re.search(current_line)
if not regex_results is None:
metric_values = regex_results.group('values')
values_list += metric_values.split(',')
values_list = [float(v) for v in values_list if IsStringFloat(v)]
# If the metric is times/t, we need to sum the timings in order to get
# similar regression results as the try-bots.
if metric == ['times', 't']:
if values_list:
values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
return values_list
def RunPerformanceTestAndParseResults(self, command_to_run, metric):
"""Runs a performance test on the current revision by executing the
'command_to_run' and parses the results.
Args:
command_to_run: The command to be run to execute the performance test.
metric: The metric to parse out from the results of the performance test.
Returns:
On success, it will return a tuple of the average value of the metric,
and a success code of 0.
"""
if self.opts.debug_ignore_perf_test:
return ({'mean': 0.0, 'std_dev': 0.0}, 0)
if IsWindows():
command_to_run = command_to_run.replace('/', r'\\')
args = shlex.split(command_to_run)
# If running a telemetry test for cros, insert the remote ip, and
# identity parameters.
if self.opts.target_platform == 'cros':
if 'tools/perf/run_' in args[0]:
args.append('--remote=%s' % self.opts.cros_remote_ip)
args.append('--identity=%s' % CROS_TEST_KEY_PATH)
cwd = os.getcwd()
os.chdir(self.src_cwd)
start_time = time.time()
metric_values = []
for i in xrange(self.opts.repeat_test_count):
# Can ignore the return code since if the tests fail, it won't return 0.
(output, return_code) = RunProcess(args,
self.opts.output_buildbot_annotations)
metric_values += self.ParseMetricValuesFromOutput(metric, output)
elapsed_minutes = (time.time() - start_time) / 60.0
if elapsed_minutes >= self.opts.repeat_test_max_time or not metric_values:
break
os.chdir(cwd)
# Need to get the average value if there were multiple values.
if metric_values:
truncated_mean = CalculateTruncatedMean(metric_values,
self.opts.truncate_percent)
standard_dev = CalculateStandardDeviation(metric_values)
values = {
'mean': truncated_mean,
'std_dev': standard_dev,
}
print 'Results of performance test: %12f %12f' % (
truncated_mean, standard_dev)
print
return (values, 0)
else:
return ('Invalid metric specified, or no values returned from '
'performance test.', -1)
def FindAllRevisionsToSync(self, revision, depot):
"""Finds all dependant revisions and depots that need to be synced for a
given revision. This is only useful in the git workflow, as an svn depot
may be split into multiple mirrors.
ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
skia/include. To sync skia/src properly, one has to find the proper
revisions in skia/gyp and skia/include.
Args:
revision: The revision to sync to.
depot: The depot in use at the moment (probably skia).
Returns:
A list of [depot, revision] pairs that need to be synced.
"""
revisions_to_sync = [[depot, revision]]
is_base = (depot == 'chromium') or (depot == 'cros')
# Some SVN depots were split into multiple git depots, so we need to
# figure out for each mirror which git revision to grab. There's no
# guarantee that the SVN revision will exist for each of the dependant
# depots, so we have to grep the git logs and grab the next earlier one.
if not is_base and\
DEPOT_DEPS_NAME[depot]['depends'] and\
self.source_control.IsGit():
svn_rev = self.source_control.SVNFindRev(revision)
for d in DEPOT_DEPS_NAME[depot]['depends']:
self.ChangeToDepotWorkingDirectory(d)
dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
if dependant_rev:
revisions_to_sync.append([d, dependant_rev])
num_resolved = len(revisions_to_sync)
num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
self.ChangeToDepotWorkingDirectory(depot)
if not ((num_resolved - 1) == num_needed):
return None
return revisions_to_sync
def PerformPreBuildCleanup(self):
"""Performs necessary cleanup between runs."""
print 'Cleaning up between runs.'
print
# Having these pyc files around between runs can confuse the
# perf tests and cause them to crash.
for (path, dir, files) in os.walk(self.src_cwd):
for cur_file in files:
if cur_file.endswith('.pyc'):
path_to_file = os.path.join(path, cur_file)
os.remove(path_to_file)
def PerformWebkitDirectoryCleanup(self, revision):
"""If the script is switching between Blink and WebKit during bisect,
    it's faster to just delete the directory rather than leave it up to git
to sync.
Returns:
True if successful.
"""
if not self.source_control.CheckoutFileAtRevision(
bisect_utils.FILE_DEPS_GIT, revision):
return False
cwd = os.getcwd()
os.chdir(self.src_cwd)
is_blink = bisect_utils.IsDepsFileBlink()
os.chdir(cwd)
if not self.source_control.RevertFileToHead(
bisect_utils.FILE_DEPS_GIT):
return False
if self.was_blink != is_blink:
self.was_blink = is_blink
return bisect_utils.RemoveThirdPartyWebkitDirectory()
return True
def PerformCrosChrootCleanup(self):
"""Deletes the chroot.
Returns:
True if successful.
"""
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory('cros')
cmd = [CROS_SDK_PATH, '--delete']
(_, return_code) = RunProcess(cmd, self.opts.output_buildbot_annotations)
os.chdir(cwd)
return not return_code
def CreateCrosChroot(self):
"""Creates a new chroot.
Returns:
True if successful.
"""
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory('cros')
cmd = [CROS_SDK_PATH, '--create']
(_, return_code) = RunProcess(cmd, self.opts.output_buildbot_annotations)
os.chdir(cwd)
return not return_code
def PerformPreSyncCleanup(self, revision, depot):
"""Performs any necessary cleanup before syncing.
Returns:
True if successful.
"""
if depot == 'chromium':
return self.PerformWebkitDirectoryCleanup(revision)
elif depot == 'cros':
return self.PerformCrosChrootCleanup()
return True
def RunPostSync(self, depot):
"""Performs any work after syncing.
Returns:
True if successful.
"""
if depot == 'chromium':
return self.RunGClientHooks()
elif depot == 'cros':
return self.CreateCrosChroot()
return True
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric):
"""Performs a full sync/build/run of the specified revision.
Args:
revision: The revision to sync to.
depot: The depot that's being used at the moment (src, webkit, etc.)
command_to_run: The command to execute the performance test.
metric: The performance metric being tested.
Returns:
On success, a tuple containing the results of the performance test.
Otherwise, a tuple with the error message.
"""
sync_client = None
if depot == 'chromium':
sync_client = 'gclient'
elif depot == 'cros':
sync_client = 'repo'
revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
if not revisions_to_sync:
return ('Failed to resolve dependant depots.', 1)
if not self.PerformPreSyncCleanup(revision, depot):
return ('Failed to perform pre-sync cleanup.', 1)
success = True
if not self.opts.debug_ignore_sync:
for r in revisions_to_sync:
self.ChangeToDepotWorkingDirectory(r[0])
if sync_client:
self.PerformPreBuildCleanup()
if not self.source_control.SyncToRevision(r[1], sync_client):
success = False
break
if success:
success = self.RunPostSync(depot)
if success:
if self.BuildCurrentRevision(depot):
results = self.RunPerformanceTestAndParseResults(command_to_run,
metric)
if results[1] == 0 and sync_client:
external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
depot)
if external_revisions:
return (results[0], results[1], external_revisions)
else:
return ('Failed to parse DEPS file for external revisions.', 1)
else:
return results
else:
return ('Failed to build revision: [%s]' % (str(revision, )), 1)
else:
return ('Failed to run [gclient runhooks].', 1)
else:
return ('Failed to sync revision: [%s]' % (str(revision, )), 1)
def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
"""Given known good and bad values, decide if the current_value passed
or failed.
Args:
current_value: The value of the metric being checked.
known_bad_value: The reference value for a "failed" run.
known_good_value: The reference value for a "passed" run.
Returns:
True if the current_value is closer to the known_good_value than the
known_bad_value.
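    Example (illustrative): with a good mean of 100 and a bad mean of 200, a
    current mean of 120 is closer to the good value and counts as a pass.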
"""
dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
return dist_to_good_value < dist_to_bad_value
def ChangeToDepotWorkingDirectory(self, depot_name):
"""Given a depot, changes to the appropriate working directory.
Args:
depot_name: The name of the depot (see DEPOT_NAMES).
"""
if depot_name == 'chromium':
os.chdir(self.src_cwd)
elif depot_name == 'cros':
os.chdir(self.cros_cwd)
elif depot_name in DEPOT_NAMES:
os.chdir(self.depot_cwd[depot_name])
else:
assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
' was added without proper support?' %\
(depot_name,)
def PrepareToBisectOnDepot(self,
current_depot,
end_revision,
start_revision):
"""Changes to the appropriate directory and gathers a list of revisions
to bisect between |start_revision| and |end_revision|.
Args:
current_depot: The depot we want to bisect.
end_revision: End of the revision range.
start_revision: Start of the revision range.
Returns:
A list containing the revisions between |start_revision| and
|end_revision| inclusive.
"""
# Change into working directory of external library to run
# subsequent commands.
old_cwd = os.getcwd()
os.chdir(self.depot_cwd[current_depot])
# V8 (and possibly others) is merged in periodically. Bisecting
# this directory directly won't give much good info.
if DEPOT_DEPS_NAME[current_depot].has_key('build_with'):
new_depot = DEPOT_DEPS_NAME[current_depot]['build_with']
svn_start_revision = self.source_control.SVNFindRev(start_revision)
svn_end_revision = self.source_control.SVNFindRev(end_revision)
os.chdir(self.depot_cwd[new_depot])
start_revision = self.source_control.ResolveToRevision(
svn_start_revision, new_depot, -1000)
end_revision = self.source_control.ResolveToRevision(
svn_end_revision, new_depot, -1000)
old_name = DEPOT_DEPS_NAME[current_depot]['src'][4:]
new_name = DEPOT_DEPS_NAME[new_depot]['src'][4:]
os.chdir(self.src_cwd)
shutil.move(old_name, old_name + '.bak')
shutil.move(new_name, old_name)
os.chdir(self.depot_cwd[current_depot])
self.cleanup_commands.append(['mv', old_name, new_name])
self.cleanup_commands.append(['mv', old_name + '.bak', old_name])
os.chdir(self.depot_cwd[current_depot])
depot_revision_list = self.GetRevisionList(current_depot,
end_revision,
start_revision)
os.chdir(old_cwd)
return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
"""Gathers reference values by running the performance tests on the
known good and bad revisions.
Args:
good_rev: The last known good revision where the performance regression
has not occurred yet.
bad_rev: A revision where the performance regression has already occurred.
cmd: The command to execute the performance test.
metric: The metric being tested for regression.
Returns:
A tuple with the results of building and running each revision.
"""
bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
target_depot,
cmd,
metric)
good_run_results = None
if not bad_run_results[1]:
good_run_results = self.SyncBuildAndRunRevision(good_rev,
target_depot,
cmd,
metric)
return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
"""Adds new revisions to the revision_data dict and initializes them.
Args:
revisions: List of revisions to add.
depot: Depot that's currently in use (src, webkit, etc...)
sort: Sorting key for displaying revisions.
revision_data: A dict to add the new revisions into. Existing revisions
will have their sort keys offset.
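    Example (illustrative): inserting 3 revisions at sort=5 shifts every
    existing entry with sort > 5 up by 3 and gives the new entries sort
    values 6, 7 and 8.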
"""
num_depot_revisions = len(revisions)
for k, v in revision_data.iteritems():
if v['sort'] > sort:
v['sort'] += num_depot_revisions
for i in xrange(num_depot_revisions):
r = revisions[i]
revision_data[r] = {'revision' : r,
'depot' : depot,
'value' : None,
'passed' : '?',
'sort' : i + sort + 1}
def PrintRevisionsToBisectMessage(self, revision_list, depot):
if self.opts.output_buildbot_annotations:
step_name = 'Bisection Range: [%s - %s]' % (
revision_list[len(revision_list)-1], revision_list[0])
bisect_utils.OutputAnnotationStepStart(step_name)
print
print 'Revisions to bisect on [%s]:' % depot
for revision_id in revision_list:
print ' -> %s' % (revision_id, )
print
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
"""Checks to see if changes to DEPS file occurred, and that the revision
range also includes the change to .DEPS.git. If it doesn't, attempts to
expand the revision range to include it.
Args:
      bad_revision: First known bad revision.
good_revision: Last known good revision.
Returns:
A tuple with the new bad and good revisions.
"""
if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
changes_to_deps = self.source_control.QueryFileRevisionHistory(
'DEPS', good_revision, bad_revision)
if changes_to_deps:
# DEPS file was changed, search from the oldest change to DEPS file to
# bad_revision to see if there are matching .DEPS.git changes.
oldest_deps_change = changes_to_deps[-1]
changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
if len(changes_to_deps) != len(changes_to_gitdeps):
# Grab the timestamp of the last DEPS change
cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
output = CheckRunGit(cmd)
commit_time = int(output)
# Try looking for a commit that touches the .DEPS.git file in the
# next 15 minutes after the DEPS file change.
cmd = ['log', '--format=%H', '-1',
'--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
'origin/master', bisect_utils.FILE_DEPS_GIT]
output = CheckRunGit(cmd)
output = output.strip()
if output:
self.warnings.append('Detected change to DEPS and modified '
'revision range to include change to .DEPS.git')
return (output, good_revision)
else:
self.warnings.append('Detected change to DEPS but couldn\'t find '
'matching change to .DEPS.git')
return (bad_revision, good_revision)
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
"""Given known good and bad revisions, run a binary search on all
intermediate revisions to determine the CL where the performance regression
occurred.
Args:
command_to_run: Specify the command to execute the performance test.
      bad_revision_in: Number/tag of the known bad revision.
      good_revision_in: Number/tag of the known good revision.
metric: The performance metric to monitor.
Returns:
A dict with 2 members, 'revision_data' and 'error'. On success,
'revision_data' will contain a dict mapping revision ids to
data about that revision. Each piece of revision data consists of a
dict with the following keys:
'passed': Represents whether the performance test was successful at
that revision. Possible values include: 1 (passed), 0 (failed),
'?' (skipped), 'F' (build failed).
'depot': The depot that this revision is from (ie. WebKit)
'external': If the revision is a 'src' revision, 'external' contains
the revisions of each of the external libraries.
'sort': A sort value for sorting the dict in order of commits.
For example:
{
'error':None,
'revision_data':
{
'CL #1':
{
'passed':False,
'depot':'chromium',
'external':None,
'sort':0
}
}
}
If an error occurred, the 'error' field will contain the message and
'revision_data' will be empty.
"""
results = {'revision_data' : {},
'error' : None}
# Choose depot to bisect first
target_depot = 'chromium'
if self.opts.target_platform == 'cros':
target_depot = 'cros'
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory(target_depot)
    # If they passed SVN CL's, etc... we can try to match them to git SHA1's.
bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
target_depot, 100)
good_revision = self.source_control.ResolveToRevision(good_revision_in,
target_depot, -100)
os.chdir(cwd)
if bad_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
return results
if good_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
return results
(bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
bad_revision, good_revision)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
print 'Gathering revision range for bisection.'
# Retrieve a list of revisions to do bisection on.
src_revision_list = self.GetRevisionList(target_depot,
bad_revision,
good_revision)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
if src_revision_list:
# revision_data will store information about a revision such as the
# depot it came from, the webkit/V8 revision at that time,
# performance timing, build state, etc...
revision_data = results['revision_data']
# revision_list is the list we're binary searching through at the moment.
revision_list = []
sort_key_ids = 0
for current_revision_id in src_revision_list:
sort_key_ids += 1
revision_data[current_revision_id] = {'value' : None,
'passed' : '?',
'depot' : target_depot,
'external' : None,
'sort' : sort_key_ids}
revision_list.append(current_revision_id)
min_revision = 0
max_revision = len(revision_list) - 1
self.PrintRevisionsToBisectMessage(revision_list, target_depot)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
print 'Gathering reference values for bisection.'
# Perform the performance tests on the good and bad revisions, to get
# reference values.
(bad_results, good_results) = self.GatherReferenceValues(good_revision,
bad_revision,
command_to_run,
metric,
target_depot)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
if bad_results[1]:
results['error'] = bad_results[0]
return results
if good_results[1]:
results['error'] = good_results[0]
return results
# We need these reference values to determine if later runs should be
# classified as pass or fail.
known_bad_value = bad_results[0]
known_good_value = good_results[0]
# Can just mark the good and bad revisions explicitly here since we
# already know the results.
bad_revision_data = revision_data[revision_list[0]]
bad_revision_data['external'] = bad_results[2]
bad_revision_data['passed'] = 0
bad_revision_data['value'] = known_bad_value
good_revision_data = revision_data[revision_list[max_revision]]
good_revision_data['external'] = good_results[2]
good_revision_data['passed'] = 1
good_revision_data['value'] = known_good_value
while True:
if not revision_list:
break
min_revision_data = revision_data[revision_list[min_revision]]
max_revision_data = revision_data[revision_list[max_revision]]
if max_revision - min_revision <= 1:
if min_revision_data['passed'] == '?':
next_revision_index = min_revision
elif max_revision_data['passed'] == '?':
next_revision_index = max_revision
elif min_revision_data['depot'] == 'chromium' or\
min_revision_data['depot'] == 'cros':
# If there were changes to any of the external libraries we track,
            # we should bisect the changes there as well.
external_depot = None
for current_depot in DEPOT_NAMES:
if DEPOT_DEPS_NAME[current_depot]["recurse"] and\
DEPOT_DEPS_NAME[current_depot]['from'] ==\
min_revision_data['depot']:
if min_revision_data['external'][current_depot] !=\
max_revision_data['external'][current_depot]:
external_depot = current_depot
break
# If there was no change in any of the external depots, the search
# is over.
if not external_depot:
break
earliest_revision = max_revision_data['external'][current_depot]
latest_revision = min_revision_data['external'][current_depot]
new_revision_list = self.PrepareToBisectOnDepot(external_depot,
latest_revision,
earliest_revision)
if not new_revision_list:
              results['error'] = 'An error occurred attempting to retrieve'\
                                 ' revision range: [%s..%s]' %\
                                 (earliest_revision, latest_revision)
return results
self.AddRevisionsIntoRevisionData(new_revision_list,
external_depot,
min_revision_data['sort'],
revision_data)
# Reset the bisection and perform it on the newly inserted
# changelists.
revision_list = new_revision_list
min_revision = 0
max_revision = len(revision_list) - 1
sort_key_ids += len(revision_list)
print 'Regression in metric:%s appears to be the result of changes'\
' in [%s].' % (metric, current_depot)
self.PrintRevisionsToBisectMessage(revision_list, external_depot)
continue
else:
break
else:
next_revision_index = int((max_revision - min_revision) / 2) +\
min_revision
next_revision_id = revision_list[next_revision_index]
next_revision_data = revision_data[next_revision_id]
next_revision_depot = next_revision_data['depot']
self.ChangeToDepotWorkingDirectory(next_revision_depot)
if self.opts.output_buildbot_annotations:
step_name = 'Working on [%s]' % next_revision_id
bisect_utils.OutputAnnotationStepStart(step_name)
print 'Working on revision: [%s]' % next_revision_id
run_results = self.SyncBuildAndRunRevision(next_revision_id,
next_revision_depot,
command_to_run,
metric)
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
# If the build is successful, check whether or not the metric
# had regressed.
if not run_results[1]:
if len(run_results) > 2:
next_revision_data['external'] = run_results[2]
passed_regression = self.CheckIfRunPassed(run_results[0],
known_good_value,
known_bad_value)
next_revision_data['passed'] = passed_regression
next_revision_data['value'] = run_results[0]
if passed_regression:
max_revision = next_revision_index
else:
min_revision = next_revision_index
else:
next_revision_data['passed'] = 'F'
# If the build is broken, remove it and redo search.
revision_list.pop(next_revision_index)
max_revision -= 1
else:
# Weren't able to sync and retrieve the revision range.
results['error'] = 'An error occurred attempting to retrieve revision '\
'range: [%s..%s]' % (good_revision, bad_revision)
return results
def FormatAndPrintResults(self, bisect_results):
"""Prints the results from a bisection run in a readable format.
Args
bisect_results: The results from a bisection test run.
"""
revision_data = bisect_results['revision_data']
revision_data_sorted = sorted(revision_data.iteritems(),
key = lambda x: x[1]['sort'])
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Results')
print
print 'Full results of bisection:'
for current_id, current_data in revision_data_sorted:
build_status = current_data['passed']
if type(build_status) is bool:
build_status = int(build_status)
print ' %8s %40s %s' % (current_data['depot'],
current_id, build_status)
print
print
print 'Tested commits:'
for current_id, current_data in revision_data_sorted:
if current_data['value']:
print ' %8s %40s %12f %12f' % (
current_data['depot'], current_id,
current_data['value']['mean'], current_data['value']['std_dev'])
print
# Find range where it possibly broke.
first_working_revision = None
last_broken_revision = None
for k, v in revision_data_sorted:
if v['passed'] == 1:
if not first_working_revision:
first_working_revision = k
if not v['passed']:
last_broken_revision = k
if last_broken_revision != None and first_working_revision != None:
print 'Results: Regression may have occurred in range:'
print ' -> First Bad Revision: [%40s] [%s]' %\
(last_broken_revision,
revision_data[last_broken_revision]['depot'])
print ' -> Last Good Revision: [%40s] [%s]' %\
(first_working_revision,
revision_data[first_working_revision]['depot'])
cwd = os.getcwd()
self.ChangeToDepotWorkingDirectory(
revision_data[last_broken_revision]['depot'])
if revision_data[last_broken_revision]['depot'] == 'cros':
# Want to get a list of all the commits and what depots they belong
# to so that we can grab info about each.
cmd = ['repo', 'forall', '-c',
'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
last_broken_revision, first_working_revision + 1)]
(output, return_code) = RunProcess(cmd)
changes = []
assert not return_code, 'An error occurred while running'\
' "%s"' % ' '.join(cmd)
last_depot = None
cwd = os.getcwd()
for l in output.split('\n'):
if l:
# Output will be in form:
# /path_to_depot
# /path_to_other_depot
# <SHA1>
# /path_again
# <SHA1>
# etc.
if l[0] == '/':
last_depot = l
else:
contents = l.split(' ')
if len(contents) > 1:
changes.append([last_depot, contents[0]])
print
for c in changes:
os.chdir(c[0])
info = self.source_control.QueryRevisionInfo(c[1])
print
print 'Commit : %s' % c[1]
print 'Author : %s' % info['author']
print 'Email : %s' % info['email']
print 'Date : %s' % info['date']
print 'Subject : %s' % info['subject']
print
else:
info = self.source_control.QueryRevisionInfo(last_broken_revision)
print
print 'Commit : %s' % last_broken_revision
print 'Author : %s' % info['author']
print 'Email : %s' % info['email']
print 'Date : %s' % info['date']
print 'Subject : %s' % info['subject']
print
os.chdir(cwd)
# Give a warning if the values were very close together
good_std_dev = revision_data[first_working_revision]['value']['std_dev']
good_mean = revision_data[first_working_revision]['value']['mean']
bad_mean = revision_data[last_broken_revision]['value']['mean']
# A standard deviation of 0 could indicate either insufficient runs
# or a test that consistently returns the same value.
if good_std_dev > 0:
deviations = math.fabs(bad_mean - good_mean) / good_std_dev
if deviations < 1.5:
self.warnings.append('Regression was less than 1.5 standard '
'deviations from "good" value. Results may not be accurate.')
elif self.opts.repeat_test_count == 1:
self.warnings.append('Tests were only set to run once. This '
'may be insufficient to get meaningful results.')
# Check for any other possible regression ranges
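# A step between consecutive data points is flagged as a possible regression
# when it is at least 1.5 standard deviations from the "good" value or more
# than a 1% change relative to the "good" mean (see the check below).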
prev_revision_data = revision_data_sorted[0][1]
prev_revision_id = revision_data_sorted[0][0]
possible_regressions = []
for current_id, current_data in revision_data_sorted:
if current_data['value']:
prev_mean = prev_revision_data['value']['mean']
cur_mean = current_data['value']['mean']
if good_std_dev:
deviations = math.fabs(prev_mean - cur_mean) / good_std_dev
else:
deviations = None
if good_mean:
percent_change = (prev_mean - cur_mean) / good_mean
# If the "good" values are supposed to be higher than the "bad"
# values (i.e. scores), flip the sign of the percent change so that
# a positive value always represents a regression.
if bad_mean < good_mean:
percent_change *= -1.0
else:
percent_change = None
if deviations >= 1.5 or percent_change > 0.01:
if current_id != first_working_revision:
possible_regressions.append(
[current_id, prev_revision_id, percent_change, deviations])
prev_revision_data = current_data
prev_revision_id = current_id
if possible_regressions:
print
print 'Other regressions may have occurred:'
print
for p in possible_regressions:
current_id = p[0]
percent_change = p[2]
deviations = p[3]
current_data = revision_data[current_id]
previous_id = p[1]
previous_data = revision_data[previous_id]
if deviations is None:
deviations = 'N/A'
else:
deviations = '%.2f' % deviations
if percent_change is None:
percent_change = 0
print ' %8s %s [%.2f%%, %s x std.dev]' % (
previous_data['depot'], previous_id, 100 * percent_change,
deviations)
print ' %8s %s' % (
current_data['depot'], current_id)
print
if self.warnings:
print
print 'The following warnings were generated:'
print
for w in self.warnings:
print ' - %s' % w
print
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
"""Attempts to determine the underlying source control workflow and returns
a SourceControl object.
Returns:
An instance of a SourceControl object, or None if the current workflow
is unsupported.
"""
(output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])
if output.strip() == 'true':
return GitSourceControl(opts)
return None
def SetNinjaBuildSystemDefault():
"""Makes ninja the default build system to be used by
the bisection script."""
gyp_var = os.getenv('GYP_GENERATORS')
if not gyp_var or not 'ninja' in gyp_var:
if gyp_var:
os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
else:
os.environ['GYP_GENERATORS'] = 'ninja'
if IsWindows():
os.environ['GYP_DEFINES'] = 'component=shared_library '\
'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
'chromium_win_pch=0'
def SetMakeBuildSystemDefault():
"""Makes make the default build system to be used by
the bisection script."""
os.environ['GYP_GENERATORS'] = 'make'
def CheckPlatformSupported(opts):
"""Checks that this platform and build system are supported.
Args:
opts: The options parsed from the command line.
Returns:
True if the platform and build system are supported.
"""
# Haven't tested the script out on any other platforms yet.
supported = ['posix', 'nt']
if not os.name in supported:
print "Sorry, this platform isn't supported yet."
print
return False
if IsWindows():
if not opts.build_preference:
opts.build_preference = 'msvs'
if opts.build_preference == 'msvs':
if not os.getenv('VS100COMNTOOLS'):
print 'Error: Path to visual studio could not be determined.'
print
return False
elif opts.build_preference == 'ninja':
SetNinjaBuildSystemDefault()
else:
assert False, 'Error: %s build not supported' % opts.build_preference
else:
if not opts.build_preference:
if 'ninja' in os.getenv('GYP_GENERATORS', ''):
opts.build_preference = 'ninja'
else:
opts.build_preference = 'make'
if opts.build_preference == 'ninja':
SetNinjaBuildSystemDefault()
elif opts.build_preference == 'make':
SetMakeBuildSystemDefault()
else:
assert False, 'Error: %s build not supported' % opts.build_preference
bisect_utils.RunGClient(['runhooks'])
return True
def RmTreeAndMkDir(path_to_dir):
"""Removes the directory tree specified, and then creates an empty
directory in the same location.
Args:
path_to_dir: Path to the directory tree.
Returns:
True if successful, False if an error occurred.
"""
try:
if os.path.exists(path_to_dir):
shutil.rmtree(path_to_dir)
except OSError, e:
if e.errno != errno.ENOENT:
return False
try:
os.makedirs(path_to_dir)
except OSError, e:
if e.errno != errno.EEXIST:
return False
return True
def RemoveBuildFiles():
"""Removes build files from previous runs."""
if RmTreeAndMkDir(os.path.join('out', 'Release')):
if RmTreeAndMkDir(os.path.join('build', 'Release')):
return True
return False
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Perform binary search on revision history to find a minimal '
'range of revisions where a performance metric regressed.\n')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-c', '--command',
type='str',
help='A command to execute your performance test at' +
' each point in the bisection.')
parser.add_option('-b', '--bad_revision',
type='str',
help='A bad revision to start bisection. ' +
'Must be later than good revision. May be either a git' +
' or svn revision.')
parser.add_option('-g', '--good_revision',
type='str',
help='A revision to start bisection where performance' +
' test is known to pass. Must be earlier than the ' +
'bad revision. May be either a git or svn revision.')
parser.add_option('-m', '--metric',
type='str',
help='The desired metric to bisect on. For example ' +
'"vm_rss_final_b/vm_rss_f_b"')
parser.add_option('-w', '--working_directory',
type='str',
help='Path to the working directory where the script will '
'do an initial checkout of the chromium depot. The '
'files will be placed in a subdirectory "bisect" under '
'working_directory and that will be used to perform the '
'bisection. This parameter is optional, if it is not '
'supplied, the script will work from the current depot.')
parser.add_option('-r', '--repeat_test_count',
type='int',
default=20,
help='The number of times to repeat the performance test. '
'Values will be clamped to range [1, 100]. '
'Default value is 20.')
parser.add_option('--repeat_test_max_time',
type='int',
default=20,
help='The maximum time (in minutes) to take running the '
'performance tests. The script will run the performance '
'tests according to --repeat_test_count, so long as it '
'doesn\'t exceed --repeat_test_max_time. Values will be '
'clamped to range [1, 60]. '
'Default value is 20.')
parser.add_option('-t', '--truncate_percent',
type='int',
default=25,
help='The highest/lowest % are discarded to form a '
'truncated mean. Values will be clamped to range [0, 25]. '
'Default value is 25 (highest/lowest 25% will be '
'discarded).')
parser.add_option('--build_preference',
type='choice',
choices=['msvs', 'ninja', 'make'],
help='The preferred build system to use. On linux/mac '
'the options are make/ninja. On Windows, the options '
'are msvs/ninja.')
parser.add_option('--target_platform',
type='choice',
choices=['chromium', 'cros', 'android'],
default='chromium',
help='The target platform. Choices are "chromium" (current '
'platform), "cros", or "android". If you specify something '
'other than "chromium", you must be properly set up to '
'build that platform.')
parser.add_option('--cros_board',
type='str',
help='The cros board type to build.')
parser.add_option('--cros_remote_ip',
type='str',
help='The remote machine to image to.')
parser.add_option('--use_goma',
action="store_true",
help='Add a bunch of extra threads for goma.')
parser.add_option('--output_buildbot_annotations',
action="store_true",
help='Add extra annotation output for buildbot.')
parser.add_option('--debug_ignore_build',
action="store_true",
help='DEBUG: Don\'t perform builds.')
parser.add_option('--debug_ignore_sync',
action="store_true",
help='DEBUG: Don\'t perform syncs.')
parser.add_option('--debug_ignore_perf_test',
action="store_true",
help='DEBUG: Don\'t perform performance tests.')
(opts, args) = parser.parse_args()
if not opts.command:
print 'Error: missing required parameter: --command'
print
parser.print_help()
return 1
if not opts.good_revision:
print 'Error: missing required parameter: --good_revision'
print
parser.print_help()
return 1
if not opts.bad_revision:
print 'Error: missing required parameter: --bad_revision'
print
parser.print_help()
return 1
if not opts.metric:
print 'Error: missing required parameter: --metric'
print
parser.print_help()
return 1
if opts.target_platform == 'cros':
# Run sudo up front to make sure credentials are cached for later.
print 'Sudo is required to build cros:'
print
RunProcess(['sudo', 'true'])
if not opts.cros_board:
print 'Error: missing required parameter: --cros_board'
print
parser.print_help()
return 1
if not opts.cros_remote_ip:
print 'Error: missing required parameter: --cros_remote_ip'
print
parser.print_help()
return 1
if not opts.working_directory:
print 'Error: missing required parameter: --working_directory'
print
parser.print_help()
return 1
opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
opts.repeat_test_max_time = min(max(opts.repeat_test_max_time, 1), 60)
opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
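# Convert truncate_percent from a whole-number percentage (e.g. 25) to the
# fraction (0.25) used when computing the truncated mean.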
opts.truncate_percent = opts.truncate_percent / 100.0
metric_values = opts.metric.split('/')
if len(metric_values) != 2:
print "Invalid metric specified: [%s]" % (opts.metric,)
print
return 1
if opts.working_directory:
if bisect_utils.CreateBisectDirectoryAndSetupDepot(opts):
return 1
if not bisect_utils.SetupPlatformBuildEnvironment(opts):
print 'Error: Failed to set platform environment.'
print
return 1
os.chdir(os.path.join(os.getcwd(), 'src'))
if not RemoveBuildFiles():
print "Something went wrong removing the build files."
print
return 1
if not CheckPlatformSupported(opts):
return 1
# Check what source control method they're using. Only support git workflow
# at the moment.
source_control = DetermineAndCreateSourceControl(opts)
if not source_control:
print "Sorry, only the git workflow is supported at the moment."
print
return 1
# gClient sync seems to fail if you're not in master branch.
if not source_control.IsInProperBranch() and not opts.debug_ignore_sync:
print "You must switch to master branch to run bisection."
print
return 1
bisect_test = BisectPerformanceMetrics(source_control, opts)
try:
bisect_results = bisect_test.Run(opts.command,
opts.bad_revision,
opts.good_revision,
metric_values)
if not(bisect_results['error']):
bisect_test.FormatAndPrintResults(bisect_results)
finally:
bisect_test.PerformCleanup()
if not(bisect_results['error']):
return 0
else:
print 'Error: ' + bisect_results['error']
print
return 1
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[
"VS100COMNTOOLS",
"GYP_DEFINES",
"GYP_GENERATORS"
] |
[]
|
["VS100COMNTOOLS", "GYP_DEFINES", "GYP_GENERATORS"]
|
python
| 3 | 0 | |
openweathermap.py
|
import os
import requests
from datetime import datetime, timezone, timedelta
import json
import dateutil.tz as tz
if "OWM_KEY" not in list(os.environ.keys()):
with open("creds.txt") as f:
owm_key = f.readlines()[-1]
os.environ["OWM_KEY"] = owm_key
temp_situation = {
"updated": 0,
"last_temp": 0
}
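# temp_situation acts as a small in-memory cache so the OpenWeatherMap API is
# queried at most once every 10 minutes (see the timestamp check below).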
def get_temperature():
time_now = tz.gettz("Europe/Stockholm")
if temp_situation["updated"] == 0 or temp_situation["updated"] < datetime.now(time_now) - timedelta(minutes=10):
print("Updating temperatures")
openweathermap_key = os.environ["OWM_KEY"]
# Current weather
api_call = "https://api.openweathermap.org/data/2.5/weather?q=Göteborg&APPID=" + openweathermap_key
r = requests.get(api_call)
r_json = json.loads(str(r.json()).replace("'", '"'))
temp_now = str(round((r_json["main"]["temp"]-273.15), 1)) + "°C"
wheater_now = getWeatherEmoji(r_json["weather"][0]["id"])
# Call weather prognosis api
hourly_call = "https://api.openweathermap.org/data/2.5/forecast?q=Göteborg&APPID=" + openweathermap_key
hourly_r = requests.get(hourly_call)
hourly_json = json.loads(str(hourly_r.json()).replace("'", '"'))
# Weather in 3 hours
temp0 = str(round((hourly_json["list"][0]["main"]["temp"]-273.15),1)) + "°C"
temp0time = datetime.fromtimestamp(hourly_json["list"][0]["dt"], time_now).strftime('%H:%M')
temp0wheather = getWeatherEmoji(hourly_json["list"][0]["weather"][0]["id"])
# Weather in 6 hours
temp1 = str(round((hourly_json["list"][1]["main"]["temp"]-273.15),1)) + "°C"
temp1time = datetime.fromtimestamp(hourly_json["list"][1]["dt"], time_now).strftime('%H:%M')
temp1wheather = getWeatherEmoji(hourly_json["list"][1]["weather"][0]["id"])
# Weather in 9 hours
temp2 = str(round((hourly_json["list"][2]["main"]["temp"]-273.15),1)) + "°C"
temp2time = datetime.fromtimestamp(hourly_json["list"][2]["dt"], time_now).strftime('%H:%M')
temp2wheather = getWeatherEmoji(hourly_json["list"][2]["weather"][0]["id"])
temp_situation["updated"] = datetime.now(time_now)
out = [temp_now, wheater_now, temp0, temp0time, temp0wheather, temp1, temp1time, temp1wheather, temp2, temp2time, temp2wheather]
temp_situation["last_temp"] = out
return out
else:
return temp_situation["last_temp"]
# Uses emoji to display the weather icon
def getWeatherEmoji(weatherID):
weatherIDstr = str(weatherID)
# Openweathermap Weather codes and corresponding emojis
thunderstorm = "\U0001F4A8" # Code: 200's, 900, 901, 902, 905
drizzle = "\U0001F4A7" # Code: 300's
rain = "\U00002614\U0000FE0F" # Code: 500's
snowflake = "\U00002744\U0000FE0F" # Code: 600's snowflake
# snowman = "\U000026C4" # Code: 600's snowman, 903, 906
atmosphere = "\U0001F301" # Code: 700's foggy
clearSky = "\U00002600\U0000FE0F" # Code: 800 clear sky
fewClouds = "\U000026C5\U0000FE0F" # Code: 801 sun behind clouds
clouds = "\U00002601\U0000FE0F" # Code: 802-803-804 clouds general
hot = "\U0001F525" # Code: 904
defaultEmoji = "\U0001F300" # default emojis
if weatherIDstr[0] == '2' or weatherIDstr == '900' or weatherIDstr == '901' or weatherIDstr == '902' or weatherIDstr == '905':
return thunderstorm
elif weatherIDstr[0] == '3':
return drizzle
elif weatherIDstr[0] == '5':
return rain
elif weatherIDstr[0] == '6' or weatherIDstr == '903' or weatherIDstr == '906':
return snowflake # + ' ' + snowman
elif weatherIDstr[0] == '7':
return atmosphere
elif weatherIDstr == '800':
return clearSky
elif weatherIDstr == '801':
return fewClouds
elif weatherIDstr == '802' or weatherIDstr == '803' or weatherIDstr == '804':
return clouds
elif weatherIDstr == '904':
return hot
else:
return defaultEmoji
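# Usage sketch (not part of the original module; indices follow the list
# built in get_temperature() above):
# current = get_temperature()
# print(current[0], current[1]) # e.g. "12.3°C" followed by a weather emoji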
|
[] |
[] |
[
"OWM_KEY"
] |
[]
|
["OWM_KEY"]
|
python
| 1 | 0 | |
server/opts.go
|
// Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
"github.com/nats-io/nats-server/v2/conf"
)
var allowUnknownTopLevelField = int32(0)
// NoErrOnUnknownFields can be used to change the behavior the processing
// of a configuration file. By default, an error is reported if unknown
// fields are found. If `noError` is set to true, no error will be reported
// if top-level unknown fields are found.
func NoErrOnUnknownFields(noError bool) {
var val int32
if noError {
val = int32(1)
}
atomic.StoreInt32(&allowUnknownTopLevelField, val)
}
// ClusterOpts are options for clusters.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type ClusterOpts struct {
Name string `json:"-"`
Host string `json:"addr,omitempty"`
Port int `json:"cluster_port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
Permissions *RoutePermissions `json:"-"`
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSMap bool `json:"-"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ConnectRetries int `json:"-"`
// Not exported (used in tests)
resolver netResolver
}
// GatewayOpts are options for gateways.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type GatewayOpts struct {
Name string `json:"name"`
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
Advertise string `json:"advertise,omitempty"`
ConnectRetries int `json:"connect_retries,omitempty"`
Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"`
RejectUnknown bool `json:"reject_unknown,omitempty"`
// Not exported, for tests.
resolver netResolver
sendQSubsBufSize int
}
// RemoteGatewayOpts are options for connecting to a remote gateway
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type RemoteGatewayOpts struct {
Name string `json:"name"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
}
// LeafNodeOpts are options for a given server to accept leaf node connections and/or connect to a remote cluster.
type LeafNodeOpts struct {
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
Account string `json:"-"`
Users []*User `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ReconnectInterval time.Duration `json:"-"`
// For solicited connections to other clusters/superclusters.
Remotes []*RemoteLeafOpts `json:"remotes,omitempty"`
// Not exported, for tests.
resolver netResolver
dialTimeout time.Duration
connDelay time.Duration
}
// RemoteLeafOpts are options for connecting to a remote server as a leaf node.
type RemoteLeafOpts struct {
LocalAccount string `json:"local_account,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
Credentials string `json:"-"`
TLS bool `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
Hub bool `json:"hub,omitempty"`
DenyImports []string `json:"-"`
DenyExports []string `json:"-"`
}
// Options block for nats-server.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type Options struct {
ConfigFile string `json:"-"`
ServerName string `json:"server_name"`
Host string `json:"addr"`
Port int `json:"port"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"`
TraceVerbose bool `json:"-"`
NoLog bool `json:"-"`
NoSigs bool `json:"-"`
NoSublistCache bool `json:"-"`
NoHeaderSupport bool `json:"-"`
DisableShortFirstPing bool `json:"-"`
Logtime bool `json:"-"`
MaxConn int `json:"max_connections"`
MaxSubs int `json:"max_subscriptions,omitempty"`
Nkeys []*NkeyUser `json:"-"`
Users []*User `json:"-"`
Accounts []*Account `json:"-"`
NoAuthUser string `json:"-"`
SystemAccount string `json:"-"`
NoSystemAccount bool `json:"-"`
AllowNewAccounts bool `json:"-"`
Username string `json:"-"`
Password string `json:"-"`
Authorization string `json:"-"`
PingInterval time.Duration `json:"ping_interval"`
MaxPingsOut int `json:"ping_max"`
HTTPHost string `json:"http_host"`
HTTPPort int `json:"http_port"`
HTTPBasePath string `json:"http_base_path"`
HTTPSPort int `json:"https_port"`
AuthTimeout float64 `json:"auth_timeout"`
MaxControlLine int32 `json:"max_control_line"`
MaxPayload int32 `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
LeafNode LeafNodeOpts `json:"leaf,omitempty"`
JetStream bool `json:"jetstream"`
JetStreamMaxMemory int64 `json:"-"`
JetStreamMaxStore int64 `json:"-"`
StoreDir string `json:"-"`
Websocket WebsocketOpts `json:"-"`
ProfPort int `json:"-"`
PidFile string `json:"-"`
PortsFileDir string `json:"-"`
LogFile string `json:"-"`
LogSizeLimit int64 `json:"-"`
Syslog bool `json:"-"`
RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"`
RoutesStr string `json:"-"`
TLSTimeout float64 `json:"tls_timeout"`
TLS bool `json:"-"`
TLSVerify bool `json:"-"`
TLSMap bool `json:"-"`
TLSCert string `json:"-"`
TLSKey string `json:"-"`
TLSCaCert string `json:"-"`
TLSConfig *tls.Config `json:"-"`
AllowNonTLS bool `json:"-"`
WriteDeadline time.Duration `json:"-"`
MaxClosedClients int `json:"-"`
LameDuckDuration time.Duration `json:"-"`
LameDuckGracePeriod time.Duration `json:"-"`
// MaxTracedMsgLen is the maximum printable length for traced messages.
MaxTracedMsgLen int `json:"-"`
// Operating a trusted NATS server
TrustedKeys []string `json:"-"`
TrustedOperators []*jwt.OperatorClaims `json:"-"`
AccountResolver AccountResolver `json:"-"`
AccountResolverTLSConfig *tls.Config `json:"-"`
resolverPreloads map[string]string
CustomClientAuthentication Authentication `json:"-"`
CustomRouterAuthentication Authentication `json:"-"`
// CheckConfig configuration file syntax test was successful and exit.
CheckConfig bool `json:"-"`
// ConnectErrorReports specifies the number of failed attempts
// at which point server should report the failure of an initial
// connection to a route, gateway or leaf node.
// See DEFAULT_CONNECT_ERROR_REPORTS for default value.
ConnectErrorReports int
// ReconnectErrorReports is similar to ConnectErrorReports except
// that this applies to reconnect events.
ReconnectErrorReports int
// private fields, used to know if bool options are explicitly
// defined in config and/or command line params.
inConfig map[string]bool
inCmdLine map[string]bool
// private fields, used for testing
gatewaysSolicitDelay time.Duration
routeProto int
}
// WebsocketOpts ...
type WebsocketOpts struct {
// The server will accept websocket client connections on this hostname/IP.
Host string
// The server will accept websocket client connections on this port.
Port int
// The host:port to advertise to websocket clients in the cluster.
Advertise string
// If no user name is provided when a client connects, will default to the
// matching user from the global list of users in `Options.Users`.
NoAuthUser string
// Name of the cookie, which if present in WebSocket upgrade headers,
// will be treated as JWT during CONNECT phase as long as
// "jwt" specified in the CONNECT options is missing or empty.
JWTCookie string
// Authentication section. If anything is configured in this section,
// it will override the authorization configuration of regular clients.
Username string
Password string
Token string
// Timeout for the authentication process.
AuthTimeout float64
// By default the server will enforce the use of TLS. If no TLS configuration
// is provided, you need to explicitly set NoTLS to true to allow the server
// to start without TLS configuration. Note that if a TLS configuration is
// present, this boolean is ignored and the server will run the Websocket
// server with that TLS configuration.
// Running without TLS is less secure since Websocket clients that use bearer
// tokens will send them in clear. So this should not be used in production.
NoTLS bool
// TLS configuration is required.
TLSConfig *tls.Config
// If true, map certificate values for authentication purposes.
TLSMap bool
// If true, the Origin header must match the request's host.
SameOrigin bool
// Only origins in this list will be accepted. If empty and
// SameOrigin is false, any origin is accepted.
AllowedOrigins []string
// If set to true, the server will negotiate with clients
// if compression can be used. If this is false, no compression
// will be used (both in server and clients) since it has to
// be negotiated between both endpoints
Compression bool
// Total time allowed for the server to read the client request
// and write the response back to the client. This include the
// time needed for the TLS Handshake.
HandshakeTimeout time.Duration
}
type netResolver interface {
LookupHost(ctx context.Context, host string) ([]string, error)
}
// Clone performs a deep copy of the Options struct, returning a new clone
// with all values copied.
func (o *Options) Clone() *Options {
if o == nil {
return nil
}
clone := &Options{}
*clone = *o
if o.Users != nil {
clone.Users = make([]*User, len(o.Users))
for i, user := range o.Users {
clone.Users[i] = user.clone()
}
}
if o.Nkeys != nil {
clone.Nkeys = make([]*NkeyUser, len(o.Nkeys))
for i, nkey := range o.Nkeys {
clone.Nkeys[i] = nkey.clone()
}
}
if o.Routes != nil {
clone.Routes = deepCopyURLs(o.Routes)
}
if o.TLSConfig != nil {
clone.TLSConfig = o.TLSConfig.Clone()
}
if o.Cluster.TLSConfig != nil {
clone.Cluster.TLSConfig = o.Cluster.TLSConfig.Clone()
}
if o.Gateway.TLSConfig != nil {
clone.Gateway.TLSConfig = o.Gateway.TLSConfig.Clone()
}
if len(o.Gateway.Gateways) > 0 {
clone.Gateway.Gateways = make([]*RemoteGatewayOpts, len(o.Gateway.Gateways))
for i, g := range o.Gateway.Gateways {
clone.Gateway.Gateways[i] = g.clone()
}
}
// FIXME(dlc) - clone leaf node stuff.
return clone
}
func deepCopyURLs(urls []*url.URL) []*url.URL {
if urls == nil {
return nil
}
curls := make([]*url.URL, len(urls))
for i, u := range urls {
cu := &url.URL{}
*cu = *u
curls[i] = cu
}
return curls
}
// Configuration file authorization section.
type authorization struct {
// Singles
user string
pass string
token string
acc string
// Multiple Nkeys/Users
nkeys []*NkeyUser
users []*User
timeout float64
defaultPermissions *Permissions
}
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Insecure bool
Map bool
Timeout float64
Ciphers []uint16
CurvePreferences []tls.CurveID
}
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:
e.g.
tls {
cert_file: "./certs/server-cert.pem"
key_file: "./certs/server-key.pem"
ca_file: "./certs/ca.pem"
verify: true
verify_and_map: true
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
}
Available cipher suites include:
`
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): A bit hacky
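// Usage sketch (illustrative file name only):
//
// opts, err := ProcessConfigFile("nats-server.conf")
// if err != nil {
// log.Fatal(err)
// }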
func ProcessConfigFile(configFile string) (*Options, error) {
opts := &Options{}
if err := opts.ProcessConfigFile(configFile); err != nil {
// If only warnings then continue and return the options.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
return opts, nil
}
return nil, err
}
return opts, nil
}
// token is an item parsed from the configuration.
type token interface {
Value() interface{}
Line() int
IsUsedVariable() bool
SourceFile() string
Position() int
}
// unwrapValue can be used to get the token and value from an item
// to be able to report the line number in case of an incorrect
// configuration.
// also stores the token in lastToken for use in convertPanicToError
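// Call sites typically pass &lt (the address of a local "lt" token) so the
// most recently unwrapped token is still available when convertPanicToError
// or convertPanicToErrorList recovers from a panic.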
func unwrapValue(v interface{}, lastToken *token) (token, interface{}) {
switch tk := v.(type) {
case token:
if lastToken != nil {
*lastToken = tk
}
return tk, tk.Value()
default:
return nil, v
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToErrorList(lastToken *token, errors *[]error) {
// only recover if an error can be stored
if errors == nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*errors = append(*errors, &configErr{*lastToken, fmt.Sprint(err)})
} else {
*errors = append(*errors, fmt.Errorf("encountered panic without a token %v", err))
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToError(lastToken *token, e *error) {
// only recover if an error can be stored
if e == nil || *e != nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*e = &configErr{*lastToken, fmt.Sprint(err)}
} else {
*e = fmt.Errorf("%v", err)
}
}
// configureSystemAccount configures a system account
// if present in the configuration.
func configureSystemAccount(o *Options, m map[string]interface{}) (retErr error) {
var lt token
defer convertPanicToError(&lt, &retErr)
configure := func(v interface{}) error {
tk, v := unwrapValue(v, &lt)
sa, ok := v.(string)
if !ok {
return &configErr{tk, "system account name must be a string"}
}
o.SystemAccount = sa
return nil
}
if v, ok := m["system_account"]; ok {
return configure(v)
} else if v, ok := m["system"]; ok {
return configure(v)
}
return nil
}
// ProcessConfigFile updates the Options structure with options
// present in the given configuration file.
// This version is convenient if one wants to set some default
// options and then override them with what is in the config file.
// For instance, this version allows you to do something such as:
//
// opts := &Options{Debug: true}
// opts.ProcessConfigFile(myConfigFile)
//
// If the config file contains "debug: false", after this call,
// opts.Debug would really be false. It would be impossible to
// achieve that with the non receiver ProcessConfigFile() version,
// since one would not know after the call if "debug" was not present
// or was present but set to false.
func (o *Options) ProcessConfigFile(configFile string) error {
o.ConfigFile = configFile
if configFile == "" {
return nil
}
m, err := conf.ParseFileWithChecks(configFile)
if err != nil {
return err
}
// Collect all errors and warnings and report them all together.
errors := make([]error, 0)
warnings := make([]error, 0)
// First check whether a system account has been defined,
// as that is a condition for other features to be enabled.
if err := configureSystemAccount(o, m); err != nil {
errors = append(errors, err)
}
for k, v := range m {
o.processConfigFileLine(k, v, &errors, &warnings)
}
if len(errors) > 0 || len(warnings) > 0 {
return &processConfigErr{
errors: errors,
warnings: warnings,
}
}
return nil
}
func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error, warnings *[]error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "listen":
hp, err := parseListen(v)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.Host = hp.host
o.Port = hp.port
case "client_advertise":
o.ClientAdvertise = v.(string)
case "port":
o.Port = int(v.(int64))
case "server_name":
o.ServerName = v.(string)
case "host", "net":
o.Host = v.(string)
case "debug":
o.Debug = v.(bool)
trackExplicitVal(o, &o.inConfig, "Debug", o.Debug)
case "trace":
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "trace_verbose":
o.TraceVerbose = v.(bool)
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "TraceVerbose", o.TraceVerbose)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "logtime":
o.Logtime = v.(bool)
trackExplicitVal(o, &o.inConfig, "Logtime", o.Logtime)
case "disable_sublist_cache", "no_sublist_cache":
o.NoSublistCache = v.(bool)
case "accounts":
err := parseAccounts(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
o.Username = auth.user
o.Password = auth.pass
o.Authorization = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, "Cannot have a user/pass and token"}
*errors = append(*errors, err)
return
}
o.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, "Can not have a single user/pass and a users array"}
*errors = append(*errors, err)
return
}
if auth.token != "" {
err := &configErr{tk, "Can not have a token and a users array"}
*errors = append(*errors, err)
return
}
// Users may have been added from Accounts parsing, so do an append here
o.Users = append(o.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
// NKeys may have been added from Accounts parsing, so do an append here
o.Nkeys = append(o.Nkeys, auth.nkeys...)
}
case "http":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPPort = hp.port
case "https":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPSPort = hp.port
case "http_port", "monitor_port":
o.HTTPPort = int(v.(int64))
case "https_port":
o.HTTPSPort = int(v.(int64))
case "http_base_path":
o.HTTPBasePath = v.(string)
case "cluster":
err := parseCluster(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "gateway":
if err := parseGateway(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "leaf", "leafnodes":
err := parseLeafNodes(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "jetstream":
err := parseJetStream(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "logfile", "log_file":
o.LogFile = v.(string)
case "logfile_size_limit", "log_size_limit":
o.LogSizeLimit = v.(int64)
case "syslog":
o.Syslog = v.(bool)
trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog)
case "remote_syslog":
o.RemoteSyslog = v.(string)
case "pidfile", "pid_file":
o.PidFile = v.(string)
case "ports_file_dir":
o.PortsFileDir = v.(string)
case "prof_port":
o.ProfPort = int(v.(int64))
case "max_control_line":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxControlLine = int32(v.(int64))
case "max_payload":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxPayload = int32(v.(int64))
case "max_pending":
o.MaxPending = v.(int64)
case "max_connections", "max_conn":
o.MaxConn = int(v.(int64))
case "max_traced_msg_len":
o.MaxTracedMsgLen = int(v.(int64))
case "max_subscriptions", "max_subs":
o.MaxSubs = int(v.(int64))
case "ping_interval":
o.PingInterval = parseDuration("ping_interval", tk, v, errors, warnings)
case "ping_max":
o.MaxPingsOut = int(v.(int64))
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
return
}
if o.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.TLSTimeout = tc.Timeout
o.TLSMap = tc.Map
case "allow_non_tls":
o.AllowNonTLS = v.(bool)
case "write_deadline":
o.WriteDeadline = parseDuration("write_deadline", tk, v, errors, warnings)
case "lame_duck_duration":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_duration: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 30*time.Second {
err := &configErr{tk, fmt.Sprintf("invalid lame_duck_duration of %v, minimum is 30 seconds", dur)}
*errors = append(*errors, err)
return
}
o.LameDuckDuration = dur
case "lame_duck_grace_period":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_grace_period: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 0 {
err := &configErr{tk, "invalid lame_duck_grace_period, needs to be positive"}
*errors = append(*errors, err)
return
}
o.LameDuckGracePeriod = dur
case "operator", "operators", "roots", "root", "root_operators", "root_operator":
opFiles := []string{}
switch v := v.(type) {
case string:
opFiles = append(opFiles, v)
case []string:
opFiles = append(opFiles, v...)
default:
err := &configErr{tk, fmt.Sprintf("error parsing operators: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Assume for now these are file names, but they can also be the JWT itself inline.
o.TrustedOperators = make([]*jwt.OperatorClaims, 0, len(opFiles))
for _, fname := range opFiles {
opc, err := ReadOperatorJWT(fname)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing operator JWT: %v", err)}
*errors = append(*errors, err)
continue
}
o.TrustedOperators = append(o.TrustedOperators, opc)
}
if len(o.TrustedOperators) == 1 {
// In case "resolver" is defined as well, it takes precedence
if o.AccountResolver == nil {
if accUrl, err := parseURL(o.TrustedOperators[0].AccountServerURL, "account resolver"); err == nil {
// nsc automatically appends "/accounts" during nsc push
o.AccountResolver, _ = NewURLAccResolver(accUrl.String() + "/accounts")
}
}
// In case "system_account" is defined as well, it takes precedence
if o.SystemAccount == "" {
o.SystemAccount = o.TrustedOperators[0].SystemAccount
}
}
case "resolver", "account_resolver", "accounts_resolver":
switch v := v.(type) {
case string:
// "resolver" takes precedence over value obtained from "operator".
// Clear so that parsing errors are not silently ignored.
o.AccountResolver = nil
memResolverRe := regexp.MustCompile(`(?i)(MEM|MEMORY)\s*`)
resolverRe := regexp.MustCompile(`(?i)(?:URL){1}(?:\({1}\s*"?([^\s"]*)"?\s*\){1})?\s*`)
if memResolverRe.MatchString(v) {
o.AccountResolver = &MemAccResolver{}
} else if items := resolverRe.FindStringSubmatch(v); len(items) == 2 {
url := items[1]
_, err := parseURL(url, "account resolver")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if ur, err := NewURLAccResolver(url); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
} else {
o.AccountResolver = ur
}
}
case map[string]interface{}:
dir := ""
dirType := ""
limit := int64(0)
ttl := time.Duration(0)
sync := time.Duration(0)
var err error
if v, ok := v["dir"]; ok {
_, v := unwrapValue(v, &lt)
dir = v.(string)
}
if v, ok := v["type"]; ok {
_, v := unwrapValue(v, &lt)
dirType = v.(string)
}
if v, ok := v["limit"]; ok {
_, v := unwrapValue(v, &lt)
limit = v.(int64)
}
if v, ok := v["ttl"]; ok {
_, v := unwrapValue(v, &lt)
ttl, err = time.ParseDuration(v.(string))
}
if v, ok := v["interval"]; err == nil && ok {
_, v := unwrapValue(v, &lt)
sync, err = time.ParseDuration(v.(string))
}
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if dir == "" {
*errors = append(*errors, &configErr{tk, "dir has no value and needs to point to a directory"})
return
}
if info, err := os.Stat(dir); err != nil && (!info.IsDir() || info.Mode().Perm()&(1<<(uint(7))) == 0) {
*errors = append(*errors, &configErr{tk, "dir needs to point to an accessible directory"})
return
}
var res AccountResolver
switch strings.ToUpper(dirType) {
case "CACHE":
if sync != 0 {
*errors = append(*errors, &configErr{tk, "CACHE does not accept sync"})
}
res, err = NewCacheDirAccResolver(dir, limit, ttl)
case "FULL":
if ttl != 0 {
*errors = append(*errors, &configErr{tk, "FULL does not accept ttl"})
}
res, err = NewDirAccResolver(dir, limit, sync)
}
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.AccountResolver = res
default:
err := &configErr{tk, fmt.Sprintf("error parsing operator resolver, wrong type %T", v)}
*errors = append(*errors, err)
return
}
if o.AccountResolver == nil {
err := &configErr{tk, "error parsing account resolver, should be MEM or " +
" URL(\"url\") or a map containing dir and type state=[FULL|CACHE])"}
*errors = append(*errors, err)
}
case "resolver_tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
return
}
if o.AccountResolverTLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
case "resolver_preload":
mp, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "preload should be a map of account_public_key:account_jwt"}
*errors = append(*errors, err)
return
}
o.resolverPreloads = make(map[string]string)
for key, val := range mp {
tk, val = unwrapValue(val, &lt)
if jwtstr, ok := val.(string); !ok {
err := &configErr{tk, "preload map value should be a string JWT"}
*errors = append(*errors, err)
continue
} else {
// Make sure this is a valid account JWT, that is a config error.
// We will warn of expirations, etc later.
if _, err := jwt.DecodeAccountClaims(jwtstr); err != nil {
err := &configErr{tk, "invalid account JWT"}
*errors = append(*errors, err)
continue
}
o.resolverPreloads[key] = jwtstr
}
}
case "no_auth_user":
o.NoAuthUser = v.(string)
case "system_account", "system":
// Already processed at the beginning so we just skip them
// to not treat them as unknown values.
return
case "no_system_account", "no_system", "no_sys_acc":
o.NoSystemAccount = v.(bool)
case "no_header_support":
o.NoHeaderSupport = v.(bool)
case "trusted", "trusted_keys":
switch v := v.(type) {
case string:
o.TrustedKeys = []string{v}
case []string:
o.TrustedKeys = v
case []interface{}:
keys := make([]string, 0, len(v))
for _, mv := range v {
tk, mv = unwrapValue(mv, &lt)
if key, ok := mv.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type in array %T", mv)}
*errors = append(*errors, err)
continue
}
}
o.TrustedKeys = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Do a quick sanity check on keys
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
err := &configErr{tk, fmt.Sprintf("trust key %q required to be a valid public operator nkey", key)}
*errors = append(*errors, err)
}
}
case "connect_error_reports":
o.ConnectErrorReports = int(v.(int64))
case "reconnect_error_reports":
o.ReconnectErrorReports = int(v.(int64))
case "websocket", "ws":
if err := parseWebsocket(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
default:
if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
func parseDuration(field string, tk token, v interface{}, errors *[]error, warnings *[]error) time.Duration {
if wd, ok := v.(string); ok {
if dur, err := time.ParseDuration(wd); err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing %s: %v", field, err)}
*errors = append(*errors, err)
return 0
} else {
return dur
}
} else {
// Backward compatible with old type, assume this is the
// number of seconds.
err := &configWarningErr{
field: field,
configErr: configErr{
token: tk,
reason: field + " should be converted to a duration",
},
}
*warnings = append(*warnings, err)
return time.Duration(v.(int64)) * time.Second
}
}
func trackExplicitVal(opts *Options, pm *map[string]bool, name string, val bool) {
m := *pm
if m == nil {
m = make(map[string]bool)
*pm = m
}
m[name] = val
}
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
host string
port int
}
// parseListen will parse listen option which is replacing host/net and port
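// Accepted forms are a bare port (e.g. 4222) or a "host:port" string
// (e.g. "0.0.0.0:4222"), as handled by the type switch below.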
func parseListen(v interface{}) (*hostPort, error) {
hp := &hostPort{}
switch vv := v.(type) {
// Only a port
case int64:
hp.port = int(vv)
case string:
host, port, err := net.SplitHostPort(vv)
if err != nil {
return nil, fmt.Errorf("could not parse address string %q", vv)
}
hp.port, err = strconv.Atoi(port)
if err != nil {
return nil, fmt.Errorf("could not parse port %q", port)
}
hp.host = host
default:
return nil, fmt.Errorf("expected port or host:port, got %T", vv)
}
return hp, nil
}
// parseCluster will parse the cluster config.
func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define cluster, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "name":
opts.Cluster.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.Cluster.Host = hp.host
opts.Cluster.Port = hp.port
case "port":
opts.Cluster.Port = int(mv.(int64))
case "host", "net":
opts.Cluster.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
err := &configErr{tk, "Cluster authorization does not allow multiple users"}
*errors = append(*errors, err)
continue
}
opts.Cluster.Username = auth.user
opts.Cluster.Password = auth.pass
opts.Cluster.AuthTimeout = auth.timeout
if auth.defaultPermissions != nil {
err := &configWarningErr{
field: mk,
configErr: configErr{
token: tk,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
}
*warnings = append(*warnings, err)
// Do not set permissions if they were specified in top-level cluster block.
if opts.Cluster.Permissions == nil {
setClusterPermissions(&opts.Cluster, auth.defaultPermissions)
}
}
case "routes":
ra := mv.([]interface{})
routes, errs := parseURLs(ra, "route")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
opts.Routes = routes
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.Cluster.TLSConfig = config
opts.Cluster.TLSTimeout = tlsopts.Timeout
opts.Cluster.TLSMap = tlsopts.Map
case "cluster_advertise", "advertise":
opts.Cluster.Advertise = mv.(string)
case "no_advertise":
opts.Cluster.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "Cluster.NoAdvertise", opts.Cluster.NoAdvertise)
case "connect_retries":
opts.Cluster.ConnectRetries = int(mv.(int64))
case "permissions":
perms, err := parseUserPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
// Dynamic response permissions do not make sense here.
if perms.Response != nil {
err := &configErr{tk, "Cluster permissions do not support dynamic responses"}
*errors = append(*errors, err)
continue
}
// This will possibly override permissions that were define in auth block
setClusterPermissions(&opts.Cluster, perms)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
func parseURLs(a []interface{}, typ string) (urls []*url.URL, errors []error) {
urls = make([]*url.URL, 0, len(a))
var lt token
defer convertPanicToErrorList(&lt, &errors)
for _, u := range a {
tk, u := unwrapValue(u, &lt)
sURL := u.(string)
url, err := parseURL(sURL, typ)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
urls = append(urls, url)
}
return urls, errors
}
func parseURL(u string, typ string) (*url.URL, error) {
urlStr := strings.TrimSpace(u)
url, err := url.Parse(urlStr)
if err != nil {
return nil, fmt.Errorf("error parsing %s url [%q]", typ, urlStr)
}
return url, nil
}
func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected gateway to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "name":
o.Gateway.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Gateway.Host = hp.host
o.Gateway.Port = hp.port
case "port":
o.Gateway.Port = int(mv.(int64))
case "host", "net":
o.Gateway.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
*errors = append(*errors, &configErr{tk, "Gateway authorization does not allow multiple users"})
continue
}
o.Gateway.Username = auth.user
o.Gateway.Password = auth.pass
o.Gateway.AuthTimeout = auth.timeout
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.TLSConfig = config
o.Gateway.TLSTimeout = tlsopts.Timeout
o.Gateway.TLSMap = tlsopts.Map
case "advertise":
o.Gateway.Advertise = mv.(string)
case "connect_retries":
o.Gateway.ConnectRetries = int(mv.(int64))
case "gateways":
gateways, err := parseGateways(mv, errors, warnings)
if err != nil {
return err
}
o.Gateway.Gateways = gateways
case "reject_unknown":
o.Gateway.RejectUnknown = mv.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
var dynamicJSAccountLimits = &JetStreamAccountLimits{-1, -1, -1, -1}
// Parses jetstream account limits for an account. Simple setup with boolean is allowed, and we will
// use dynamic account limits.
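// For example, an account block may specify `jetstream: enabled` for dynamic
// limits, or a map such as `jetstream { max_mem: ..., max_file: ...,
// max_streams: ..., max_consumers: ... }` using the keys handled below
// (values shown as placeholders).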
func parseJetStreamForAccount(v interface{}, acc *Account, errors *[]error, warnings *[]error) error {
var lt token
tk, v := unwrapValue(v, &lt)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
if vv {
acc.jsLimits = dynamicJSAccountLimits
}
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
acc.jsLimits = dynamicJSAccountLimits
case "disabled", "disable":
acc.jsLimits = nil
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
jsLimits := &JetStreamAccountLimits{-1, -1, -1, -1}
for mk, mv := range vv {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "max_memory", "max_mem", "mem", "memory":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxMemory = int64(vv)
case "max_store", "max_file", "max_disk", "store", "disk":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStore = int64(vv)
case "max_streams", "streams":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStreams = int(vv)
case "max_consumers", "consumers":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxConsumers = int(vv)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
acc.jsLimits = jsLimits
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
// Parse enablement of jetstream for a server.
func parseJetStream(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
tk, v := unwrapValue(v, &lt)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
opts.JetStream = v.(bool)
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
opts.JetStream = true
case "disabled", "disable":
opts.JetStream = false
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
for mk, mv := range vv {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "store_dir", "storedir":
opts.StoreDir = mv.(string)
case "max_memory_store", "max_mem_store", "max_mem":
opts.JetStreamMaxMemory = mv.(int64)
case "max_file_store", "max_file":
opts.JetStreamMaxStore = mv.(int64)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
opts.JetStream = true
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
// parseLeafNodes will parse the leaf node config.
func parseLeafNodes(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define a leafnode, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.Host = hp.host
opts.LeafNode.Port = hp.port
case "port":
opts.LeafNode.Port = int(mv.(int64))
case "host", "net":
opts.LeafNode.Host = mv.(string)
case "authorization":
auth, err := parseLeafAuthorization(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Username = auth.user
opts.LeafNode.Password = auth.pass
opts.LeafNode.AuthTimeout = auth.timeout
opts.LeafNode.Account = auth.acc
opts.LeafNode.Users = auth.users
// Validate user info config for leafnode authorization
if err := validateLeafNodeAuthOptions(opts); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
case "remotes":
// Parse the remote options here.
remotes, err := parseRemoteLeafNodes(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Remotes = remotes
case "reconnect", "reconnect_delay", "reconnect_interval":
opts.LeafNode.ReconnectInterval = time.Duration(int(mv.(int64))) * time.Second
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
if opts.LeafNode.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.TLSTimeout = tc.Timeout
case "leafnode_advertise", "advertise":
opts.LeafNode.Advertise = mv.(string)
case "no_advertise":
opts.LeafNode.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "LeafNode.NoAdvertise", opts.LeafNode.NoAdvertise)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// This is the authorization parser adapter for the leafnode's
// authorization config.
func parseLeafAuthorization(v interface{}, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
	defer convertPanicToErrorList(&lt, errors)
	_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
		tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
users, err := parseLeafUsers(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
case "account":
auth.acc = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth, nil
}
// This is a trimmed down version of parseUsers that is adapted
// for the users possibly defined in the authorization{} section
// of leafnodes {}.
func parseLeafUsers(mv interface{}, errors *[]error, warnings *[]error) ([]*User, error) {
var (
tk token
lt token
users = []*User{}
)
	defer convertPanicToErrorList(&lt, errors)
	tk, mv = unwrapValue(mv, &lt)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
		tk, u = unwrapValue(u, &lt)
		// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
user := &User{}
for k, v := range um {
			tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "account":
// We really want to save just the account name here, but
// the User object is *Account. So we create an account object
// but it won't be registered anywhere. The server will just
// use opts.LeafNode.Users[].Account.Name. Alternatively
// we need to create internal objects to store u/p and account
// name and have a server structure to hold that.
user.Account = NewAccount(v.(string))
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
users = append(users, user)
}
return users, nil
}
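// parseRemoteLeafNodes parses the remotes array of a leafnodes block.
// e.g. (illustrative entry; URLs, account and file names are examples only):
// remotes [
//     { urls: ["nats-leaf://hub1.example.com:7422", "nats-leaf://hub2.example.com:7422"], account: "APP", credentials: "./app.creds" }
// ]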
func parseRemoteLeafNodes(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteLeafOpts, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
ra, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected remotes field to be an array, got %T", v)}
}
remotes := make([]*RemoteLeafOpts, 0, len(ra))
for _, r := range ra {
		tk, r = unwrapValue(r, &lt)
		// Check it's a map/struct
rm, ok := r.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode entry to be a map/struct, got %v", r)})
continue
}
remote := &RemoteLeafOpts{}
for k, v := range rm {
			tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "url", "urls":
switch v := v.(type) {
case []interface{}, []string:
urls, errs := parseURLs(v.([]interface{}), "leafnode")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
remote.URLs = urls
case string:
url, err := parseURL(v, "leafnode")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.URLs = append(remote.URLs, url)
default:
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode url to be an array or string, got %v", v)})
continue
}
case "account", "local":
remote.LocalAccount = v.(string)
case "creds", "credentials":
p, err := expandPath(v.(string))
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.Credentials = p
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
if remote.TLSConfig, err = GenTLSConfig(tc); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
// If ca_file is defined, GenTLSConfig() sets TLSConfig.ClientCAs.
// Set RootCAs since this tls.Config is used when soliciting
// a connection (therefore behaves as a client).
remote.TLSConfig.RootCAs = remote.TLSConfig.ClientCAs
if tc.Timeout > 0 {
remote.TLSTimeout = tc.Timeout
} else {
remote.TLSTimeout = float64(DEFAULT_LEAF_TLS_TIMEOUT)
}
case "hub":
remote.Hub = v.(bool)
case "deny_imports", "deny_import":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyImports = subjects
case "deny_exports", "deny_export":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyExports = subjects
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
remotes = append(remotes, remote)
}
return remotes, nil
}
// Parses TLS and returns a TLSConfig and TLSTimeout.
// Used by cluster and gateway parsing.
func getTLSConfig(tk token) (*tls.Config, *TLSConfigOpts, error) {
tc, err := parseTLS(tk)
if err != nil {
return nil, nil, err
}
config, err := GenTLSConfig(tc)
if err != nil {
err := &configErr{tk, err.Error()}
return nil, nil, err
}
// For clusters/gateways, we will force strict verification. We also act
// as both client and server, so will mirror the rootCA to the
// clientCA pool.
config.ClientAuth = tls.RequireAndVerifyClientCert
config.RootCAs = config.ClientCAs
return config, tc, nil
}
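// parseGateways parses the gateways array of a gateway block.
// e.g. (illustrative entries; names and URLs are examples only):
// gateways [
//     { name: "east", url: "nats://gw-east.example.com:7222" }
//     { name: "west", urls: ["nats://gw-west-1.example.com:7222", "nats://gw-west-2.example.com:7222"] }
// ]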
func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteGatewayOpts, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
// Make sure we have an array
ga, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected gateways field to be an array, got %T", v)}
}
gateways := []*RemoteGatewayOpts{}
for _, g := range ga {
		tk, g = unwrapValue(g, &lt)
		// Check it's a map/struct
gm, ok := g.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected gateway entry to be a map/struct, got %v", g)})
continue
}
gateway := &RemoteGatewayOpts{}
for k, v := range gm {
			tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "name":
gateway.Name = v.(string)
case "tls":
tls, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.TLSConfig = tls
gateway.TLSTimeout = tlsopts.Timeout
case "url":
url, err := parseURL(v.(string), "gateway")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
gateway.URLs = append(gateway.URLs, url)
case "urls":
urls, errs := parseURLs(v.([]interface{}), "gateway")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
gateway.URLs = urls
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
gateways = append(gateways, gateway)
}
return gateways, nil
}
// Sets cluster's permissions based on given pub/sub permissions,
// doing the appropriate translation.
func setClusterPermissions(opts *ClusterOpts, perms *Permissions) {
// Import is whether or not we will send a SUB for interest to the other side.
// Export is whether or not we will accept a SUB from the remote for a given subject.
// Both only affect interest registration.
// The parsing sets Import into Publish and Export into Subscribe, convert
// accordingly.
opts.Permissions = &RoutePermissions{
Import: perms.Publish,
Export: perms.Subscribe,
}
}
// Temp structures to hold account import and export definitions since they need
// to be processed after being parsed.
type export struct {
acc *Account
sub string
accs []string
rt ServiceRespType
lat *serviceLatency
rthr time.Duration
}
type importStream struct {
acc *Account
an string
sub string
pre string
}
type importService struct {
acc *Account
an string
sub string
to string
share bool
}
// Checks if an account name is reserved.
func isReservedAccount(name string) bool {
return name == globalAccountName
}
// parseAccounts will parse the different accounts syntax.
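// Two illustrative forms are accepted (account and user names are examples only):
// accounts: [ACME, SYNADIA]
// accounts {
//     ACME { users: [ { user: a, password: a } ] }
//     SYNADIA { users: [ { user: s, password: s } ] }
// }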
func parseAccounts(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var (
importStreams []*importStream
importServices []*importService
exportStreams []*export
exportServices []*export
lt token
)
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
switch vv := v.(type) {
// Simple array of account names.
case []interface{}, []string:
m := make(map[string]struct{}, len(v.([]interface{})))
for _, n := range v.([]interface{}) {
			tk, name := unwrapValue(n, &lt)
ns := name.(string)
// Check for reserved names.
if isReservedAccount(ns) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", ns)}
*errors = append(*errors, err)
continue
}
if _, ok := m[ns]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate Account Entry: %s", ns)}
*errors = append(*errors, err)
continue
}
opts.Accounts = append(opts.Accounts, NewAccount(ns))
m[ns] = struct{}{}
}
// More common map entry
case map[string]interface{}:
		// Track users across accounts; usernames and nkeys must be
		// unique across accounts and between nkeys and users.
uorn := make(map[string]struct{})
for aname, mv := range vv {
			tk, amv := unwrapValue(mv, &lt)
// Skip referenced config vars within the account block.
if tk.IsUsedVariable() {
continue
}
// These should be maps.
mv, ok := amv.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected map entries for accounts"}
*errors = append(*errors, err)
continue
}
if isReservedAccount(aname) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", aname)}
*errors = append(*errors, err)
continue
}
var (
users []*User
nkeyUsr []*NkeyUser
usersTk token
)
acc := NewAccount(aname)
opts.Accounts = append(opts.Accounts, acc)
for k, v := range mv {
				tk, mv := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "nkey":
nk, ok := mv.(string)
if !ok || !nkeys.IsValidPublicAccountKey(nk) {
err := &configErr{tk, fmt.Sprintf("Not a valid public nkey for an account: %q", mv)}
*errors = append(*errors, err)
continue
}
acc.Nkey = nk
case "imports":
streams, services, err := parseAccountImports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
importStreams = append(importStreams, streams...)
importServices = append(importServices, services...)
case "exports":
streams, services, err := parseAccountExports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
exportStreams = append(exportStreams, streams...)
exportServices = append(exportServices, services...)
case "jetstream":
err := parseJetStreamForAccount(mv, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "users":
var err error
usersTk = tk
nkeyUsr, users, err = parseUsers(mv, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "default_permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
acc.defaultPerms = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
applyDefaultPermissions(users, nkeyUsr, acc.defaultPerms)
for _, u := range nkeyUsr {
if _, ok := uorn[u.Nkey]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate nkey %q detected", u.Nkey)}
*errors = append(*errors, err)
continue
}
uorn[u.Nkey] = struct{}{}
u.Account = acc
}
opts.Nkeys = append(opts.Nkeys, nkeyUsr...)
for _, u := range users {
if _, ok := uorn[u.Username]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate user %q detected", u.Username)}
*errors = append(*errors, err)
continue
}
uorn[u.Username] = struct{}{}
u.Account = acc
}
opts.Users = append(opts.Users, users...)
}
}
lt = tk
	// Bail early if there are previous errors.
if len(*errors) > 0 {
return nil
}
// Parse Imports and Exports here after all accounts defined.
	// Do exports first since they need to be defined for imports to succeed,
	// given that we do permissions checks.
// Create a lookup map for accounts lookups.
am := make(map[string]*Account, len(opts.Accounts))
for _, a := range opts.Accounts {
am[a.Name] = a
}
// Do stream exports
for _, stream := range exportStreams {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range stream.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := stream.acc.AddStreamExport(stream.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding stream export %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range exportServices {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range service.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := service.acc.AddServiceExportWithResponse(service.sub, service.rt, accounts); err != nil {
msg := fmt.Sprintf("Error adding service export %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.rthr != 0 {
// Response threshold was set in options.
if err := service.acc.SetServiceExportResponseThreshold(service.sub, service.rthr); err != nil {
msg := fmt.Sprintf("Error adding service export response threshold for %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
if service.lat != nil {
if opts.SystemAccount == "" {
msg := fmt.Sprintf("Error adding service latency sampling for %q: %v", service.sub, ErrNoSysAccount.Error())
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.TrackServiceExportWithSampling(service.sub, service.lat.subject, int(service.lat.sampling)); err != nil {
msg := fmt.Sprintf("Error adding service latency sampling for %q on subject %q: %v", service.sub, service.lat.subject, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
}
for _, stream := range importStreams {
ta := am[stream.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream import", stream.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := stream.acc.AddStreamImport(ta, stream.sub, stream.pre); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range importServices {
ta := am[service.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service import", service.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.to == "" {
service.to = service.sub
}
if err := service.acc.AddServiceImport(ta, service.to, service.sub); err != nil {
msg := fmt.Sprintf("Error adding service import %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.SetServiceImportSharing(ta, service.sub, service.share); err != nil {
msg := fmt.Sprintf("Error setting service import sharing %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
return nil
}
// Parse the account exports
func parseAccountExports(v interface{}, acc *Account, errors, warnings *[]error) ([]*export, []*export, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	// This should be an array of objects/maps.
	tk, v := unwrapValue(v, &lt)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Exports should be an array, got %T", v)}
}
var services []*export
var streams []*export
for _, v := range ims {
// Should have stream or service
stream, service, err := parseExportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Parse the account imports
func parseAccountImports(v interface{}, acc *Account, errors, warnings *[]error) ([]*importStream, []*importService, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	// This should be an array of objects/maps.
	tk, v := unwrapValue(v, &lt)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Imports should be an array, got %T", v)}
}
var services []*importService
var streams []*importStream
svcSubjects := map[string]*importService{}
for _, v := range ims {
// Should have stream or service
stream, service, err := parseImportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
if dup := svcSubjects[service.to]; dup != nil {
				tk, _ := unwrapValue(v, &lt)
err := &configErr{tk,
fmt.Sprintf("Duplicate service import subject %q, previously used in import for account %q, subject %q",
service.to, dup.an, dup.sub)}
*errors = append(*errors, err)
continue
}
svcSubjects[service.to] = service
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Helper to parse an embedded account description for imported services or streams.
func parseAccount(v map[string]interface{}, errors, warnings *[]error) (string, string, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
var accountName, subject string
for mk, mv := range v {
		tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "account":
accountName = mv.(string)
case "subject":
subject = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return accountName, subject, nil
}
// Parse an export stream or service.
// e.g.
// {stream: "public.>"} # No accounts means public.
// {stream: "synadia.private.>", accounts: [cncf, natsio]}
// {service: "pub.request"} # No accounts means public.
// {service: "pub.special.request", accounts: [nats.io]}
func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*export, *export, error) {
var (
curStream *export
curService *export
accounts []string
rt ServiceRespType
rtSeen bool
rtToken token
lat *serviceLatency
threshSeen bool
thresh time.Duration
latToken token
lt token
)
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
vv, ok := v.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Export Items should be a map with type entry, got %T", v)}
}
for mk, mv := range vv {
		tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream %q but already saw a service", mv)}
*errors = append(*errors, err)
continue
}
if rtToken != nil {
err := &configErr{rtToken, "Detected response directive on non-service"}
*errors = append(*errors, err)
continue
}
if latToken != nil {
err := &configErr{latToken, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected stream name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curStream = &export{sub: mvs}
if accounts != nil {
curStream.accs = accounts
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service %q but already saw a stream", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected service name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curService = &export{sub: mvs}
if accounts != nil {
curService.accs = accounts
}
if rtSeen {
curService.rt = rt
}
if lat != nil {
curService.lat = lat
}
if threshSeen {
curService.rthr = thresh
}
case "response", "response_type":
if rtSeen {
err := &configErr{tk, "Duplicate response type definition"}
*errors = append(*errors, err)
continue
}
rtSeen = true
rtToken = tk
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response type to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
switch strings.ToLower(mvs) {
case "single", "singleton":
rt = Singleton
case "stream":
rt = Streamed
case "chunk", "chunked":
rt = Chunked
default:
err := &configErr{tk, fmt.Sprintf("Unknown response type: %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rt = rt
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "threshold", "response_threshold", "response_max_time", "response_time":
if threshSeen {
err := &configErr{tk, "Duplicate response threshold detected"}
*errors = append(*errors, err)
continue
}
threshSeen = true
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %T", mv)}
*errors = append(*errors, err)
continue
}
var err error
thresh, err = time.ParseDuration(mvs)
if err != nil {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rthr = thresh
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "accounts":
for _, iv := range mv.([]interface{}) {
				_, mv := unwrapValue(iv, &lt)
accounts = append(accounts, mv.(string))
}
if curStream != nil {
curStream.accs = accounts
} else if curService != nil {
curService.accs = accounts
}
case "latency":
latToken = tk
var err error
lat, err = parseServiceLatency(tk, mv)
if err != nil {
*errors = append(*errors, err)
continue
}
if curStream != nil {
err = &configErr{tk, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.lat = lat
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// parseServiceLatency returns a latency config block.
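// e.g. (illustrative values; the subject is an example only):
// latency: "latency.requests"
// latency: { subject: "latency.requests", sampling: 50 }
// latency: { subject: "latency.requests", sampling: headers }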
func parseServiceLatency(root token, v interface{}) (l *serviceLatency, retErr error) {
var lt token
	defer convertPanicToError(&lt, &retErr)
if subject, ok := v.(string); ok {
return &serviceLatency{
subject: subject,
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}, nil
}
latency, ok := v.(map[string]interface{})
if !ok {
return nil, &configErr{token: root,
reason: fmt.Sprintf("Expected latency entry to be a map/struct or string, got %T", v)}
}
sl := serviceLatency{
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}
// Read sampling value.
if v, ok := latency["sampling"]; ok {
		tk, v := unwrapValue(v, &lt)
header := false
var sample int64
switch vv := v.(type) {
case int64:
// Sample is an int, like 50.
sample = vv
case string:
// Sample is a string, like "50%".
if strings.ToLower(strings.TrimSpace(vv)) == "headers" {
header = true
sample = 0
break
}
s := strings.TrimSuffix(vv, "%")
n, err := strconv.Atoi(s)
if err != nil {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Failed to parse latency sample: %v", err)}
}
sample = int64(n)
default:
			return nil, &configErr{token: tk,
				reason: fmt.Sprintf("Expected latency sample to be an integer or string, got %T", v)}
}
if !header {
if sample < 1 || sample > 100 {
return nil, &configErr{token: tk,
reason: ErrBadSampling.Error()}
}
}
sl.sampling = int8(sample)
}
// Read subject value.
v, ok = latency["subject"]
if !ok {
return nil, &configErr{token: root,
reason: "Latency subject required, but missing"}
}
	tk, v := unwrapValue(v, &lt)
subject, ok := v.(string)
if !ok {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency subject to be a string, got %T", subject)}
}
sl.subject = subject
return &sl, nil
}
// Parse an import stream or service.
// e.g.
// {stream: {account: "synadia", subject:"public.synadia"}, prefix: "imports.synadia"}
// {stream: {account: "synadia", subject:"synadia.private.*"}}
// {service: {account: "synadia", subject: "pub.special.request"}, to: "synadia.request"}
func parseImportStreamOrService(v interface{}, errors, warnings *[]error) (*importStream, *importService, error) {
var (
curStream *importStream
curService *importService
pre, to string
share bool
lt token
)
	defer convertPanicToErrorList(&lt, errors)
	tk, mv := unwrapValue(v, &lt)
vv, ok := mv.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Import Items should be a map with type entry, got %T", mv)}
}
for mk, mv := range vv {
		tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, "Detected stream but already saw a service"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Stream entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curStream = &importStream{an: accountName, sub: subject}
if pre != "" {
curStream.pre = pre
}
case "service":
if curStream != nil {
err := &configErr{tk, "Detected service but already saw a stream"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Service entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curService = &importService{an: accountName, sub: subject}
if to != "" {
curService.to = to
} else {
curService.to = subject
}
curService.share = share
case "prefix":
pre = mv.(string)
if curStream != nil {
curStream.pre = pre
}
case "to":
to = mv.(string)
if curService != nil {
curService.to = to
}
case "share":
share = mv.(bool)
if curService != nil {
curService.share = share
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Apply permission defaults to users/nkey users that don't have their own.
func applyDefaultPermissions(users []*User, nkeys []*NkeyUser, defaultP *Permissions) {
if defaultP == nil {
return
}
for _, user := range users {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
for _, user := range nkeys {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
}
// Helper function to parse Authorization configs.
func parseAuthorization(v interface{}, opts *Options, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
	defer convertPanicToErrorList(&lt, errors)
	_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
		tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
nkeys, users, err := parseUsers(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
auth.nkeys = nkeys
case "default_permission", "default_permissions", "permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.defaultPermissions = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
applyDefaultPermissions(auth.users, auth.nkeys, auth.defaultPermissions)
}
return auth, nil
}
// Helper function to parse multiple users array with optional permissions.
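// e.g. (illustrative entries; names, passwords and subjects are examples only,
// and the nkey value is elided):
// users [
//     { user: alice, password: s3cret, permissions: { publish: "orders.>", subscribe: "orders.*" } }
//     { nkey: "U..." }
// ]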
func parseUsers(mv interface{}, opts *Options, errors *[]error, warnings *[]error) ([]*NkeyUser, []*User, error) {
var (
tk token
lt token
keys []*NkeyUser
users = []*User{}
)
	defer convertPanicToErrorList(&lt, errors)
	tk, mv = unwrapValue(mv, &lt)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
		tk, u = unwrapValue(u, &lt)
		// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
var (
user = &User{}
nkey = &NkeyUser{}
perms *Permissions
err error
)
for k, v := range um {
// Also needs to unwrap first
			tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "nkey":
nkey.Nkey = v.(string)
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "permission", "permissions", "authorization":
perms, err = parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "allowed_connection_types", "connection_types", "clients":
				cts := parseAllowedConnectionTypes(tk, &lt, v, errors, warnings)
nkey.AllowedConnectionTypes = cts
user.AllowedConnectionTypes = cts
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
// Place perms if we have them.
if perms != nil {
			// nkey takes precedence.
if nkey.Nkey != "" {
nkey.Permissions = perms
} else {
user.Permissions = perms
}
}
		// Check to make sure we have at least an nkey or username defined.
if nkey.Nkey == "" && user.Username == "" {
return nil, nil, &configErr{tk, "User entry requires a user"}
} else if nkey.Nkey != "" {
			// Make sure the nkey is a proper public nkey for a user.
if !nkeys.IsValidPublicUserKey(nkey.Nkey) {
return nil, nil, &configErr{tk, "Not a valid public nkey for a user"}
}
// If we have user or password defined here that is an error.
if user.Username != "" || user.Password != "" {
return nil, nil, &configErr{tk, "Nkey users do not take usernames or passwords"}
}
keys = append(keys, nkey)
} else {
users = append(users, user)
}
}
return keys, users, nil
}
func parseAllowedConnectionTypes(tk token, lt *token, mv interface{}, errors *[]error, warnings *[]error) map[string]struct{} {
cts, err := parseStringArray("allowed connection types", tk, lt, mv, errors, warnings)
// If error, it has already been added to the `errors` array, simply return
if err != nil {
return nil
}
m, err := convertAllowedConnectionTypes(cts)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
}
return m
}
// Helper function to parse user/account permissions
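// e.g. (illustrative map; subjects and limits are examples only):
// permissions {
//     publish: { allow: ["orders.>"], deny: ["orders.admin.>"] }
//     subscribe: "orders.*"
//     allow_responses: { max: 1, expires: "1m" }
// }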
func parseUserPermissions(mv interface{}, errors, warnings *[]error) (*Permissions, error) {
var (
tk token
lt token
p = &Permissions{}
)
	defer convertPanicToErrorList(&lt, errors)
	tk, mv = unwrapValue(mv, &lt)
pm, ok := mv.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", mv)}
}
for k, v := range pm {
		tk, mv = unwrapValue(v, &lt)
switch strings.ToLower(k) {
// For routes:
// Import is Publish
// Export is Subscribe
case "pub", "publish", "import":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Publish = perms
case "sub", "subscribe", "export":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Subscribe = perms
case "publish_allow_responses", "allow_responses":
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
// Try boolean first
responses, ok := mv.(bool)
if ok {
if responses {
p.Response = rp
}
} else {
p.Response = parseAllowResponses(v, errors, warnings)
}
if p.Response != nil {
if p.Publish == nil {
p.Publish = &SubjectPermission{}
}
if p.Publish.Allow == nil {
// We turn off the blanket allow statement.
p.Publish.Allow = []string{}
}
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Top level parser for authorization configurations.
func parseVariablePermissions(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
switch vv := v.(type) {
case map[string]interface{}:
// New style with allow and/or deny properties.
return parseSubjectPermission(vv, errors, warnings)
default:
// Old style
return parseOldPermissionStyle(v, errors, warnings)
}
}
// Helper function to parse subject singletons and/or arrays
func parseSubjects(v interface{}, errors, warnings *[]error) ([]string, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
var subjects []string
switch vv := v.(type) {
case string:
subjects = append(subjects, vv)
case []string:
subjects = vv
case []interface{}:
for _, i := range vv {
			tk, i := unwrapValue(i, &lt)
subject, ok := i.(string)
if !ok {
return nil, &configErr{tk, "Subject in permissions array cannot be cast to string"}
}
subjects = append(subjects, subject)
}
default:
return nil, &configErr{tk, fmt.Sprintf("Expected subject permissions to be a subject, or array of subjects, got %T", v)}
}
if err := checkSubjectArray(subjects); err != nil {
return nil, &configErr{tk, err.Error()}
}
return subjects, nil
}
// Helper function to parse a ResponsePermission.
func parseAllowResponses(v interface{}, errors, warnings *[]error) *ResponsePermission {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
// Check if this is a map.
pm, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "error parsing response permissions, expected a boolean or a map"}
*errors = append(*errors, err)
return nil
}
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
for k, v := range pm {
		tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "max", "max_msgs", "max_messages", "max_responses":
max := int(v.(int64))
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if max != 0 {
rp.MaxMsgs = max
}
case "expires", "expiration", "ttl":
wd, ok := v.(string)
if ok {
ttl, err := time.ParseDuration(wd)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing expires: %v", err)}
*errors = append(*errors, err)
return nil
}
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if ttl != 0 {
rp.Expires = ttl
}
} else {
err := &configErr{tk, "error parsing expires, not a duration string"}
*errors = append(*errors, err)
return nil
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return rp
}
// Helper function to parse old style authorization configs.
func parseOldPermissionStyle(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
subjects, err := parseSubjects(v, errors, warnings)
if err != nil {
return nil, err
}
return &SubjectPermission{Allow: subjects}, nil
}
// Helper function to parse new style authorization into a SubjectPermission with Allow and Deny.
func parseSubjectPermission(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
var lt token
	defer convertPanicToErrorList(&lt, errors)
m := v.(map[string]interface{})
if len(m) == 0 {
return nil, nil
}
p := &SubjectPermission{}
for k, v := range m {
		tk, _ := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "allow":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Allow = subjects
case "deny":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Deny = subjects
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field name %q parsing subject permissions, only 'allow' or 'deny' are permitted", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Helper function to validate subjects, etc for account permissioning.
func checkSubjectArray(sa []string) error {
for _, s := range sa {
if !IsValidSubject(s) {
return fmt.Errorf("subject %q is not a valid subject", s)
}
}
return nil
}
// PrintTLSHelpAndDie prints TLS usage and exits.
func PrintTLSHelpAndDie() {
fmt.Printf("%s", tlsUsage)
for k := range cipherMap {
fmt.Printf(" %s\n", k)
}
fmt.Printf("\nAvailable curve preferences include:\n")
for k := range curvePreferenceMap {
fmt.Printf(" %s\n", k)
}
os.Exit(0)
}
func parseCipher(cipherName string) (uint16, error) {
cipher, exists := cipherMap[cipherName]
if !exists {
return 0, fmt.Errorf("unrecognized cipher %s", cipherName)
}
return cipher, nil
}
func parseCurvePreferences(curveName string) (tls.CurveID, error) {
curve, exists := curvePreferenceMap[curveName]
if !exists {
return 0, fmt.Errorf("unrecognized curve preference %s", curveName)
}
return curve, nil
}
// Helper function to parse TLS configs.
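// e.g. (illustrative block; file paths are examples only):
// tls {
//     cert_file: "./configs/certs/server-cert.pem"
//     key_file:  "./configs/certs/server-key.pem"
//     ca_file:   "./configs/certs/ca.pem"
//     verify:    true
//     timeout:   2
// }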
func parseTLS(v interface{}) (t *TLSConfigOpts, retErr error) {
var (
tlsm map[string]interface{}
tc = TLSConfigOpts{}
lt token
)
	defer convertPanicToError(&lt, &retErr)
	_, v = unwrapValue(v, &lt)
tlsm = v.(map[string]interface{})
for mk, mv := range tlsm {
		tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "cert_file":
certFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'cert_file' to be filename"}
}
tc.CertFile = certFile
case "key_file":
keyFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'key_file' to be filename"}
}
tc.KeyFile = keyFile
case "ca_file":
caFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'ca_file' to be filename"}
}
tc.CaFile = caFile
case "insecure":
insecure, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'insecure' to be a boolean"}
}
tc.Insecure = insecure
case "verify":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify' to be a boolean"}
}
tc.Verify = verify
case "verify_and_map":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify_and_map' to be a boolean"}
}
tc.Verify = verify
tc.Map = verify
case "cipher_suites":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'cipher_suites' cannot be empty"}
}
tc.Ciphers = make([]uint16, 0, len(ra))
for _, r := range ra {
				tk, r := unwrapValue(r, &lt)
cipher, err := parseCipher(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.Ciphers = append(tc.Ciphers, cipher)
}
case "curve_preferences":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'curve_preferences' cannot be empty"}
}
tc.CurvePreferences = make([]tls.CurveID, 0, len(ra))
for _, r := range ra {
				tk, r := unwrapValue(r, &lt)
cps, err := parseCurvePreferences(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.CurvePreferences = append(tc.CurvePreferences, cps)
}
case "timeout":
at := float64(0)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
tc.Timeout = at
default:
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)}
}
}
// If cipher suites were not specified then use the defaults
if tc.Ciphers == nil {
tc.Ciphers = defaultCipherSuites()
}
// If curve preferences were not specified, then use the defaults
if tc.CurvePreferences == nil {
tc.CurvePreferences = defaultCurvePreferences()
}
return &tc, nil
}
func parseAuthForWS(v interface{}, errors *[]error, warnings *[]error) *authorization {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
	defer convertPanicToErrorList(&lt, errors)
	_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
		tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth
}
func parseStringArray(fieldName string, tk token, lt *token, mv interface{}, errors *[]error, warnings *[]error) ([]string, error) {
switch mv := mv.(type) {
case string:
return []string{mv}, nil
case []interface{}:
strs := make([]string, 0, len(mv))
for _, val := range mv {
tk, val = unwrapValue(val, lt)
if str, ok := val.(string); ok {
strs = append(strs, str)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing %s: unsupported type in array %T", fieldName, val)}
*errors = append(*errors, err)
continue
}
}
return strs, nil
default:
err := &configErr{tk, fmt.Sprintf("error parsing %s: unsupported type %T", fieldName, mv)}
*errors = append(*errors, err)
return nil, err
}
}
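// parseWebsocket parses the websocket block of the configuration.
// e.g. (illustrative block; host and port are examples only):
// websocket {
//     listen: "0.0.0.0:8080"
//     no_tls: true
//     compression: true
//     same_origin: false
// }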
func parseWebsocket(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
	defer convertPanicToErrorList(&lt, errors)
	tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected websocket to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
		tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.Host = hp.host
o.Websocket.Port = hp.port
case "port":
o.Websocket.Port = int(mv.(int64))
case "host", "net":
o.Websocket.Host = mv.(string)
case "advertise":
o.Websocket.Advertise = mv.(string)
case "no_tls":
o.Websocket.NoTLS = mv.(bool)
case "tls":
tc, err := parseTLS(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
if o.Websocket.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.TLSMap = tc.Map
case "same_origin":
o.Websocket.SameOrigin = mv.(bool)
case "allowed_origins", "allowed_origin", "allow_origins", "allow_origin", "origins", "origin":
			o.Websocket.AllowedOrigins, _ = parseStringArray("allowed origins", tk, &lt, mv, errors, warnings)
case "handshake_timeout":
ht := time.Duration(0)
switch mv := mv.(type) {
case int64:
ht = time.Duration(mv) * time.Second
case string:
var err error
ht, err = time.ParseDuration(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
default:
err := &configErr{tk, fmt.Sprintf("error parsing handshake timeout: unsupported type %T", mv)}
*errors = append(*errors, err)
}
o.Websocket.HandshakeTimeout = ht
case "compression":
o.Websocket.Compression = mv.(bool)
case "authorization", "authentication":
auth := parseAuthForWS(tk, errors, warnings)
o.Websocket.Username = auth.user
o.Websocket.Password = auth.pass
o.Websocket.Token = auth.token
o.Websocket.AuthTimeout = auth.timeout
case "jwt_cookie":
o.Websocket.JWTCookie = mv.(string)
case "no_auth_user":
o.Websocket.NoAuthUser = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// GenTLSConfig loads TLS related configuration parameters.
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
// Create the tls.Config from our options before including the certs.
// It will determine the cipher suites that we prefer.
// FIXME(dlc) change if ARM based.
config := tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: tc.Ciphers,
PreferServerCipherSuites: true,
CurvePreferences: tc.CurvePreferences,
InsecureSkipVerify: tc.Insecure,
}
switch {
case tc.CertFile != "" && tc.KeyFile == "":
return nil, fmt.Errorf("missing 'key_file' in TLS configuration")
case tc.CertFile == "" && tc.KeyFile != "":
return nil, fmt.Errorf("missing 'cert_file' in TLS configuration")
case tc.CertFile != "" && tc.KeyFile != "":
// Now load in cert and private key
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
}
config.Certificates = []tls.Certificate{cert}
}
// Require client certificates as needed
if tc.Verify {
config.ClientAuth = tls.RequireAndVerifyClientCert
}
// Add in CAs if applicable.
if tc.CaFile != "" {
rootPEM, err := ioutil.ReadFile(tc.CaFile)
if err != nil || rootPEM == nil {
return nil, err
}
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return nil, fmt.Errorf("failed to parse root ca certificate")
}
config.ClientCAs = pool
}
return &config, nil
}
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
if fileOpts == nil {
return flagOpts
}
if flagOpts == nil {
return fileOpts
}
// Merge the two, flagOpts override
opts := *fileOpts
if flagOpts.Port != 0 {
opts.Port = flagOpts.Port
}
if flagOpts.Host != "" {
opts.Host = flagOpts.Host
}
if flagOpts.ClientAdvertise != "" {
opts.ClientAdvertise = flagOpts.ClientAdvertise
}
if flagOpts.Username != "" {
opts.Username = flagOpts.Username
}
if flagOpts.Password != "" {
opts.Password = flagOpts.Password
}
if flagOpts.Authorization != "" {
opts.Authorization = flagOpts.Authorization
}
if flagOpts.HTTPPort != 0 {
opts.HTTPPort = flagOpts.HTTPPort
}
if flagOpts.HTTPBasePath != "" {
opts.HTTPBasePath = flagOpts.HTTPBasePath
}
if flagOpts.Debug {
opts.Debug = true
}
if flagOpts.Trace {
opts.Trace = true
}
if flagOpts.Logtime {
opts.Logtime = true
}
if flagOpts.LogFile != "" {
opts.LogFile = flagOpts.LogFile
}
if flagOpts.PidFile != "" {
opts.PidFile = flagOpts.PidFile
}
if flagOpts.PortsFileDir != "" {
opts.PortsFileDir = flagOpts.PortsFileDir
}
if flagOpts.ProfPort != 0 {
opts.ProfPort = flagOpts.ProfPort
}
if flagOpts.Cluster.ListenStr != "" {
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
}
if flagOpts.Cluster.NoAdvertise {
opts.Cluster.NoAdvertise = true
}
if flagOpts.Cluster.ConnectRetries != 0 {
opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries
}
if flagOpts.Cluster.Advertise != "" {
opts.Cluster.Advertise = flagOpts.Cluster.Advertise
}
if flagOpts.RoutesStr != "" {
mergeRoutes(&opts, flagOpts)
}
return &opts
}
// RoutesFromStr parses route URLs from a string
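// e.g. RoutesFromStr("nats://10.0.0.1:6222,nats://10.0.0.2:6222") returns the
// two parsed *url.URL entries (addresses are examples only).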
func RoutesFromStr(routesStr string) []*url.URL {
routes := strings.Split(routesStr, ",")
if len(routes) == 0 {
return nil
}
routeUrls := []*url.URL{}
for _, r := range routes {
r = strings.TrimSpace(r)
u, _ := url.Parse(r)
routeUrls = append(routeUrls, u)
}
return routeUrls
}
// This will merge the flag routes and override anything that was present.
func mergeRoutes(opts, flagOpts *Options) {
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
if routeUrls == nil {
return
}
opts.Routes = routeUrls
opts.RoutesStr = flagOpts.RoutesStr
}
// RemoveSelfReference removes this server from an array of routes
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
var cleanRoutes []*url.URL
cport := strconv.Itoa(clusterPort)
selfIPs, err := getInterfaceIPs()
if err != nil {
return nil, err
}
for _, r := range routes {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
return nil, err
}
ipList, err := getURLIP(host)
if err != nil {
return nil, err
}
if cport == port && isIPInList(selfIPs, ipList) {
continue
}
cleanRoutes = append(cleanRoutes, r)
}
return cleanRoutes, nil
}
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
for _, ip1 := range list1 {
for _, ip2 := range list2 {
if ip1.Equal(ip2) {
return true
}
}
}
return false
}
func getURLIP(ipStr string) ([]net.IP, error) {
ipList := []net.IP{}
ip := net.ParseIP(ipStr)
if ip != nil {
ipList = append(ipList, ip)
return ipList, nil
}
hostAddr, err := net.LookupHost(ipStr)
if err != nil {
return nil, fmt.Errorf("Error looking up host with route hostname: %v", err)
}
for _, addr := range hostAddr {
ip = net.ParseIP(addr)
if ip != nil {
ipList = append(ipList, ip)
}
}
return ipList, nil
}
func getInterfaceIPs() ([]net.IP, error) {
var localIPs []net.IP
interfaceAddr, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Error getting self referencing address: %v", err)
}
for i := 0; i < len(interfaceAddr); i++ {
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
if net.ParseIP(interfaceIP.String()) != nil {
localIPs = append(localIPs, interfaceIP)
} else {
return nil, fmt.Errorf("Error parsing self referencing address: %v", err)
}
}
return localIPs, nil
}
func setBaselineOptions(opts *Options) {
// Setup non-standard Go defaults
if opts.Host == "" {
opts.Host = DEFAULT_HOST
}
if opts.HTTPHost == "" {
// Default to same bind from server if left undefined
opts.HTTPHost = opts.Host
}
if opts.Port == 0 {
opts.Port = DEFAULT_PORT
} else if opts.Port == RANDOM_PORT {
// Choose randomly inside of net.Listen
opts.Port = 0
}
if opts.MaxConn == 0 {
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
}
if opts.PingInterval == 0 {
opts.PingInterval = DEFAULT_PING_INTERVAL
}
if opts.MaxPingsOut == 0 {
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
}
if opts.TLSTimeout == 0 {
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.AuthTimeout == 0 {
opts.AuthTimeout = getDefaultAuthTimeout(opts.TLSConfig, opts.TLSTimeout)
}
if opts.Cluster.Port != 0 {
if opts.Cluster.Host == "" {
opts.Cluster.Host = DEFAULT_HOST
}
if opts.Cluster.TLSTimeout == 0 {
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.AuthTimeout == 0 {
opts.Cluster.AuthTimeout = getDefaultAuthTimeout(opts.Cluster.TLSConfig, opts.Cluster.TLSTimeout)
}
}
if opts.LeafNode.Port != 0 {
if opts.LeafNode.Host == "" {
opts.LeafNode.Host = DEFAULT_HOST
}
if opts.LeafNode.TLSTimeout == 0 {
opts.LeafNode.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.LeafNode.AuthTimeout == 0 {
opts.LeafNode.AuthTimeout = getDefaultAuthTimeout(opts.LeafNode.TLSConfig, opts.LeafNode.TLSTimeout)
}
}
// Set baseline connect port for remotes.
for _, r := range opts.LeafNode.Remotes {
if r != nil {
for _, u := range r.URLs {
if u.Port() == "" {
u.Host = net.JoinHostPort(u.Host, strconv.Itoa(DEFAULT_LEAFNODE_PORT))
}
}
}
}
// Set this regardless of opts.LeafNode.Port
if opts.LeafNode.ReconnectInterval == 0 {
opts.LeafNode.ReconnectInterval = DEFAULT_LEAF_NODE_RECONNECT
}
if opts.MaxControlLine == 0 {
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
}
if opts.MaxPayload == 0 {
opts.MaxPayload = MAX_PAYLOAD_SIZE
}
if opts.MaxPending == 0 {
opts.MaxPending = MAX_PENDING_SIZE
}
if opts.WriteDeadline == time.Duration(0) {
opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE
}
if opts.MaxClosedClients == 0 {
opts.MaxClosedClients = DEFAULT_MAX_CLOSED_CLIENTS
}
if opts.LameDuckDuration == 0 {
opts.LameDuckDuration = DEFAULT_LAME_DUCK_DURATION
}
if opts.LameDuckGracePeriod == 0 {
opts.LameDuckGracePeriod = DEFAULT_LAME_DUCK_GRACE_PERIOD
}
if opts.Gateway.Port != 0 {
if opts.Gateway.Host == "" {
opts.Gateway.Host = DEFAULT_HOST
}
if opts.Gateway.TLSTimeout == 0 {
opts.Gateway.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Gateway.AuthTimeout == 0 {
opts.Gateway.AuthTimeout = getDefaultAuthTimeout(opts.Gateway.TLSConfig, opts.Gateway.TLSTimeout)
}
}
if opts.ConnectErrorReports == 0 {
opts.ConnectErrorReports = DEFAULT_CONNECT_ERROR_REPORTS
}
if opts.ReconnectErrorReports == 0 {
opts.ReconnectErrorReports = DEFAULT_RECONNECT_ERROR_REPORTS
}
if opts.Websocket.Port != 0 {
if opts.Websocket.Host == "" {
opts.Websocket.Host = DEFAULT_HOST
}
}
// JetStream
if opts.JetStreamMaxMemory == 0 {
opts.JetStreamMaxMemory = -1
}
if opts.JetStreamMaxStore == 0 {
opts.JetStreamMaxStore = -1
}
}
func getDefaultAuthTimeout(tls *tls.Config, tlsTimeout float64) float64 {
var authTimeout float64
if tls != nil {
authTimeout = tlsTimeout + 1.0
} else {
authTimeout = float64(AUTH_TIMEOUT / time.Second)
}
return authTimeout
}
// ConfigureOptions accepts a flag set and augments it with NATS Server
// specific flags. On success, an options structure is returned configured
// based on the selected flags and/or configuration file.
// The command line options take precedence over the ones in the configuration file.
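// Illustrative usage (the flag set name, printVersion and printHelp callbacks are placeholders):
//
// fs := flag.NewFlagSet("nats-server", flag.ExitOnError)
// opts, err := ConfigureOptions(fs, os.Args[1:], printVersion, printHelp, PrintTLSHelpAndDie)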
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {
opts := &Options{}
var (
showVersion bool
showHelp bool
showTLSHelp bool
signal string
configFile string
dbgAndTrace bool
trcAndVerboseTrc bool
dbgAndTrcAndVerboseTrc bool
err error
)
fs.BoolVar(&showHelp, "h", false, "Show this message.")
fs.BoolVar(&showHelp, "help", false, "Show this message.")
fs.IntVar(&opts.Port, "port", 0, "Port to listen on.")
fs.IntVar(&opts.Port, "p", 0, "Port to listen on.")
fs.StringVar(&opts.Host, "addr", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "a", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "net", "", "Network host to listen on.")
fs.StringVar(&opts.ClientAdvertise, "client_advertise", "", "Client URL to advertise to other servers.")
fs.BoolVar(&opts.Debug, "D", false, "Enable Debug logging.")
fs.BoolVar(&opts.Debug, "debug", false, "Enable Debug logging.")
fs.BoolVar(&opts.Trace, "V", false, "Enable Trace logging.")
fs.BoolVar(&trcAndVerboseTrc, "VV", false, "Enable Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Trace, "trace", false, "Enable Trace logging.")
fs.BoolVar(&dbgAndTrace, "DV", false, "Enable Debug and Trace logging.")
fs.BoolVar(&dbgAndTrcAndVerboseTrc, "DVV", false, "Enable Debug and Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.")
fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.")
fs.StringVar(&opts.Username, "user", "", "Username required for connection.")
fs.StringVar(&opts.Password, "pass", "", "Password required for connection.")
fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.")
fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "https_port", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.StringVar(&configFile, "c", "", "Configuration file.")
fs.StringVar(&configFile, "config", "", "Configuration file.")
fs.BoolVar(&opts.CheckConfig, "t", false, "Check configuration and exit.")
fs.StringVar(&signal, "sl", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&signal, "signal", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&opts.PidFile, "P", "", "File to store process pid.")
fs.StringVar(&opts.PidFile, "pid", "", "File to store process pid.")
fs.StringVar(&opts.PortsFileDir, "ports_file_dir", "", "Creates a ports file in the specified directory (<executable_name>_<pid>.ports).")
fs.StringVar(&opts.LogFile, "l", "", "File to store logging output.")
fs.StringVar(&opts.LogFile, "log", "", "File to store logging output.")
	fs.Int64Var(&opts.LogSizeLimit, "log_size_limit", 0, "Logfile size limit; the log file is auto-rotated once the limit is reached.")
fs.BoolVar(&opts.Syslog, "s", false, "Enable syslog as log method.")
fs.BoolVar(&opts.Syslog, "syslog", false, "Enable syslog as log method.")
fs.StringVar(&opts.RemoteSyslog, "r", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.StringVar(&opts.RemoteSyslog, "remote_syslog", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.BoolVar(&showVersion, "version", false, "Print version information.")
fs.BoolVar(&showVersion, "v", false, "Print version information.")
fs.IntVar(&opts.ProfPort, "profile", 0, "Profiling HTTP port.")
fs.StringVar(&opts.RoutesStr, "routes", "", "Routes to actively solicit a connection.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster_listen", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.Advertise, "cluster_advertise", "", "Cluster URL to advertise to other servers.")
fs.BoolVar(&opts.Cluster.NoAdvertise, "no_advertise", false, "Do not advertise known cluster IPs to clients.")
fs.IntVar(&opts.Cluster.ConnectRetries, "connect_retries", 0, "For implicit routes, number of connect retries.")
fs.StringVar(&opts.Cluster.Name, "cluster_name", "", "Cluster Name, if not set one will be dynamically generated.")
fs.BoolVar(&showTLSHelp, "help_tls", false, "TLS help.")
fs.BoolVar(&opts.TLS, "tls", false, "Enable TLS.")
fs.BoolVar(&opts.TLSVerify, "tlsverify", false, "Enable TLS with client verification.")
fs.StringVar(&opts.TLSCert, "tlscert", "", "Server certificate file.")
fs.StringVar(&opts.TLSKey, "tlskey", "", "Private key for server certificate.")
fs.StringVar(&opts.TLSCaCert, "tlscacert", "", "Client certificate CA for verification.")
fs.IntVar(&opts.MaxTracedMsgLen, "max_traced_msg_len", 0, "Maximum printable length for traced messages. 0 for unlimited.")
fs.BoolVar(&opts.JetStream, "js", false, "Enable JetStream.")
fs.BoolVar(&opts.JetStream, "jetstream", false, "Enable JetStream.")
fs.StringVar(&opts.StoreDir, "sd", "", "Storage directory.")
fs.StringVar(&opts.StoreDir, "store_dir", "", "Storage directory.")
// The flag definitions above set "default" values for some of the options.
// Calling Parse() here will override those defaults with any value
// specified on the command line. This is ok. We will then update the
// options with the content of the configuration file (if present), and then
// call Parse() again to override the defaults+config with command line values.
// Calling Parse() before processing the config file is necessary since configFile
// itself is a command line argument, and Parse() is also required in order
// to know if the user simply wants to show "help" or "version", etc...
if err := fs.Parse(args); err != nil {
return nil, err
}
if showVersion {
printVersion()
return nil, nil
}
if showHelp {
printHelp()
return nil, nil
}
if showTLSHelp {
printTLSHelp()
return nil, nil
}
// Process args looking for non-flag options,
// 'version' and 'help' only for now
showVersion, showHelp, err = ProcessCommandLineArgs(fs)
if err != nil {
return nil, err
} else if showVersion {
printVersion()
return nil, nil
} else if showHelp {
printHelp()
return nil, nil
}
// Snapshot flag options.
FlagSnapshot = opts.Clone()
// Keep track of the boolean flags that were explicitly set with their value.
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "DVV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", dbgAndTrcAndVerboseTrc)
case "DV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrace)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrace)
case "D":
fallthrough
case "debug":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", FlagSnapshot.Debug)
case "VV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", trcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", trcAndVerboseTrc)
case "V":
fallthrough
case "trace":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", FlagSnapshot.Trace)
case "T":
fallthrough
case "logtime":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Logtime", FlagSnapshot.Logtime)
case "s":
fallthrough
case "syslog":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Syslog", FlagSnapshot.Syslog)
case "no_advertise":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Cluster.NoAdvertise", FlagSnapshot.Cluster.NoAdvertise)
}
})
// Process signal control.
if signal != "" {
if err := processSignal(signal); err != nil {
return nil, err
}
}
// Parse config if given
if configFile != "" {
// This will update the options with values from the config file.
err := opts.ProcessConfigFile(configFile)
if err != nil {
if opts.CheckConfig {
return nil, err
}
if cerr, ok := err.(*processConfigErr); !ok || len(cerr.Errors()) != 0 {
return nil, err
}
// If we get here we only have warnings and can still continue
fmt.Fprint(os.Stderr, err)
} else if opts.CheckConfig {
// Report configuration file syntax test was successful and exit.
return opts, nil
}
// Call this again to override config file options with options from command line.
// Note: We don't need to check error here since if there was an error, it would
// have been caught the first time this function was called (after setting up the
// flags).
fs.Parse(args)
} else if opts.CheckConfig {
return nil, fmt.Errorf("must specify [-c, --config] option to check configuration file syntax")
}
// Special handling of some flags
var (
flagErr error
tlsDisabled bool
tlsOverride bool
)
fs.Visit(func(f *flag.Flag) {
// short-circuit if an error was encountered
if flagErr != nil {
return
}
if strings.HasPrefix(f.Name, "tls") {
if f.Name == "tls" {
if !opts.TLS {
// User has specified "-tls=false", we need to disable TLS
opts.TLSConfig = nil
tlsDisabled = true
tlsOverride = false
return
}
tlsOverride = true
} else if !tlsDisabled {
tlsOverride = true
}
} else {
switch f.Name {
case "VV":
opts.Trace, opts.TraceVerbose = trcAndVerboseTrc, trcAndVerboseTrc
case "DVV":
opts.Trace, opts.Debug, opts.TraceVerbose = dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc
case "DV":
// Check value to support -DV=false
opts.Trace, opts.Debug = dbgAndTrace, dbgAndTrace
case "cluster", "cluster_listen":
// Override cluster config if explicitly set via flags.
flagErr = overrideCluster(opts)
case "routes":
// Keep in mind that the flag has updated opts.RoutesStr at this point.
if opts.RoutesStr == "" {
// Set routes array to nil since routes string is empty
opts.Routes = nil
return
}
routeUrls := RoutesFromStr(opts.RoutesStr)
opts.Routes = routeUrls
}
}
})
if flagErr != nil {
return nil, flagErr
}
// This will be true if some of the `-tls` params have been set and
// `-tls=false` has not been set.
if tlsOverride {
if err := overrideTLS(opts); err != nil {
return nil, err
}
}
// If we don't have cluster defined in the configuration
// file and no cluster listen string override, but we do
// have a routes override, we need to report misconfiguration.
if opts.RoutesStr != "" && opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" && opts.Cluster.Port == 0 {
return nil, errors.New("solicited routes require cluster capabilities, e.g. --cluster")
}
return opts, nil
}
func normalizeBasePath(p string) string {
if len(p) == 0 {
return "/"
}
// add leading slash
if p[0] != '/' {
p = "/" + p
}
return path.Clean(p)
}
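// Illustrative sketch (hypothetical helper, not part of the original file): a few
// representative inputs/outputs for normalizeBasePath, assuming standard path.Clean
// behaviour.
func exampleNormalizeBasePath() {
	fmt.Println(normalizeBasePath(""))        // "/"
	fmt.Println(normalizeBasePath("varz"))    // "/varz"
	fmt.Println(normalizeBasePath("/varz//")) // "/varz"
}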
// overrideTLS is called when at least "-tls=true" has been set.
func overrideTLS(opts *Options) error {
if opts.TLSCert == "" {
return errors.New("TLS Server certificate must be present and valid")
}
if opts.TLSKey == "" {
return errors.New("TLS Server private key must be present and valid")
}
tc := TLSConfigOpts{}
tc.CertFile = opts.TLSCert
tc.KeyFile = opts.TLSKey
tc.CaFile = opts.TLSCaCert
tc.Verify = opts.TLSVerify
var err error
opts.TLSConfig, err = GenTLSConfig(&tc)
return err
}
// overrideCluster updates Options.Cluster if the flag "cluster" (or "cluster_listen")
// has explicitly been set on the command line. If it is set to an empty string, it
// clears the Cluster options.
func overrideCluster(opts *Options) error {
if opts.Cluster.ListenStr == "" {
// This one is enough to disable clustering.
opts.Cluster.Port = 0
return nil
}
// -1 will fail url.Parse, so if we have -1, change it to
// 0, and then after parse, replace the port with -1 so we get
// automatic port allocation
wantsRandom := false
if strings.HasSuffix(opts.Cluster.ListenStr, ":-1") {
wantsRandom = true
cls := fmt.Sprintf("%s:0", opts.Cluster.ListenStr[0:len(opts.Cluster.ListenStr)-3])
opts.Cluster.ListenStr = cls
}
clusterURL, err := url.Parse(opts.Cluster.ListenStr)
if err != nil {
return err
}
h, p, err := net.SplitHostPort(clusterURL.Host)
if err != nil {
return err
}
if wantsRandom {
p = "-1"
}
opts.Cluster.Host = h
_, err = fmt.Sscan(p, &opts.Cluster.Port)
if err != nil {
return err
}
if clusterURL.User != nil {
pass, hasPassword := clusterURL.User.Password()
if !hasPassword {
return errors.New("expected cluster password to be set")
}
opts.Cluster.Password = pass
user := clusterURL.User.Username()
opts.Cluster.Username = user
} else {
// Since we override from flag and there is no user/pwd, make
// sure we clear what we may have gotten from config file.
opts.Cluster.Username = ""
opts.Cluster.Password = ""
}
return nil
}
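// Illustrative sketch (hypothetical function name, assumed flag value): how a
// "-cluster" listen string with credentials and a random port maps onto the
// Cluster options via overrideCluster.
func exampleOverrideCluster() {
	opts := &Options{}
	opts.Cluster.ListenStr = "nats://ruser:rpass@0.0.0.0:-1"
	if err := overrideCluster(opts); err == nil {
		// Host "0.0.0.0", Port -1 (random port), Username "ruser", Password "rpass"
		fmt.Printf("%s:%d %s\n", opts.Cluster.Host, opts.Cluster.Port, opts.Cluster.Username)
	}
}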
func processSignal(signal string) error {
var (
pid string
commandAndPid = strings.Split(signal, "=")
)
if l := len(commandAndPid); l == 2 {
pid = maybeReadPidFile(commandAndPid[1])
} else if l > 2 {
return fmt.Errorf("invalid signal parameters: %v", commandAndPid[2:])
}
if err := ProcessSignal(Command(commandAndPid[0]), pid); err != nil {
return err
}
os.Exit(0)
return nil
}
// maybeReadPidFile returns a PID or Windows service name obtained via the following method:
// 1. Try to open a file with path "pidStr" (absolute or relative).
// 2. If such a file exists and can be read, return its contents.
// 3. Otherwise, return the original "pidStr" string.
func maybeReadPidFile(pidStr string) string {
if b, err := ioutil.ReadFile(pidStr); err == nil {
return string(b)
}
return pidStr
}
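// Illustrative sketch (hypothetical values): the "-sl" flag accepts either
// "<command>" or "<command>=<pid|pidfile>"; when a file path is given,
// maybeReadPidFile resolves it to the PID stored in that file.
func exampleSignalTarget() {
	// "stop=/var/run/nats-server.pid" would resolve the PID from the file,
	// while "reload=12345" passes the PID through unchanged.
	fmt.Println(maybeReadPidFile("/var/run/nats-server.pid"))
	fmt.Println(maybeReadPidFile("12345"))
}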
func homeDir() (string, error) {
if runtime.GOOS == "windows" {
homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
userProfile := os.Getenv("USERPROFILE")
home := filepath.Join(homeDrive, homePath)
if homeDrive == "" || homePath == "" {
if userProfile == "" {
return "", errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
}
home = userProfile
}
return home, nil
}
home := os.Getenv("HOME")
if home == "" {
return "", errors.New("failed to get home dir, require $HOME")
}
return home, nil
}
func expandPath(p string) (string, error) {
p = os.ExpandEnv(p)
if !strings.HasPrefix(p, "~") {
return p, nil
}
home, err := homeDir()
if err != nil {
return "", err
}
return filepath.Join(home, p[1:]), nil
}
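// Illustrative sketch (hypothetical name, assumed output): expandPath first expands
// environment variables and then a leading "~" via homeDir.
func exampleExpandPath() {
	if p, err := expandPath("~/.nats/jetstream"); err == nil {
		fmt.Println(p) // e.g. /home/alice/.nats/jetstream on Unix (assumed home dir)
	}
}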
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
sdk/grpcplugin/grpcplugin.go
|
package grpcplugin
import (
"bufio"
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/golang/protobuf/ptypes/empty"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/log"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// readyString has to be written by the plugin; the worker reads it
const readyString = "is ready to accept new connection\n"
// Plugin is the interface to be implemented by plugin
type Plugin interface {
Start(context.Context) error
Stop(context.Context, *empty.Empty) (*empty.Empty, error)
Instance() *Common
}
// StartPlugin starts a plugin and returns a combined stdout/stderr reader and the socket name
func StartPlugin(ctx context.Context, pluginName string, workdir, cmd string, args []string, env []string) (io.Reader, string, error) {
c := exec.CommandContext(ctx, cmd, args...)
c.Dir = workdir
c.Env = env
stdoutPipe, err := c.StdoutPipe()
if err != nil {
return nil, "", err
}
stderrPipe, err := c.StderrPipe()
if err != nil {
return nil, "", err
}
r1 := bufio.NewReader(stdoutPipe)
r2 := bufio.NewReader(stderrPipe)
reader := io.MultiReader(r1, r2)
if err := c.Start(); err != nil {
return nil, "", err
}
go func() {
if err := c.Wait(); err != nil {
log.Info("GRPC Plugin %s wait failed:%+v", cmd, err)
}
log.Info("GRPC Plugin %s end", cmd)
}()
log.Info("GRPC Plugin %s started", cmd)
//Sleep a while to let the plugin write the socket address on stdout
time.Sleep(500 * time.Millisecond)
tsStart := time.Now()
stdoutreader := bufio.NewReader(stdoutPipe)
var socket string
var errReturn error
for {
line, errs := stdoutreader.ReadString('\n')
if errs == io.EOF {
continue
}
if errs != nil {
if time.Now().Before(tsStart.Add(5 * time.Second)) {
log.Warning("plugin:%s error on ReadString, retry in 500ms...", pluginName)
time.Sleep(500 * time.Millisecond)
continue
}
errReturn = fmt.Errorf("plugin:%s error on ReadString(len buff %d, content: %s): %v", pluginName, len(line), line, errs)
break
}
if strings.HasSuffix(line, readyString) {
socket = strings.TrimSpace(strings.Replace(line, fmt.Sprintf(" %s", readyString), "", 1))
log.Info("socket %s ready", socket)
break
}
}
return reader, socket, errReturn
}
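// Illustrative sketch (hypothetical binary name and workdir): how a worker might
// start a plugin and wait for the socket announcement line.
func exampleStartPlugin(ctx context.Context) {
	reader, socket, err := StartPlugin(ctx, "my-plugin", "/tmp", "./my-plugin-binary", nil, os.Environ())
	if err == nil {
		fmt.Println("plugin listening on", socket)
		_ = reader // combined stdout/stderr, typically streamed into the job logs
	}
}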
type Common struct {
Desc *grpc.ServiceDesc
Srv interface{}
Socket string
s *grpc.Server
}
func (c *Common) Instance() *Common {
return c
}
func (c *Common) Start(ctx context.Context) error {
_, err := c.start(ctx, c.Desc, c.Srv)
return err
}
func userCacheDir() string {
cdir := os.Getenv("HOME_CDS_PLUGINS")
if cdir == "" {
cdir = os.TempDir()
}
switch runtime.GOOS {
case "windows":
cdir = os.Getenv("LocalAppData")
case "darwin":
cdir += "/Library/Caches"
case "plan9":
cdir += "/lib/cache"
default: // Unix
dir := os.Getenv("XDG_CACHE_HOME")
if dir != "" {
cdir = dir
}
}
return cdir
}
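// Illustrative sketch (assumed directory value): userCacheDir prefers
// $HOME_CDS_PLUGINS, then an OS specific cache directory, falling back to os.TempDir().
func exampleUserCacheDir() {
	os.Setenv("HOME_CDS_PLUGINS", "/var/lib/cds-plugins")
	fmt.Println(userCacheDir()) // /var/lib/cds-plugins
}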
func (c *Common) start(ctx context.Context, desc *grpc.ServiceDesc, srv interface{}) (Plugin, error) {
//Start the grpc server on unix socket
uuid := sdk.UUID()
c.Socket = filepath.Join(userCacheDir(), fmt.Sprintf("grpcplugin-socket-%s.sock", uuid))
syscall.Unlink(c.Socket)
l, err := net.Listen("unix", c.Socket)
if err != nil {
return nil, fmt.Errorf("unable to listen on socket %s: %v", c.Socket, err)
}
s := grpc.NewServer()
c.s = s
c.s.RegisterService(desc, srv)
reflection.Register(s)
go func() {
<-ctx.Done()
fmt.Printf("exiting plugin\n")
defer os.RemoveAll(c.Socket)
c.s.Stop()
}()
go func() {
time.Sleep(5 * time.Millisecond)
socket, _ := filepath.Abs(c.Socket)
fmt.Printf("%s %s", socket, readyString)
}()
return c, s.Serve(l)
}
func (c *Common) Stop(context.Context, *empty.Empty) (*empty.Empty, error) {
defer func() {
fmt.Printf("Stopping plugin...")
time.Sleep(2 * time.Second)
c.s.Stop()
}()
return new(empty.Empty), nil
}
// InfoMarkdown returns string formatted with markdown
func InfoMarkdown(pl sdk.GRPCPlugin) string {
var sp string
for _, param := range pl.Parameters {
sp += fmt.Sprintf("* **%s**: %s\n", param.Name, param.Description)
}
info := fmt.Sprintf(`
%s
## Parameters
%s
`,
pl.Description,
sp)
return info
}
|
[
"\"HOME_CDS_PLUGINS\"",
"\"LocalAppData\"",
"\"XDG_CACHE_HOME\""
] |
[] |
[
"LocalAppData",
"HOME_CDS_PLUGINS",
"XDG_CACHE_HOME"
] |
[]
|
["LocalAppData", "HOME_CDS_PLUGINS", "XDG_CACHE_HOME"]
|
go
| 3 | 0 | |
cmd/crypto_local.go
|
// Copyright 2016-2019, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
cryptorand "crypto/rand"
"encoding/base64"
"fmt"
"os"
"github.com/pulumi/pulumi/pkg/diag"
"github.com/pulumi/pulumi/pkg/resource/config"
"github.com/pulumi/pulumi/pkg/secrets"
"github.com/pulumi/pulumi/pkg/secrets/passphrase"
"github.com/pulumi/pulumi/pkg/tokens"
"github.com/pulumi/pulumi/pkg/util/cmdutil"
"github.com/pulumi/pulumi/pkg/util/contract"
"github.com/pulumi/pulumi/pkg/workspace"
)
func readPassphrase(prompt string) (string, error) {
if phrase := os.Getenv("PULUMI_CONFIG_PASSPHRASE"); phrase != "" {
return phrase, nil
}
return cmdutil.ReadConsoleNoEcho(prompt)
}
func newPassphraseSecretsManager(stackName tokens.QName, configFile string) (secrets.Manager, error) {
contract.Assertf(stackName != "", "stackName %s", "!= \"\"")
if configFile == "" {
f, err := workspace.DetectProjectStackPath(stackName)
if err != nil {
return nil, err
}
configFile = f
}
info, err := workspace.LoadProjectStack(configFile)
if err != nil {
return nil, err
}
// If we have a salt, we can just use it.
if info.EncryptionSalt != "" {
for {
phrase, phraseErr := readPassphrase("Enter your passphrase to unlock config/secrets\n" +
" (set PULUMI_CONFIG_PASSPHRASE to remember)")
if phraseErr != nil {
return nil, phraseErr
}
sm, smerr := passphrase.NewPassphaseSecretsManager(phrase, info.EncryptionSalt)
switch {
case smerr == passphrase.ErrIncorrectPassphrase:
cmdutil.Diag().Errorf(diag.Message("", "incorrect passphrase"))
continue
case smerr != nil:
return nil, smerr
default:
return sm, nil
}
}
}
var phrase string
// Get the passphrase from the user, ensuring that the two entries match.
for {
// Here, the stack does not have an EncryptionSalt, so we will get a passphrase and create one
first, err := readPassphrase("Enter your passphrase to protect config/secrets")
if err != nil {
return nil, err
}
second, err := readPassphrase("Re-enter your passphrase to confirm")
if err != nil {
return nil, err
}
if first == second {
phrase = first
break
}
// If they didn't match, print an error and try again
cmdutil.Diag().Errorf(diag.Message("", "passphrases do not match"))
}
// Produce a new salt.
salt := make([]byte, 8)
_, err = cryptorand.Read(salt)
contract.Assertf(err == nil, "could not read from system random")
// Encrypt a message and store it with the salt so we can test if the password is correct later.
crypter := config.NewSymmetricCrypterFromPassphrase(phrase, salt)
msg, err := crypter.EncryptValue("pulumi")
contract.AssertNoError(err)
// Now store the result and save it.
info.EncryptionSalt = fmt.Sprintf("v1:%s:%s", base64.StdEncoding.EncodeToString(salt), msg)
if err = info.Save(configFile); err != nil {
return nil, err
}
// Finally, build the full secrets manager from the state we just saved
return passphrase.NewPassphaseSecretsManager(phrase, info.EncryptionSalt)
}
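// Illustrative sketch (hypothetical salt bytes): the EncryptionSalt persisted in the
// stack file has the shape "v1:<base64 salt>:<ciphertext>", which is what the
// passphrase secrets manager parses back on load.
func exampleEncryptionSaltFormat() {
	salt := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Println(fmt.Sprintf("v1:%s:%s", base64.StdEncoding.EncodeToString(salt), "<ciphertext>"))
}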
|
[
"\"PULUMI_CONFIG_PASSPHRASE\""
] |
[] |
[
"PULUMI_CONFIG_PASSPHRASE"
] |
[]
|
["PULUMI_CONFIG_PASSPHRASE"]
|
go
| 1 | 0 | |
integration/rql_test.go
|
package integration
import (
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/a8m/rql"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
var (
CreateTime, _ = time.Parse(time.RFC3339, "2000-05-16T16:00:00.000Z")
MySQLConn = os.Getenv("MYSQL_DSN")
QueryParser = rql.MustNewParser(rql.Config{
Model: User{},
FieldSep: ".",
})
)
type User struct {
ID int `rql:"filter,sort"`
Admin bool `rql:"filter"`
Name string `rql:"filter"`
AddressName string `rql:"filter"`
CreatedAt time.Time `rql:"filter"`
UnixTime time.Time `rql:"filter,layout=UnixDate"`
CustomTime time.Time `rql:"filter,layout=2006-01-02 15:04"`
}
func TestMySQL(t *testing.T) {
db := Connect(t)
SetUp(t, db)
defer Teardown(t, db)
AssertCount(t, db, 1, `{ "filter": { "id": 1 } }`)
AssertCount(t, db, 1, `{ "filter": { "id": 100 } }`)
AssertCount(t, db, 50, `{ "filter": { "id": { "$gt": 50 } } }`)
AssertCount(t, db, 50, `{ "filter": { "id": { "$lte": 50 } } }`)
AssertCount(t, db, 99, `{ "filter": { "$or": [{ "id":{ "$gt": 50 } }, { "id":{ "$lt": 50 } }] } }`)
AssertCount(t, db, 1, `{ "filter": {"name": "user_1" } }`)
AssertCount(t, db, 100, `{ "filter": {"name": {"$like": "user%" } } }`) // all
AssertCount(t, db, 2, `{ "filter": {"name": {"$like": "%10%" } } }`) // 10 or 100
AssertCount(t, db, 50, `{ "filter": {"admin": true } }`) // 50 users
AssertCount(t, db, 0, `{ "filter": {"address_name": "??" } }`) // nothing
AssertCount(t, db, 1, `{ "filter": {"address_name": "address_1" } }`) // 1st user
AssertCount(t, db, 100, fmt.Sprintf(`{"filter": {"created_at": { "$gt": %q } } }`, CreateTime.Add(-time.Hour).Format(time.RFC3339)))
AssertCount(t, db, 100, fmt.Sprintf(`{"filter": {"created_at": { "$lte": %q } } }`, CreateTime.Add(time.Hour).Format(time.RFC3339)))
AssertCount(t, db, 100, fmt.Sprintf(`{"filter": {"unix_time": { "$gt": %q } } }`, CreateTime.Add(-time.Hour).Format(time.UnixDate)))
AssertCount(t, db, 100, fmt.Sprintf(`{"filter": {"unix_time": { "$lte": %q } } }`, CreateTime.Add(time.Hour).Format(time.UnixDate)))
AssertCount(t, db, 100, fmt.Sprintf(`{"filter": {"custom_time": { "$gt": %q } } }`, CreateTime.Add(-time.Hour).Format("2006-01-02 15:04")))
AssertCount(t, db, 100, fmt.Sprintf(`{"filter": {"custom_time": { "$lte": %q } } }`, CreateTime.Add(time.Hour).Format("2006-01-02 15:04")))
AssertMatchIDs(t, db, []int{1}, `{ "filter": { "id": 1 } }`)
AssertMatchIDs(t, db, []int{2, 3}, `{ "filter": { "$or": [ { "id": 2 }, { "id": 3 } ] } }`)
AssertMatchIDs(t, db, []int{3, 2}, `{ "filter": { "$or": [ { "id": 2 }, { "id": 3 } ] }, "sort": ["-id"] }`)
AssertMatchIDs(t, db, []int{5, 4, 3, 2, 1}, `{ "filter": { "id": { "$lte": 5 } }, "sort": ["-id"] }`)
AssertSelect(t, db, []string{"user_1", "user_2"}, `{ "select": ["name"], "limit": 2 }`)
AssertSelect(t, db, []string{"address_1", "address_2"}, `{ "select": ["address_name"], "limit": 2 }`)
}
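// Illustrative sketch (the generated SQL strings are assumptions for illustration):
// QueryParser.Parse turns the JSON query into a filter expression, args, and sort
// clause that are handed straight to gorm.
func exampleRQLParse() {
	params, err := QueryParser.Parse([]byte(`{ "filter": { "id": { "$gt": 50 } }, "sort": ["-id"] }`))
	if err == nil {
		fmt.Println(params.FilterExp, params.FilterArgs, params.Sort) // e.g. "id > ?" [50] "id desc"
	}
}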
func AssertCount(t *testing.T, db *gorm.DB, expected int, query string) {
params, err := QueryParser.Parse([]byte(query))
must(t, err, "parse query: %s", query)
count := 0
err = db.Model(User{}).
Where(params.FilterExp, params.FilterArgs...).
Count(&count).Error
must(t, err, "count users")
if count != expected {
t.Errorf("AssertCount: %s\n\twant: %d\n\tgot: %d", query, expected, count)
}
}
func AssertMatchIDs(t *testing.T, db *gorm.DB, expected []int, query string) {
params, err := QueryParser.Parse([]byte(query))
must(t, err, "parse query: %s", query)
var ids []int
err = db.Model(User{}).
Where(params.FilterExp, params.FilterArgs...).
Order(params.Sort).
Pluck("id", &ids).Error
must(t, err, "select ids")
if len(ids) != len(expected) {
t.Errorf("AssertMatchIDs:\n\twant: %v\n\tgot: %v", expected, ids)
return
}
for i := range expected {
if ids[i] != expected[i] {
t.Errorf("AssertMatchIDs:\n\twant: %v\n\tgot: %v", expected, ids)
return
}
}
}
func AssertSelect(t *testing.T, db *gorm.DB, expected []string, query string) {
params, err := QueryParser.Parse([]byte(query))
must(t, err, "parse query: %s", query)
var values []string
err = db.Model(User{}).
Limit(params.Limit).
Select(params.Select).
Pluck(params.Select, &values).Error
must(t, err, "select values")
if len(values) != len(expected) {
t.Errorf("AssertSelect:\n\twant: %v\n\tgot: %v", expected, values)
return
}
for i := range expected {
if values[i] != expected[i] {
t.Errorf("AssertSelect:\n\twant: %v\n\tgot: %v", expected, values)
return
}
}
}
func Connect(t *testing.T) *gorm.DB {
if MySQLConn == "" {
t.Skip("missing database connection string")
}
for i := 1; i <= 5; i++ {
db, err := gorm.Open("mysql", MySQLConn)
if err == nil {
return db
}
time.Sleep(time.Second * time.Duration(i))
}
t.Log("failed connect to the database")
return nil
}
func SetUp(t *testing.T, db *gorm.DB) {
must(t, db.AutoMigrate(User{}).Error, "migrate db")
var wg sync.WaitGroup
wg.Add(100)
for i := 1; i <= 100; i++ {
go func(i int) {
defer wg.Done()
err := db.Create(&User{
ID: i,
Admin: i%2 == 0,
Name: fmt.Sprintf("user_%d", i),
AddressName: fmt.Sprintf("address_%d", i),
CreatedAt: CreateTime.Add(time.Minute),
UnixTime: CreateTime.Add(time.Minute),
CustomTime: CreateTime.Add(time.Minute),
}).Error
must(t, err, "create user")
}(i)
}
wg.Wait()
}
func Teardown(t *testing.T, db *gorm.DB) {
must(t, db.DropTable(User{}).Error, "drop table")
must(t, db.Close(), "close conn to mysql")
}
func must(t *testing.T, err error, msg string, args ...interface{}) {
if err != nil {
args = append(args, err)
t.Fatalf(msg+": %s", args...)
}
}
|
[
"\"MYSQL_DSN\""
] |
[] |
[
"MYSQL_DSN"
] |
[]
|
["MYSQL_DSN"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gerenciamento_pet.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
demo/demo/asgi.py
|
'''
Configure asgi server for serving asynchronous content such as websockets
Copyright (c) 2018 Gibbs Consulting and others - see CONTRIBUTIONS.md
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import django
from channels.routing import get_default_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo.settings")
django.setup()
application = get_default_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/integration_test.go
|
// +build integration
package tests
import (
"fmt"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/openshift/geard/containers"
"github.com/openshift/geard/docker"
"github.com/openshift/geard/systemd"
chk "launchpad.net/gocheck"
)
const (
TimeoutContainerStateChange = time.Second * 15
TimeoutDockerStateChange = time.Second * 5
TimeoutDockerWait = time.Second * 2
IntervalContainerCheck = time.Second / 20
IntervalHttpCheck = time.Second / 10
TestImage = "openshift/busybox-http-app"
EnvImage = "openshift/envtest"
)
//Hook up gocheck with go test
func Test(t *testing.T) {
chk.TestingT(t)
}
var _ = chk.Suite(&IntegrationTestSuite{})
type IntegrationTestSuite struct {
dockerClient *docker.DockerClient
daemonURI string
containerIds []containers.Identifier
repositoryIds []string
sdconn systemd.Systemd
}
func (s *IntegrationTestSuite) assertFilePresent(c *chk.C, path string, perm os.FileMode, readableByNobodyUser bool) {
info, err := os.Stat(path)
c.Assert(err, chk.IsNil)
if (info.Mode() & os.ModeSymlink) != 0 {
linkedFile, err := os.Readlink(path)
c.Assert(err, chk.IsNil)
s.assertFilePresent(c, linkedFile, perm, readableByNobodyUser)
} else {
if info.Mode().Perm() != perm {
c.Errorf("File %s has permission \"%s\" but expected \"%s\"", path, info.Mode().String(), perm.String())
}
}
if readableByNobodyUser {
for i := path; i != "/"; i = filepath.Dir(i) {
info, err = os.Stat(i)
c.Assert(err, chk.IsNil)
c.Assert(info.Mode().Perm()&0005, chk.Not(chk.Equals), 0)
}
}
}
func (s *IntegrationTestSuite) assertFileAbsent(c *chk.C, path string) {
c.Logf("assertFileAbsent(%v,%v,%v)", path)
_, err := os.Stat(path)
c.Assert(err, chk.Not(chk.IsNil))
}
func (s *IntegrationTestSuite) getContainerPid(id containers.Identifier) int {
container, err := s.dockerClient.InspectContainer(id.ContainerFor())
if err != nil {
return 0
}
return container.State.Pid
}
const (
CONTAINER_CREATED ContainerState = iota
CONTAINER_STARTED
CONTAINER_RESTARTED
CONTAINER_STOPPED
)
type ContainerState int
func (c ContainerState) String() string {
switch c {
case CONTAINER_CREATED:
return "created"
case CONTAINER_STARTED:
return "started"
case CONTAINER_RESTARTED:
return "restarted"
case CONTAINER_STOPPED:
return "stopped"
default:
return "unknown"
}
}
func (s *IntegrationTestSuite) unitState(id containers.Identifier) (string, string) {
props, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
if props == nil || err != nil {
return "", ""
}
return props["ActiveState"].(string), props["SubState"].(string)
}
func (s *IntegrationTestSuite) unitTimes(id containers.Identifier) (inactiveStart time.Time, inactiveEnd time.Time, activeStart time.Time, activeEnd time.Time) {
props, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
if props == nil || err != nil {
return
}
inactiveStart = time.Unix(int64(props["InactiveEnterTimestampMonotonic"].(uint64)), 0)
inactiveEnd = time.Unix(int64(props["InactiveExitTimestampMonotonic"].(uint64)), 0)
activeStart = time.Unix(int64(props["ActiveEnterTimestampMonotonic"].(uint64)), 0)
activeEnd = time.Unix(int64(props["ActiveExitTimestampMonotonic"].(uint64)), 0)
return
}
func until(duration, every time.Duration, f func() bool) bool {
timeout := time.After(duration)
ticker := time.NewTicker(every)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if f() {
return true
}
case <-timeout:
return false
}
}
}
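// Illustrative sketch (hypothetical usage): until polls the predicate every `every`
// until it returns true or `duration` elapses, reporting whether it ever succeeded.
func exampleUntil() {
	ok := until(TimeoutDockerWait, IntervalHttpCheck, func() bool { return true })
	fmt.Println(ok) // true on the first tick in this trivial case
}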
func isContainerAvailable(client *docker.DockerClient, id string) (bool, error) {
container, err := client.InspectContainer(id)
if err == docker.ErrNoSuchContainer {
return false, nil
}
if err != nil {
return true, err
}
if container.State.Running && container.State.Pid != 0 {
return true, nil
}
return false, nil
}
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
active, _ := s.unitState(id)
switch active {
case "active":
return
case "activating":
break
default:
c.Errorf("Container %s failed to start - %s", id, active)
c.FailNow()
return
}
isRunning := func() bool {
active, sub := s.unitState(id)
if active == "active" {
return true
}
if active == "activating" {
return false
}
c.Errorf("Unit %s start failed with state %s", id, sub)
c.FailNow()
return false
}
if !until(TimeoutContainerStateChange, time.Second/20, isRunning) {
c.Errorf("Timeout during start of %s, never got to 'active' state", id)
c.FailNow()
}
// Docker does not immediately return container status - possibly due to races inside of the
// daemon
failed := false
isContainerUp := func() bool {
done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
if err != nil {
failed = true
c.Error("Docker couldn't return container info", err)
c.FailNow()
}
return done
}
if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
if !failed {
c.Errorf("Docker never reported the container running %s", id)
}
c.FailNow()
}
}
func (s *IntegrationTestSuite) assertContainerStartsAndExits(c *chk.C, start time.Time, id containers.Identifier) {
hasStarted := func() bool {
_, inactiveEnd, activeStart, _ := s.unitTimes(id)
if inactiveEnd.IsZero() || activeStart.IsZero() {
c.Logf("Variables empty before")
}
if inactiveEnd.Before(start) || activeStart.Before(start) {
return false
}
return true
}
if !until(TimeoutContainerStateChange, IntervalContainerCheck, hasStarted) {
c.Errorf("The service did not start in the allotted time")
c.FailNow()
}
hasCompleted := func() bool {
switch active, _ := s.unitState(id); active {
case "active", "activating", "deactivating":
return false
}
return true
}
if !until(TimeoutContainerStateChange, IntervalContainerCheck, hasCompleted) {
c.Errorf("The service did not finish in the allotted time")
c.FailNow()
}
}
func (s *IntegrationTestSuite) assertContainerStops(c *chk.C, id containers.Identifier, allowFail bool) {
active, _ := s.unitState(id)
switch active {
case "active", "activating":
c.Errorf("Container %s stop not properly queued, service is still active - %s", id, active)
c.FailNow()
return
}
isStopped := func() bool {
active, sub := s.unitState(id)
if active == "inactive" {
return true
}
if allowFail && active == "failed" {
return true
}
if active == "deactivating" {
return false
}
c.Errorf("Unit %s stop failed (%s) with state %s", id, active, sub)
c.FailNow()
return false
}
if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStopped) {
c.Errorf("Timeout during start of %s, never got to 'inactive' state", id)
c.FailNow()
}
_, err := s.dockerClient.InspectContainer(id.ContainerFor())
if err == nil {
c.Errorf("Container %s is still active in docker, should be stopped and removed", id.ContainerFor())
c.FailNow()
}
}
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
isStarted := func() bool {
active, sub := s.unitState(id)
if active == "active" {
return true
}
if active == "deactivating" || active == "activating" {
return false
}
c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
c.FailNow()
return false
}
if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStarted) {
active, sub := s.unitState(id)
c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
c.FailNow()
}
// Docker does not immediately return container status - possibly due to races inside of the
// daemon
failed := false
isContainerUp := func() bool {
done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
if err != nil {
failed = true
c.Error("Docker couldn't return container info", err)
c.FailNow()
}
return done
}
if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
if !failed {
c.Errorf("Docker never reported the container running %s", id)
}
c.FailNow()
}
}
func (s *IntegrationTestSuite) SetUpSuite(c *chk.C) {
var err error
travis := os.Getenv("TRAVIS")
if travis != "" {
c.Skip("-skip run on Travis")
}
s.daemonURI = os.Getenv("GEARD_URI")
if s.daemonURI == "" {
s.daemonURI = "localhost:43273"
}
dockerURI := os.Getenv("DOCKER_URI")
if dockerURI == "" {
dockerURI = "unix:///var/run/docker.sock"
}
s.dockerClient, err = docker.GetConnection(dockerURI)
c.Assert(err, chk.IsNil)
containers, err := s.dockerClient.ListContainers()
c.Assert(err, chk.IsNil)
for _, cinfo := range containers {
if strings.HasPrefix(cinfo.Names[0], "Test") {
s.dockerClient.ForceCleanContainer(cinfo.ID)
}
}
_, err = s.dockerClient.GetImage(TestImage)
c.Assert(err, chk.IsNil)
s.sdconn, err = systemd.NewConnection()
c.Assert(err, chk.IsNil)
err = s.sdconn.Subscribe()
c.Assert(err, chk.IsNil)
defer s.sdconn.Unsubscribe()
}
func (s *IntegrationTestSuite) SetupTest(c *chk.C) {
}
func (s *IntegrationTestSuite) TearDownTest(c *chk.C) {
}
func (s *IntegrationTestSuite) TestInstallSimpleStart(c *chk.C) {
id, err := containers.NewIdentifier("TestInstallSimpleStart")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId)
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
active, _ := s.unitState(id)
c.Assert(active, chk.Equals, "inactive")
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
paths, err := filepath.Glob(id.VersionedUnitPathFor("*"))
c.Assert(err, chk.IsNil)
for _, p := range paths {
s.assertFilePresent(c, p, 0664, true)
}
s.assertFileAbsent(c, filepath.Join(id.RunPathFor(), "container-init.sh"))
ports, err := containers.GetExistingPorts(id)
c.Assert(err, chk.IsNil)
c.Assert(len(ports), chk.Equals, 0)
cmd = exec.Command("/usr/bin/gear", "status", hostContainerId)
data, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
c.Log(string(data))
c.Assert(strings.Contains(string(data), "Loaded: loaded (/var/lib/containers/units/Te/ctr-TestInstallSimpleStart.service; enabled)"), chk.Equals, true)
}
func (s *IntegrationTestSuite) TestInstallEnvFile(c *chk.C) {
id, err := containers.NewIdentifier("TestInstallEnvFile")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
// get the full path to this .go file so we can get the correct path to the
// simple.env file
_, filename, _, _ := runtime.Caller(0)
envFile := path.Join(path.Dir(filename), "..", "deployment", "fixtures", "simple.env")
cmd := exec.Command("/usr/bin/gear", "install", EnvImage, hostContainerId, "--env-file="+envFile, "--start")
data, err := cmd.CombinedOutput()
c.Log(cmd.Args)
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerStarts(c, id)
cmd = exec.Command("/usr/bin/gear", "status", hostContainerId)
data, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
c.Log(string(data))
c.Assert(strings.Contains(string(data), "TEST=value"), chk.Equals, true)
c.Assert(strings.Contains(string(data), "QUOTED=\\\"foo\\\""), chk.Equals, true)
c.Assert(strings.Contains(string(data), "IGNORED"), chk.Equals, false)
}
func (s *IntegrationTestSuite) TestInstallEnv(c *chk.C) {
id, err := containers.NewIdentifier("TestInstallEnv")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
hostEnvId := fmt.Sprintf("%v/%v", s.daemonURI, "foobar")
cmd := exec.Command("/usr/bin/gear", "install", EnvImage, hostContainerId, "--env-id=foobar", "A=B", "C=D", "--start")
data, err := cmd.CombinedOutput()
c.Log(cmd.Args)
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerStarts(c, id)
cmd = exec.Command("/usr/bin/gear", "status", hostContainerId)
data, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
c.Log(string(data))
c.Assert(strings.Contains(string(data), "A=B"), chk.Equals, true)
c.Assert(strings.Contains(string(data), "C=D"), chk.Equals, true)
cmd = exec.Command("/usr/bin/gear", "env", hostEnvId)
data, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
c.Log(string(data))
c.Assert(string(data), chk.Equals, "A=B\nC=D\n")
}
func (s *IntegrationTestSuite) TestInstallIsolateStart(c *chk.C) {
id, err := containers.NewIdentifier("TestInstallIsolateStart")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--start", "--ports=8080:0", "--isolate")
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerStarts(c, id)
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
paths, err := filepath.Glob(id.VersionedUnitPathFor("*"))
c.Assert(err, chk.IsNil)
for _, p := range paths {
s.assertFilePresent(c, p, 0664, true)
}
s.assertFilePresent(c, filepath.Join(id.RunPathFor(), "container-init.sh"), 0700, false)
ports, err := containers.GetExistingPorts(id)
c.Assert(err, chk.IsNil)
c.Assert(len(ports), chk.Equals, 1)
httpAlive := func() bool {
resp, err := http.Get(fmt.Sprintf("http://0.0.0.0:%v", ports[0].External))
if err == nil {
c.Assert(resp.StatusCode, chk.Equals, 200)
return true
}
return false
}
if !until(TimeoutContainerStateChange, IntervalHttpCheck, httpAlive) {
c.Errorf("Unable to retrieve a 200 status code from port %d", ports[0].External)
c.FailNow()
}
}
func (s *IntegrationTestSuite) TestInstallIsolate(c *chk.C) {
id, err := containers.NewIdentifier("TestInstallIsolate")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId)
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
active, _ := s.unitState(id)
c.Assert(active, chk.Equals, "inactive")
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
paths, err := filepath.Glob(id.VersionedUnitPathFor("*"))
c.Assert(err, chk.IsNil)
for _, p := range paths {
s.assertFilePresent(c, p, 0664, true)
}
}
func (s *IntegrationTestSuite) TestSamePortRejected(c *chk.C) {
id, err := containers.NewIdentifier("TestSamePortRejected")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--ports=8080:39485")
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
active, _ := s.unitState(id)
c.Assert(active, chk.Equals, "inactive")
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
paths, err := filepath.Glob(id.VersionedUnitPathFor("*"))
c.Assert(err, chk.IsNil)
for _, p := range paths {
s.assertFilePresent(c, p, 0664, true)
}
id2, _ := containers.NewIdentifier("TestSamePortRejected2")
cmd = exec.Command("/usr/bin/gear", "install", TestImage, fmt.Sprintf("%v/%v", s.daemonURI, id2), "--ports=8080:39485")
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.ErrorMatches, "exit status 1")
state, substate := s.unitState(id2)
c.Assert(state, chk.Equals, "inactive")
c.Assert(substate, chk.Equals, "dead")
}
func (s *IntegrationTestSuite) TestStartStop(c *chk.C) {
id, err := containers.NewIdentifier("TestStartStop")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--ports=8080:0", "--isolate")
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
cmd = exec.Command("/usr/bin/gear", "start", hostContainerId)
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerStarts(c, id)
s.assertFilePresent(c, filepath.Join(id.RunPathFor(), "container-init.sh"), 0700, false)
ports, err := containers.GetExistingPorts(id)
c.Assert(err, chk.IsNil)
c.Assert(len(ports), chk.Equals, 1)
httpAlive := func() bool {
resp, err := http.Get(fmt.Sprintf("http://0.0.0.0:%v", ports[0].External))
if err == nil {
c.Assert(resp.StatusCode, chk.Equals, 200)
return true
}
return false
}
if !until(TimeoutContainerStateChange, IntervalHttpCheck, httpAlive) {
c.Errorf("Unable to retrieve a 200 status code from port %d", ports[0].External)
c.FailNow()
}
cmd = exec.Command("/usr/bin/gear", "stop", hostContainerId)
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerStops(c, id, true)
}
func (s *IntegrationTestSuite) TestRestart(c *chk.C) {
id, err := containers.NewIdentifier("TestRestart")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--ports=8080:0", "--start", "--isolate")
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
s.assertContainerStarts(c, id)
s.assertFilePresent(c, filepath.Join(id.RunPathFor(), "container-init.sh"), 0700, false)
oldPid := s.getContainerPid(id)
cmd = exec.Command("/usr/bin/gear", "restart", hostContainerId)
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerRestarts(c, id)
newPid := s.getContainerPid(id)
c.Assert(oldPid, chk.Not(chk.Equals), newPid)
}
func (s *IntegrationTestSuite) TestStatus(c *chk.C) {
id, err := containers.NewIdentifier("TestStatus")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId)
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
active, _ := s.unitState(id)
if active == "failed" {
c.Logf("Container %s has previous recorded 'failed' state, convert to 'inactive'", id)
active = "inactive"
}
c.Assert(active, chk.Equals, "inactive")
cmd = exec.Command("/usr/bin/gear", "status", hostContainerId)
data, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
c.Log(string(data))
c.Assert(strings.Contains(string(data), "Loaded: loaded (/var/lib/containers/units/Te/ctr-TestStatus.service; enabled)"), chk.Equals, true)
cmd = exec.Command("/usr/bin/gear", "start", hostContainerId)
_, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
s.assertContainerStarts(c, id)
cmd = exec.Command("/usr/bin/gear", "status", hostContainerId)
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
c.Assert(strings.Contains(string(data), "Loaded: loaded (/var/lib/containers/units/Te/ctr-TestStatus.service; enabled)"), chk.Equals, true)
c.Assert(strings.Contains(string(data), "Active: active (running)"), chk.Equals, true)
cmd = exec.Command("/usr/bin/gear", "stop", hostContainerId)
_, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
s.assertContainerStops(c, id, true)
cmd = exec.Command("/usr/bin/gear", "status", hostContainerId)
data, err = cmd.CombinedOutput()
c.Assert(err, chk.IsNil)
c.Log(string(data))
c.Assert(strings.Contains(string(data), "Loaded: loaded (/var/lib/containers/units/Te/ctr-TestStatus.service; enabled)"), chk.Equals, true)
}
func (s *IntegrationTestSuite) TestVeryLongNameAtLimits(c *chk.C) {
id, err := containers.NewIdentifier("TestVeryLongNameAtLimits")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--start", "--ports=8080:0", "--isolate")
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertContainerStarts(c, id)
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
s.assertFilePresent(c, filepath.Join(id.RunPathFor(), "container-init.sh"), 0700, false)
ports, err := containers.GetExistingPorts(id)
c.Assert(err, chk.IsNil)
c.Assert(len(ports), chk.Equals, 1)
httpAlive := func() bool {
resp, err := http.Get(fmt.Sprintf("http://0.0.0.0:%v", ports[0].External))
if err == nil {
c.Assert(resp.StatusCode, chk.Equals, 200)
return true
}
return false
}
if !until(TimeoutContainerStateChange, IntervalHttpCheck, httpAlive) {
c.Errorf("Unable to retrieve a 200 status code from port %d", ports[0].External)
c.FailNow()
}
}
func (s *IntegrationTestSuite) TestLinks(c *chk.C) {
id, err := containers.NewIdentifier("TestLinks")
c.Assert(err, chk.IsNil)
s.containerIds = append(s.containerIds, id)
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--ports=8080:0", "--isolate")
data, err := cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
cmd = exec.Command("/usr/bin/gear", "link", "-n", "127.0.0.1:8081:74.125.239.114:80", hostContainerId)
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(err, chk.IsNil)
cmd = exec.Command("/usr/bin/gear", "start", hostContainerId)
data, err = cmd.CombinedOutput()
s.assertContainerStarts(c, id)
s.assertFilePresent(c, filepath.Join(id.RunPathFor(), "container-init.sh"), 0700, false)
cmd = exec.Command("/usr/bin/switchns", "--container="+id.ContainerFor(),
"--", "/sbin/iptables", "-t", "nat", "-L")
data, err = cmd.CombinedOutput()
c.Log(string(data))
c.Assert(strings.Contains(string(data), "tcp dpt:tproxy to:74.125.239.114"), chk.Equals, true)
}
func (s *IntegrationTestSuite) TearDownSuite(c *chk.C) {
for _, id := range s.containerIds {
hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)
cmd := exec.Command("/usr/bin/gear", "delete", hostContainerId)
data, err := cmd.CombinedOutput()
c.Log(string(data))
if err != nil {
c.Logf("Container %v did not cleanup properly", id)
}
}
}
|
[
"\"TRAVIS\"",
"\"GEARD_URI\"",
"\"DOCKER_URI\""
] |
[] |
[
"TRAVIS",
"DOCKER_URI",
"GEARD_URI"
] |
[]
|
["TRAVIS", "DOCKER_URI", "GEARD_URI"]
|
go
| 3 | 0 | |
provisioning/molecule/gitlab/tests/test_default.py
|
import os
import pytest
# import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('pkg', [
'curl',
'sshpass',
])
def test_pkg(host, pkg):
package = host.package(pkg)
assert package.is_installed
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
# @pytest.mark.parametrize('directory', [
# '/home/deploy_user/.ssh',
# ])
# def test_directory_is_present(host, directory):
# dir = host.file(directory)
# assert dir.is_directory
# assert dir.exists
@pytest.mark.parametrize('file', [
'/etc/hosts',
'/etc/gitlab/gitlab.rb',
'/etc/gitlab/ssl/gitlab.local.crt',
'/etc/gitlab/ssl/gitlab.local.key',
])
def test_binary_is_present(host, file):
file = host.file(file)
assert file.exists
# @pytest.mark.parametrize('command, regex', [
# ("getent passwd vagrant", "^vagrant*"),
# ])
# def test_commands(host, command, regex):
# cmd = host.check_output(command)
# assert re.match(regex, cmd)
# @pytest.mark.parametrize('svc', [
# 'ssh'
# ])
# def test_svc(host, svc):
# service = host.service(svc)
#
# assert service.is_running
# assert service.is_enabled
@pytest.mark.parametrize('file, content', [
("/etc/passwd", "root")
])
def test_files(host, file, content):
file = host.file(file)
assert file.exists
assert file.contains(content)
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
provider/pkg/provider/provider.go
|
// Copyright 2016-2021, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package provider
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/user"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
jsonpatch "github.com/evanphx/json-patch"
pbempty "github.com/golang/protobuf/ptypes/empty"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/imdario/mergo"
pkgerrors "github.com/pkg/errors"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/await"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/await/states"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/clients"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/cluster"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/gen"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/kinds"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/logging"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/metadata"
"github.com/pulumi/pulumi-kubernetes/provider/v3/pkg/openapi"
"github.com/pulumi/pulumi/pkg/v3/resource/provider"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
logger "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror"
pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"helm.sh/helm/v3/pkg/helmpath"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
k8sresource "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientapi "k8s.io/client-go/tools/clientcmd/api"
k8sopenapi "k8s.io/kubectl/pkg/util/openapi"
"sigs.k8s.io/yaml"
)
// --------------------------------------------------------------------------
// Kubernetes resource provider.
//
// Implements functionality for the Pulumi Kubernetes Resource Provider. This code is responsible
// for producing sensible responses for the gRPC server to send back to a client when it requests
// something to do with the Kubernetes resources it's meant to manage.
// --------------------------------------------------------------------------
const (
streamInvokeList = "kubernetes:kubernetes:list"
streamInvokeWatch = "kubernetes:kubernetes:watch"
streamInvokePodLogs = "kubernetes:kubernetes:podLogs"
invokeDecodeYaml = "kubernetes:yaml:decode"
invokeHelmTemplate = "kubernetes:helm:template"
invokeKustomize = "kubernetes:kustomize:directory"
lastAppliedConfigKey = "kubectl.kubernetes.io/last-applied-configuration"
initialAPIVersionKey = "__initialApiVersion"
)
type cancellationContext struct {
context context.Context
cancel context.CancelFunc
}
func makeCancellationContext() *cancellationContext {
ctx, cancel := context.WithCancel(context.Background())
return &cancellationContext{
context: ctx,
cancel: cancel,
}
}
type kubeOpts struct {
rejectUnknownResources bool
}
type kubeProvider struct {
host *provider.HostClient
canceler *cancellationContext
name string
version string
pulumiSchema []byte
providerPackage string
opts kubeOpts
defaultNamespace string
enableDryRun bool
enableReplaceCRD bool
enableSecrets bool
suppressDeprecationWarnings bool
suppressHelmHookWarnings bool
helmDriver string
helmPluginsPath string
helmRegistryConfigPath string
helmRepositoryConfigPath string
helmRepositoryCache string
helmReleaseProvider customResourceProvider
yamlRenderMode bool
yamlDirectory string
clusterUnreachable bool // Kubernetes cluster is unreachable.
clusterUnreachableReason string // Detailed error message if cluster is unreachable.
config *rest.Config // Cluster config, e.g., through $KUBECONFIG file.
kubeconfig clientcmd.ClientConfig
clientSet *clients.DynamicClientSet
dryRunVerifier *k8sresource.DryRunVerifier
logClient *clients.LogClient
k8sVersion cluster.ServerVersion
resources k8sopenapi.Resources
resourcesMutex sync.RWMutex
}
var _ pulumirpc.ResourceProviderServer = (*kubeProvider)(nil)
func makeKubeProvider(
host *provider.HostClient, name, version string, pulumiSchema []byte,
) (pulumirpc.ResourceProviderServer, error) {
return &kubeProvider{
host: host,
canceler: makeCancellationContext(),
name: name,
version: version,
pulumiSchema: pulumiSchema,
providerPackage: name,
enableDryRun: false,
enableSecrets: false,
suppressDeprecationWarnings: false,
}, nil
}
func (k *kubeProvider) getResources() (k8sopenapi.Resources, error) {
k.resourcesMutex.RLock()
rs := k.resources
k.resourcesMutex.RUnlock()
if rs != nil {
return rs, nil
}
k.resourcesMutex.Lock()
defer k.resourcesMutex.Unlock()
rs, err := openapi.GetResourceSchemasForClient(k.clientSet.DiscoveryClientCached)
if err != nil {
return nil, err
}
k.resources = rs
return k.resources, nil
}
func (k *kubeProvider) invalidateResources() {
k.resourcesMutex.Lock()
defer k.resourcesMutex.Unlock()
k.resources = nil
}
// Call dynamically executes a method in the provider associated with a component resource.
func (k *kubeProvider) Call(ctx context.Context, req *pulumirpc.CallRequest) (*pulumirpc.CallResponse, error) {
return nil, status.Error(codes.Unimplemented, "Call is not yet implemented")
}
// Construct creates a new instance of the provided component resource and returns its state.
func (k *kubeProvider) Construct(ctx context.Context, req *pulumirpc.ConstructRequest) (*pulumirpc.ConstructResponse, error) {
return nil, status.Error(codes.Unimplemented, "Construct is not yet implemented")
}
// GetSchema returns the JSON-encoded schema for this provider's package.
func (k *kubeProvider) GetSchema(ctx context.Context, req *pulumirpc.GetSchemaRequest) (*pulumirpc.GetSchemaResponse, error) {
if v := req.GetVersion(); v != 0 {
return nil, fmt.Errorf("unsupported schema version %d", v)
}
return &pulumirpc.GetSchemaResponse{Schema: string(k.pulumiSchema)}, nil
}
// CheckConfig validates the configuration for this provider.
func (k *kubeProvider) CheckConfig(ctx context.Context, req *pulumirpc.CheckRequest) (*pulumirpc.CheckResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.CheckConfig(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: true,
SkipNulls: true,
})
if err != nil {
return nil, pkgerrors.Wrapf(err, "CheckConfig failed because of malformed resource inputs")
}
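	// truthyValue reports whether the named property is set to a non-empty string or a true boolean.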
truthyValue := func(argName resource.PropertyKey, props resource.PropertyMap) bool {
if arg := props[argName]; arg.HasValue() {
switch {
case arg.IsString() && len(arg.StringValue()) > 0:
return true
case arg.IsBool() && arg.BoolValue():
return true
default:
return false
}
}
return false
}
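	// YAML rendering does not apply resources to a live cluster, so cluster connection settings
	// (and enableDryRun) are rejected when renderYamlToDirectory is set.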
renderYamlEnabled := truthyValue("renderYamlToDirectory", news)
errTemplate := `%q arg is not compatible with "renderYamlToDirectory" arg`
if renderYamlEnabled {
var failures []*pulumirpc.CheckFailure
if truthyValue("cluster", news) {
failures = append(failures, &pulumirpc.CheckFailure{
Property: "cluster",
Reason: fmt.Sprintf(errTemplate, "cluster"),
})
}
if truthyValue("context", news) {
failures = append(failures, &pulumirpc.CheckFailure{
Property: "context",
Reason: fmt.Sprintf(errTemplate, "context"),
})
}
if truthyValue("kubeconfig", news) {
failures = append(failures, &pulumirpc.CheckFailure{
Property: "kubeconfig",
Reason: fmt.Sprintf(errTemplate, "kubeconfig"),
})
}
if truthyValue("enableDryRun", news) {
failures = append(failures, &pulumirpc.CheckFailure{
Property: "enableDryRun",
Reason: fmt.Sprintf(errTemplate, "enableDryRun"),
})
}
if len(failures) > 0 {
return &pulumirpc.CheckResponse{Inputs: req.GetNews(), Failures: failures}, nil
}
}
return &pulumirpc.CheckResponse{Inputs: req.GetNews()}, nil
}
// DiffConfig diffs the configuration for this provider.
func (k *kubeProvider) DiffConfig(ctx context.Context, req *pulumirpc.DiffRequest) (*pulumirpc.DiffResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.DiffConfig(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
olds, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label),
KeepUnknowns: true,
SkipNulls: true,
})
if err != nil {
return nil, err
}
news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: true,
SkipNulls: true,
})
if err != nil {
return nil, pkgerrors.Wrapf(err, "DiffConfig failed because of malformed resource inputs")
}
// We can't tell for sure if a computed value has changed, so we make the conservative choice
// and force a replacement.
if news["kubeconfig"].IsComputed() {
return &pulumirpc.DiffResponse{
Changes: pulumirpc.DiffResponse_DIFF_SOME,
Diffs: []string{"kubeconfig"},
Replaces: []string{"kubeconfig"},
}, nil
}
var diffs, replaces []string
oldConfig, err := parseKubeconfigPropertyValue(olds["kubeconfig"])
if err != nil {
return nil, err
}
newConfig, err := parseKubeconfigPropertyValue(news["kubeconfig"])
if err != nil {
return nil, err
}
// Check for differences in provider overrides.
if !reflect.DeepEqual(oldConfig, newConfig) {
diffs = append(diffs, "kubeconfig")
}
if olds["context"] != news["context"] {
diffs = append(diffs, "context")
}
if olds["cluster"] != news["cluster"] {
diffs = append(diffs, "cluster")
}
if olds["namespace"] != news["namespace"] {
diffs = append(diffs, "namespace")
}
if olds["enableDryRun"] != news["enableDryRun"] {
diffs = append(diffs, "enableDryRun")
}
if olds["renderYamlToDirectory"] != news["renderYamlToDirectory"] {
diffs = append(diffs, "renderYamlToDirectory")
// If the render directory changes, all of the manifests will be replaced.
replaces = append(replaces, "renderYamlToDirectory")
}
// In general, it's not possible to tell from a kubeconfig if the k8s cluster it points to has
	// changed. k8s clusters do not have a well-defined identity, so the best we can do is check
// if the settings for the active cluster have changed. This is not a foolproof method; a trivial
// counterexample is changing the load balancer or DNS entry pointing to the same cluster.
//
// Given this limitation, we try to strike a reasonable balance by planning a replacement iff
// the active cluster in the kubeconfig changes. This could still plan an erroneous replacement,
// but should work for the majority of cases.
//
// The alternative of ignoring changes to the kubeconfig is untenable; if the k8s cluster has
// changed, any dependent resources must be recreated, and ignoring changes prevents that from
// happening.
oldActiveCluster := getActiveClusterFromConfig(oldConfig, olds)
activeCluster := getActiveClusterFromConfig(newConfig, news)
if !reflect.DeepEqual(oldActiveCluster, activeCluster) {
replaces = diffs
}
logger.V(7).Infof("%s: diffs %v / replaces %v", label, diffs, replaces)
if len(diffs) > 0 || len(replaces) > 0 {
return &pulumirpc.DiffResponse{
Changes: pulumirpc.DiffResponse_DIFF_SOME,
Diffs: diffs,
Replaces: replaces,
}, nil
}
return &pulumirpc.DiffResponse{
Changes: pulumirpc.DiffResponse_DIFF_NONE,
}, nil
}
// Configure configures the resource provider with "globals" that control its behavior.
func (k *kubeProvider) Configure(_ context.Context, req *pulumirpc.ConfigureRequest) (*pulumirpc.ConfigureResponse, error) {
const trueStr = "true"
vars := req.GetVariables()
//
// Set simple configuration settings.
//
k.opts = kubeOpts{
rejectUnknownResources: vars["kubernetes:config:rejectUnknownResources"] == trueStr,
}
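	// Record whether the engine accepts secret values; this controls KeepSecrets when marshaling outputs.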
k.enableSecrets = req.GetAcceptSecrets()
//
// Configure client-go using provided or ambient kubeconfig file.
//
if defaultNamespace := vars["kubernetes:config:namespace"]; defaultNamespace != "" {
k.defaultNamespace = defaultNamespace
}
// Compute config overrides.
overrides := &clientcmd.ConfigOverrides{
Context: clientapi.Context{
Cluster: vars["kubernetes:config:cluster"],
Namespace: k.defaultNamespace,
},
CurrentContext: vars["kubernetes:config:context"],
}
enableDryRun := func() bool {
// If the provider flag is set, use that value to determine behavior. This will override the ENV var.
if enabled, exists := vars["kubernetes:config:enableDryRun"]; exists {
return enabled == trueStr
}
// If the provider flag is not set, fall back to the ENV var.
if enabled, exists := os.LookupEnv("PULUMI_K8S_ENABLE_DRY_RUN"); exists {
return enabled == trueStr
}
// Default to false.
return false
}
if enableDryRun() {
k.enableDryRun = true
}
enableReplaceCRD := func() bool {
// If the provider flag is set, use that value to determine behavior. This will override the ENV var.
if enabled, exists := vars["kubernetes:config:enableReplaceCRD"]; exists {
return enabled == trueStr
}
// If the provider flag is not set, fall back to the ENV var.
if enabled, exists := os.LookupEnv("PULUMI_K8S_ENABLE_REPLACE_CRD"); exists {
return enabled == trueStr
}
// Default to false.
return false
}
if enableReplaceCRD() {
k.enableReplaceCRD = true
}
suppressDeprecationWarnings := func() bool {
// If the provider flag is set, use that value to determine behavior. This will override the ENV var.
if enabled, exists := vars["kubernetes:config:suppressDeprecationWarnings"]; exists {
return enabled == trueStr
}
// If the provider flag is not set, fall back to the ENV var.
if enabled, exists := os.LookupEnv("PULUMI_K8S_SUPPRESS_DEPRECATION_WARNINGS"); exists {
return enabled == trueStr
}
// Default to false.
return false
}
if suppressDeprecationWarnings() {
k.suppressDeprecationWarnings = true
}
suppressHelmHookWarnings := func() bool {
// If the provider flag is set, use that value to determine behavior. This will override the ENV var.
if enabled, exists := vars["kubernetes:config:suppressHelmHookWarnings"]; exists {
return enabled == trueStr
}
// If the provider flag is not set, fall back to the ENV var.
if enabled, exists := os.LookupEnv("PULUMI_K8S_SUPPRESS_HELM_HOOK_WARNINGS"); exists {
return enabled == trueStr
}
// Default to false.
return false
}
if suppressHelmHookWarnings() {
k.suppressHelmHookWarnings = true
}
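	// When renderYamlToDirectory is set, the provider renders resources as YAML manifests to the
	// given directory instead of applying them to a cluster.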
renderYamlToDirectory := func() string {
// Read the config from the Provider.
if directory, exists := vars["kubernetes:config:renderYamlToDirectory"]; exists && directory != "" {
return directory
}
return ""
}
k.yamlDirectory = renderYamlToDirectory()
k.yamlRenderMode = len(k.yamlDirectory) > 0
var helmReleaseSettings HelmReleaseSettings
if obj, ok := vars["kubernetes:config:helmReleaseSettings"]; ok {
err := json.Unmarshal([]byte(obj), &helmReleaseSettings)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal helmReleaseSettings option: %w", err)
}
}
// TODO: Once https://github.com/pulumi/pulumi/issues/8132 is fixed, we can drop the env var handling logic.
helmDriver := func() string {
if helmReleaseSettings.Driver != nil {
return *helmReleaseSettings.Driver
}
// If the provider flag is not set, fall back to the ENV var.
if driver, exists := os.LookupEnv("PULUMI_K8S_HELM_DRIVER"); exists {
return driver
}
return "secret"
}
k.helmDriver = helmDriver() // TODO: Make sure this is in provider state
helmPluginsPath := func() string {
if helmReleaseSettings.PluginsPath != nil {
return *helmReleaseSettings.PluginsPath
}
// If the provider flag is not set, fall back to the ENV var.
if pluginsPath, exists := os.LookupEnv("PULUMI_K8S_HELM_PLUGINS_PATH"); exists {
return pluginsPath
}
return helmpath.DataPath("plugins")
}
k.helmPluginsPath = helmPluginsPath()
helmRegistryConfigPath := func() string {
if helmReleaseSettings.RegistryConfigPath != nil {
return *helmReleaseSettings.RegistryConfigPath
}
// If the provider flag is not set, fall back to the ENV var.
if registryPath, exists := os.LookupEnv("PULUMI_K8S_HELM_REGISTRY_CONFIG_PATH"); exists {
return registryPath
}
return helmpath.ConfigPath("registry.json")
}
k.helmRegistryConfigPath = helmRegistryConfigPath()
helmRepositoryConfigPath := func() string {
if helmReleaseSettings.RepositoryConfigPath != nil {
return *helmReleaseSettings.RepositoryConfigPath
}
if repositoryConfigPath, exists := os.LookupEnv("PULUMI_K8S_HELM_REPOSITORY_CONFIG_PATH"); exists {
return repositoryConfigPath
}
return helmpath.ConfigPath("repositories.yaml")
}
k.helmRepositoryConfigPath = helmRepositoryConfigPath()
helmRepositoryCache := func() string {
if helmReleaseSettings.RepositoryCache != nil {
return *helmReleaseSettings.RepositoryCache
}
if repositoryCache, exists := os.LookupEnv("PULUMI_K8S_HELM_REPOSITORY_CACHE"); exists {
return repositoryCache
}
return helmpath.CachePath("repository")
}
k.helmRepositoryCache = helmRepositoryCache()
// Rather than erroring out on an invalid k8s config, mark the cluster as unreachable and conditionally bail out on
// operations that require a valid cluster. This will allow us to perform invoke operations using the default
// provider.
unreachableCluster := func(err error) {
k.clusterUnreachable = true
k.clusterUnreachableReason = fmt.Sprintf(
"failed to parse kubeconfig data in `kubernetes:config:kubeconfig`- %v", err)
}
var kubeconfig clientcmd.ClientConfig
var apiConfig *clientapi.Config
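	// homeDir resolves the current user's home directory, used to expand a leading "~" in the
	// kubeconfig path.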
homeDir := func() string {
// Ignore errors. The filepath will be checked later, so we can handle failures there.
usr, _ := user.Current()
return usr.HomeDir
}
if pathOrContents, ok := vars["kubernetes:config:kubeconfig"]; ok {
var contents string
// Handle the '~' character if it is set in the config string. Normally, this would be expanded by the shell
// into the user's home directory, but we have to do that manually if it is set in a config value.
if pathOrContents == "~" {
// In case of "~", which won't be caught by the "else if"
pathOrContents = homeDir()
} else if strings.HasPrefix(pathOrContents, "~/") {
pathOrContents = filepath.Join(homeDir(), pathOrContents[2:])
}
// If the variable is a valid filepath, load the file and parse the contents as a k8s config.
_, err := os.Stat(pathOrContents)
if err == nil {
b, err := ioutil.ReadFile(pathOrContents)
if err != nil {
unreachableCluster(err)
} else {
contents = string(b)
}
} else { // Assume the contents are a k8s config.
contents = pathOrContents
}
// Load the contents of the k8s config.
apiConfig, err = clientcmd.Load([]byte(contents))
if err != nil {
unreachableCluster(err)
} else {
kubeconfig = clientcmd.NewDefaultClientConfig(*apiConfig, overrides)
configurationNamespace, _, err := kubeconfig.Namespace()
if err == nil {
k.defaultNamespace = configurationNamespace
}
}
} else {
// Use client-go to resolve the final configuration values for the client. Typically, these
// values would reside in the $KUBECONFIG file, but can also be altered in several
// places, including in env variables, client-go default values, and (if we allowed it) CLI
// flags.
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
kubeconfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)
}
var kubeClientSettings KubeClientSettings
if obj, ok := vars["kubernetes:config:kubeClientSettings"]; ok {
err := json.Unmarshal([]byte(obj), &kubeClientSettings)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal kubeClientSettings option: %w", err)
}
}
// TODO: Once https://github.com/pulumi/pulumi/issues/8132 is fixed, we can drop the env var handling logic.
if burst := os.Getenv("PULUMI_K8S_CLIENT_BURST"); burst != "" && kubeClientSettings.Burst == nil {
asInt, err := strconv.Atoi(burst)
if err != nil {
return nil, fmt.Errorf("invalid value specified for PULUMI_K8S_CLIENT_BURST: %w", err)
}
kubeClientSettings.Burst = &asInt
}
if qps := os.Getenv("PULUMI_K8S_CLIENT_QPS"); qps != "" && kubeClientSettings.QPS == nil {
asFloat, err := strconv.ParseFloat(qps, 64)
if err != nil {
return nil, fmt.Errorf("invalid value specified for PULUMI_K8S_CLIENT_QPS: %w", err)
}
kubeClientSettings.QPS = &asFloat
}
// Attempt to load the configuration from the provided kubeconfig. If this fails, mark the cluster as unreachable.
if !k.clusterUnreachable {
config, err := kubeconfig.ClientConfig()
if err != nil {
k.clusterUnreachable = true
k.clusterUnreachableReason = fmt.Sprintf(
"unable to load Kubernetes client configuration from kubeconfig file: %v", err)
} else {
if kubeClientSettings.Burst != nil {
config.Burst = *kubeClientSettings.Burst
logger.V(9).Infof("kube client burst set to %v", config.Burst)
}
if kubeClientSettings.QPS != nil {
config.QPS = float32(*kubeClientSettings.QPS)
logger.V(9).Infof("kube client QPS set to %v", config.QPS)
}
warningConfig := rest.CopyConfig(config)
warningConfig.WarningHandler = rest.NoWarnings{}
k.config = warningConfig
k.kubeconfig = kubeconfig
namespace := "default"
if k.defaultNamespace != "" {
namespace = k.defaultNamespace
}
k.helmReleaseProvider, err = newHelmReleaseProvider(
k.host,
apiConfig,
overrides,
k.config,
k.helmDriver,
namespace,
k.enableSecrets,
k.helmPluginsPath,
k.helmRegistryConfigPath,
k.helmRepositoryConfigPath,
k.helmRepositoryCache)
if err != nil {
return nil, err
}
}
}
// These operations require a reachable cluster.
if !k.clusterUnreachable {
cs, err := clients.NewDynamicClientSet(k.config)
if err != nil {
return nil, err
}
k.clientSet = cs
k.dryRunVerifier = k8sresource.NewDryRunVerifier(cs.GenericClient, cs.DiscoveryClientCached)
lc, err := clients.NewLogClient(k.config)
if err != nil {
return nil, err
}
k.logClient = lc
k.k8sVersion = cluster.TryGetServerVersion(cs.DiscoveryClientCached)
if _, err = k.getResources(); err != nil {
k.clusterUnreachable = true
k.clusterUnreachableReason = fmt.Sprintf(
"unable to load schema information from the API server: %v", err)
}
}
return &pulumirpc.ConfigureResponse{
AcceptSecrets: true,
SupportsPreview: true,
}, nil
}
// Invoke dynamically executes a built-in function in the provider.
func (k *kubeProvider) Invoke(ctx context.Context,
req *pulumirpc.InvokeRequest) (*pulumirpc.InvokeResponse, error) {
// Important: Some invoke logic is intended to run during preview, and the Kubernetes provider
// inputs may not have resolved yet. Any invoke logic that depends on an active cluster must check
// k.clusterUnreachable and handle that condition appropriately.
tok := req.GetTok()
label := fmt.Sprintf("%s.Invoke(%s)", k.label(), tok)
args, err := plugin.UnmarshalProperties(
req.GetArgs(), plugin.MarshalOptions{Label: label, KeepUnknowns: true})
if err != nil {
return nil, pkgerrors.Wrapf(err, "failed to unmarshal %v args during an Invoke call", tok)
}
switch tok {
case invokeDecodeYaml:
var text, defaultNamespace string
if textArg := args["text"]; textArg.HasValue() && textArg.IsString() {
text = textArg.StringValue()
} else {
return nil, pkgerrors.New("missing required field 'text' of type string")
}
if defaultNsArg := args["defaultNamespace"]; defaultNsArg.HasValue() && defaultNsArg.IsString() {
defaultNamespace = defaultNsArg.StringValue()
}
result, err := decodeYaml(text, defaultNamespace, k.clientSet)
if err != nil {
return nil, err
}
objProps, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(map[string]interface{}{"result": result}),
plugin.MarshalOptions{
Label: label, KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
return &pulumirpc.InvokeResponse{Return: objProps}, nil
case invokeHelmTemplate:
var jsonOpts string
if jsonOptsArgs := args["jsonOpts"]; jsonOptsArgs.HasValue() && jsonOptsArgs.IsString() {
jsonOpts = jsonOptsArgs.StringValue()
} else {
return nil, pkgerrors.New("missing required field 'jsonOpts' of type string")
}
var opts HelmChartOpts
err = json.Unmarshal([]byte(jsonOpts), &opts)
if err != nil {
return nil, pkgerrors.Wrap(err, "failed to unmarshal 'jsonOpts'")
}
text, err := helmTemplate(opts)
if err != nil {
return nil, pkgerrors.Wrap(err, "failed to generate YAML for specified Helm chart")
}
// Decode the generated YAML here to avoid an extra invoke in the client.
result, err := decodeYaml(text, opts.Namespace, k.clientSet)
if err != nil {
return nil, pkgerrors.Wrap(err, "failed to decode YAML for specified Helm chart")
}
objProps, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(map[string]interface{}{"result": result}),
plugin.MarshalOptions{
Label: label, KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
return &pulumirpc.InvokeResponse{Return: objProps}, nil
case invokeKustomize:
var directory string
if directoryArg := args["directory"]; directoryArg.HasValue() && directoryArg.IsString() {
directory = directoryArg.StringValue()
} else {
return nil, pkgerrors.New("missing required field 'directory' of type string")
}
result, err := kustomizeDirectory(directory, k.clientSet)
if err != nil {
return nil, err
}
objProps, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(map[string]interface{}{"result": result}),
plugin.MarshalOptions{
Label: label, KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
return &pulumirpc.InvokeResponse{Return: objProps}, nil
default:
return nil, fmt.Errorf("unknown Invoke type %q", tok)
}
}
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
func (k *kubeProvider) StreamInvoke(
req *pulumirpc.InvokeRequest, server pulumirpc.ResourceProvider_StreamInvokeServer) error {
// Important: Some invoke logic is intended to run during preview, and the Kubernetes provider
// inputs may not have resolved yet. Any invoke logic that depends on an active cluster must check
// k.clusterUnreachable and handle that condition appropriately.
// Unmarshal arguments.
tok := req.GetTok()
label := fmt.Sprintf("%s.StreamInvoke(%s)", k.label(), tok)
args, err := plugin.UnmarshalProperties(
req.GetArgs(), plugin.MarshalOptions{Label: label, KeepUnknowns: true})
if err != nil {
return pkgerrors.Wrapf(err, "failed to unmarshal %v args during an StreamInvoke call", tok)
}
switch tok {
case streamInvokeList:
//
// Request a list of all resources of some type, in some number of namespaces.
//
// DESIGN NOTES: `list` must be a `StreamInvoke` instead of an `Invoke` to avoid the gRPC
// message size limit. Unlike `watch`, which will continue until the user cancels the
// request, `list` is guaranteed to terminate after all the resources are listed. The role
// of the SDK implementations of `list` is thus to wait for the stream to terminate,
// aggregate the resources into a list, and return to the user.
//
// We send the resources asynchronously. This requires an "event loop" (below), which
// continuously attempts to send the resource, checking for cancellation on each send. This
// allows for the theoretical possibility that the gRPC client cancels the `list` operation
// prior to completion. The SDKs implementing `list` will very probably never expose a
// `cancel` handler in the way that `watch` does; `watch` requires it because a watcher is
// expected to never terminate, and users of the various SDKs need a way to tell the
// provider to stop streaming and reclaim the resources associated with the stream.
//
// Still, we implement this cancellation also for `list`, primarily for completeness. We'd
// like to avoid an unpleasant and non-actionable error that would appear on a `Send` on a
// client that is no longer accepting requests. This also helps to guard against the
// possibility that some dark corner of gRPC signals cancellation by accident, e.g., during
// shutdown.
//
if k.clusterUnreachable {
return fmt.Errorf("configured Kubernetes cluster is unreachable: %s", k.clusterUnreachableReason)
}
namespace := ""
if args["namespace"].HasValue() {
namespace = args["namespace"].StringValue()
}
if !args["group"].HasValue() || !args["version"].HasValue() || !args["kind"].HasValue() {
return fmt.Errorf(
"list requires a group, version, and kind that uniquely specify the resource type")
}
cl, err := k.clientSet.ResourceClient(schema.GroupVersionKind{
Group: args["group"].StringValue(),
Version: args["version"].StringValue(),
Kind: args["kind"].StringValue(),
}, namespace)
if err != nil {
return err
}
list, err := cl.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
//
// List resources. Send them one-by-one, asynchronously, to the client requesting them.
//
objects := make(chan map[string]interface{})
defer close(objects)
done := make(chan struct{})
defer close(done)
go func() {
for _, o := range list.Items {
objects <- o.Object
}
done <- struct{}{}
}()
for {
select {
case <-k.canceler.context.Done():
//
// `kubeProvider#Cancel` was called. Terminate the `StreamInvoke` RPC, free all
// resources, and exit without error.
//
return nil
case <-done:
//
// Success. Return.
//
return nil
case o := <-objects:
//
// Publish resource from the list back to user.
//
resp, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(o),
plugin.MarshalOptions{})
if err != nil {
return err
}
err = server.Send(&pulumirpc.InvokeResponse{Return: resp})
if err != nil {
return err
}
case <-server.Context().Done():
//
// gRPC stream was cancelled from the client that issued the `StreamInvoke` request
// to us. In this case, we terminate the `StreamInvoke` RPC, free all resources, and
// exit without error.
//
// This is required for `watch`, but is implemented in `list` for completeness.
// Users calling `watch` from one of the SDKs need to be able to cancel a `watch`
// and signal to the provider that it's ok to reclaim the resources associated with
// a `watch`. In `list` it's to prevent the user from getting weird errors if a
		// client somehow cancels the streaming request and we subsequently send a message
// anyway.
//
return nil
}
}
case streamInvokeWatch:
//
// Set up resource watcher.
//
if k.clusterUnreachable {
return fmt.Errorf("configured Kubernetes cluster is unreachable: %s", k.clusterUnreachableReason)
}
namespace := ""
if args["namespace"].HasValue() {
namespace = args["namespace"].StringValue()
}
if !args["group"].HasValue() || !args["version"].HasValue() || !args["kind"].HasValue() {
return fmt.Errorf(
"watch requires a group, version, and kind that uniquely specify the resource type")
}
cl, err := k.clientSet.ResourceClient(schema.GroupVersionKind{
Group: args["group"].StringValue(),
Version: args["version"].StringValue(),
Kind: args["kind"].StringValue(),
}, namespace)
if err != nil {
return err
}
watch, err := cl.Watch(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
//
// Watch for resource updates, and stream them back to the caller.
//
for {
select {
case <-k.canceler.context.Done():
//
// `kubeProvider#Cancel` was called. Terminate the `StreamInvoke` RPC, free all
// resources, and exit without error.
//
watch.Stop()
return nil
case event := <-watch.ResultChan():
//
// Kubernetes resource was updated. Publish resource update back to user.
//
resp, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(
map[string]interface{}{
"type": event.Type,
"object": event.Object.(*unstructured.Unstructured).Object,
}),
plugin.MarshalOptions{})
if err != nil {
return err
}
err = server.Send(&pulumirpc.InvokeResponse{Return: resp})
if err != nil {
return err
}
case <-server.Context().Done():
//
// gRPC stream was cancelled from the client that issued the `StreamInvoke` request
// to us. In this case, we terminate the `StreamInvoke` RPC, free all resources, and
// exit without error.
//
// Usually, this happens in the language provider, e.g., in the call to `cancel`
// below.
//
// const deployments = await streamInvoke("kubernetes:kubernetes:watch", {
// group: "apps", version: "v1", kind: "Deployment",
// });
// deployments.cancel();
//
watch.Stop()
return nil
}
}
case streamInvokePodLogs:
//
// Set up log stream for Pod.
//
if k.clusterUnreachable {
return fmt.Errorf("configured Kubernetes cluster is unreachable: %s", k.clusterUnreachableReason)
}
namespace := "default"
if args["namespace"].HasValue() {
namespace = args["namespace"].StringValue()
}
if !args["name"].HasValue() {
return fmt.Errorf(
"could not retrieve pod logs because the pod name was not present")
}
name := args["name"].StringValue()
podLogs, err := k.logClient.Logs(namespace, name)
if err != nil {
return err
}
defer podLogs.Close()
//
// Enumerate logs by line. Send back to the user.
//
// TODO: We send the logs back one-by-one, but we should probably batch them instead.
//
logLines := make(chan string)
defer close(logLines)
done := make(chan error)
defer close(done)
go func() {
podLogLines := bufio.NewScanner(podLogs)
for podLogLines.Scan() {
logLines <- podLogLines.Text()
}
if err := podLogLines.Err(); err != nil {
done <- err
} else {
done <- nil
}
}()
for {
select {
case <-k.canceler.context.Done():
//
// `kubeProvider#Cancel` was called. Terminate the `StreamInvoke` RPC, free all
// resources, and exit without error.
//
return nil
case err := <-done:
//
// Complete. Return the error if applicable.
//
return err
case line := <-logLines:
//
// Publish log line back to user.
//
resp, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(
map[string]interface{}{"lines": []string{line}}),
plugin.MarshalOptions{})
if err != nil {
return err
}
err = server.Send(&pulumirpc.InvokeResponse{Return: resp})
if err != nil {
return err
}
case <-server.Context().Done():
//
// gRPC stream was cancelled from the client that issued the `StreamInvoke` request
// to us. In this case, we terminate the `StreamInvoke` RPC, free all resources, and
// exit without error.
//
// Usually, this happens in the language provider, e.g., in the call to `cancel`
// below.
//
// const podLogLines = await streamInvoke("kubernetes:kubernetes:podLogs", {
// namespace: "default", name: "nginx-f94d8bc55-xftvs",
// });
// podLogLines.cancel();
//
return nil
}
}
default:
return fmt.Errorf("unknown Invoke type '%s'", tok)
}
}
// Check validates that the given property bag is valid for a resource of the given type and returns
// the inputs that should be passed to successive calls to Diff, Create, or Update for this
// resource. As a rule, the provider inputs returned by a call to Check should preserve the original
// representation of the properties as present in the program inputs. Though this rule is not
// required for correctness, violations thereof can negatively impact the end-user experience, as
// the provider inputs are used for detecting and rendering diffs.
func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) (*pulumirpc.CheckResponse, error) {
//
// Behavior as of v0.12.x: We take two inputs:
//
// 1. req.News, the new resource inputs, i.e., the property bag coming from a custom resource like
// k8s.core.v1.Service
// 2. req.Olds, the last version submitted from a custom resource.
//
// `req.Olds` are ignored (and are sometimes nil). `req.News` are validated, and `.metadata.name`
// is given to it if it's not already provided.
//
urn := resource.URN(req.GetUrn())
// Utilities for determining whether a resource's GVK exists.
gvkExists := func(gvk schema.GroupVersionKind) bool {
knownGVKs := sets.NewString()
if knownGVKs.Has(gvk.String()) {
return true
}
gv := gvk.GroupVersion()
rls, err := k.clientSet.DiscoveryClientCached.ServerResourcesForGroupVersion(gv.String())
if err != nil {
if !errors.IsNotFound(err) {
logger.V(3).Infof("ServerResourcesForGroupVersion(%q) returned unexpected error %v", gv, err)
}
return false
}
for _, rl := range rls.APIResources {
knownGVKs.Insert(gv.WithKind(rl.Kind).String())
}
return knownGVKs.Has(gvk.String())
}
label := fmt.Sprintf("%s.Check(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
// Obtain old resource inputs. This is the old version of the resource(s) supplied by the user as
// an update.
oldResInputs := req.GetOlds()
olds, err := plugin.UnmarshalProperties(oldResInputs, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Obtain new resource inputs. This is the new version of the resource(s) supplied by the user as
// an update.
newResInputs := req.GetNews()
news, err := plugin.UnmarshalProperties(newResInputs, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: true,
})
if err != nil {
return nil, pkgerrors.Wrapf(err, "check failed because malformed resource inputs: %+v", err)
}
oldInputs := propMapToUnstructured(olds)
newInputs := propMapToUnstructured(news)
var failures []*pulumirpc.CheckFailure
k.helmHookWarning(ctx, newInputs, urn)
annotatedInputs, err := legacyInitialAPIVersion(oldInputs, newInputs)
if err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to create resource %s/%s because of an error generating the %s value in "+
"`.metadata.annotations`",
newInputs.GetNamespace(), newInputs.GetName(), metadata.AnnotationInitialAPIVersion)
}
newInputs = annotatedInputs
if isHelmRelease(urn) && !hasComputedValue(newInputs) {
if !k.clusterUnreachable {
return k.helmReleaseProvider.Check(ctx, req)
}
return nil, fmt.Errorf("can't use Helm Release with unreachable cluster. Reason: %q", k.clusterUnreachableReason)
}
// Adopt name from old object if appropriate.
//
// If the user HAS NOT assigned a name in the new inputs, we autoname it and mark the object as
// autonamed in `.metadata.annotations`. This makes it easier for `Diff` to decide whether this
// needs to be `DeleteBeforeReplace`'d. If the resource is marked `DeleteBeforeReplace`, then
// `Create` will allocate it a new name later.
if len(oldInputs.Object) > 0 {
// NOTE: If old inputs exist, they have a name, either provided by the user or filled in with a
// previous run of `Check`.
contract.Assert(oldInputs.GetName() != "")
metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs)
// If this resource does not have a "managed-by: pulumi" label in its inputs, it is likely we are importing
// a resource that was created out-of-band. In this case, we do not add the `managed-by` label here, as doing
// so would result in a persistent failure to import due to a diff that the user cannot correct.
if metadata.HasManagedByLabel(oldInputs) {
_, err = metadata.TrySetManagedByLabel(newInputs)
if err != nil {
return nil, pkgerrors.Wrapf(err,
"Failed to create object because of a problem setting managed-by labels")
}
}
} else {
metadata.AssignNameIfAutonamable(newInputs, news, urn.Name())
// Set a "managed-by: pulumi" label on all created k8s resources.
_, err = metadata.TrySetManagedByLabel(newInputs)
if err != nil {
return nil, pkgerrors.Wrapf(err,
"Failed to create object because of a problem setting managed-by labels")
}
}
gvk, err := k.gvkFromURN(urn)
if err != nil {
return nil, err
}
// Skip the API version check if the cluster is unreachable.
if !k.clusterUnreachable {
if removed, version := kinds.RemovedAPIVersion(gvk, k.k8sVersion); removed {
_ = k.host.Log(ctx, diag.Warning, urn, (&kinds.RemovedAPIError{GVK: gvk, Version: version}).Error())
} else if !k.suppressDeprecationWarnings && kinds.DeprecatedAPIVersion(gvk, &k.k8sVersion) {
_ = k.host.Log(ctx, diag.Warning, urn, gen.APIVersionComment(gvk))
}
}
// If a default namespace is set on the provider for this resource, check if the resource has Namespaced
// or Global scope. For namespaced resources, set the namespace to the default value if unset.
if k.defaultNamespace != "" && len(newInputs.GetNamespace()) == 0 {
namespacedKind, err := clients.IsNamespacedKind(gvk, k.clientSet)
if err != nil {
if clients.IsNoNamespaceInfoErr(err) {
// This is probably a CustomResource without a registered CustomResourceDefinition.
// Since we can't tell for sure at this point, assume it is namespaced, and correct if
// required during the Create step.
namespacedKind = true
} else {
return nil, err
}
}
if namespacedKind {
newInputs.SetNamespace(k.defaultNamespace)
}
}
// HACK: Do not validate against OpenAPI spec if there is a computed value. The OpenAPI spec
// does not know how to deal with the placeholder values for computed values.
if !hasComputedValue(newInputs) && !k.clusterUnreachable {
resources, err := k.getResources()
if err != nil {
return nil, pkgerrors.Wrapf(err, "Failed to fetch OpenAPI schema from the API server")
}
// Validate the object according to the OpenAPI schema for its GVK.
err = openapi.ValidateAgainstSchema(resources, newInputs)
if err != nil {
resourceNotFound := errors.IsNotFound(err) ||
strings.Contains(err.Error(), "is not supported by the server")
k8sAPIUnreachable := strings.Contains(err.Error(), "connection refused")
if resourceNotFound && gvkExists(gvk) {
failures = append(failures, &pulumirpc.CheckFailure{
Reason: fmt.Sprintf(" Found API Group, but it did not contain a schema for %q", gvk),
})
} else if k8sAPIUnreachable {
k8sURL := ""
if err, ok := err.(*url.Error); ok {
k8sURL = fmt.Sprintf("at %q", err.URL)
}
failures = append(failures, &pulumirpc.CheckFailure{
Reason: fmt.Sprintf(" Kubernetes API server %s is unreachable. It's "+
"possible that the URL or authentication information in your "+
"kubeconfig is incorrect: %v", k8sURL, err),
})
} else if k.opts.rejectUnknownResources {
// If the schema doesn't exist, it could still be a CRD (which may not have a
// schema). Thus, if we are directed to check resources even if they have unknown
// types, we fail here.
return nil, pkgerrors.Wrapf(err, "unable to fetch schema for resource type %s/%s",
newInputs.GetAPIVersion(), newInputs.GetKind())
}
}
}
checkedInputs := resource.NewPropertyMapFromMap(newInputs.Object)
annotateSecrets(checkedInputs, news)
autonamedInputs, err := plugin.MarshalProperties(checkedInputs, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.autonamedInputs", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
if k.yamlRenderMode {
if checkedInputs.ContainsSecrets() {
_ = k.host.Log(ctx, diag.Warning, urn, "rendered YAML will contain a secret value in plaintext")
}
}
// Return new, possibly-autonamed inputs.
return &pulumirpc.CheckResponse{Inputs: autonamedInputs, Failures: failures}, nil
}
// helmHookWarning logs a warning if a Chart contains unsupported hooks. The warning can be disabled by setting
// the suppressHelmHookWarnings provider flag or related ENV var.
func (k *kubeProvider) helmHookWarning(ctx context.Context, newInputs *unstructured.Unstructured, urn resource.URN) {
hasHelmHook := false
for key, value := range newInputs.GetAnnotations() {
// If annotations with a reserved internal prefix exist, ignore them.
if metadata.IsInternalAnnotation(key) {
_ = k.host.Log(ctx, diag.Warning, urn,
fmt.Sprintf("ignoring user-specified value for internal annotation %q", key))
}
// If the Helm hook annotation is found, set the hasHelmHook flag.
if has := metadata.IsHelmHookAnnotation(key); has {
// Test hooks are handled, so ignore this one.
if match, _ := regexp.MatchString(`test|test-success|test-failure`, value); !match {
hasHelmHook = hasHelmHook || has
}
}
}
if hasHelmHook && !k.suppressHelmHookWarnings {
_ = k.host.Log(ctx, diag.Warning, urn,
"This resource contains Helm hooks that are not currently supported by Pulumi. The resource will "+
"be created, but any hooks will not be executed. Hooks support is tracked at "+
"https://github.com/pulumi/pulumi-kubernetes/issues/555 -- This warning can be disabled by setting "+
"the PULUMI_K8S_SUPPRESS_HELM_HOOK_WARNINGS environment variable")
}
}
// Diff checks what impacts a hypothetical update will have on the resource's properties.
func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*pulumirpc.DiffResponse, error) {
//
// Behavior as of v0.12.x: We take 2 inputs:
//
// 1. req.News, the new resource inputs, i.e., the property bag coming from a custom resource like
// k8s.core.v1.Service
// 2. req.Olds, the old _state_ returned by a `Create` or an `Update`. The old state has the form
// {inputs: {...}, live: {...}}, and is a struct that contains the old inputs as well as the
// last computed value obtained from the Kubernetes API server.
//
// The list of properties that would cause replacement is then computed between the old and new
// _inputs_, as in Kubernetes this captures changes the user made that result in replacement
// (which is not true of the old computed values).
//
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Diff(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
// Get old state. This is an object of the form {inputs: {...}, live: {...}} where `inputs` is the
// previous resource inputs supplied by the user, and `live` is the computed state of that inputs
// we received back from the API server.
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Get new resource inputs. The user is submitting these as an update.
newResInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: true,
})
if err != nil {
return nil, pkgerrors.Wrapf(err, "diff failed because malformed resource inputs")
}
newInputs := propMapToUnstructured(newResInputs)
oldInputs, _ := parseCheckpointObject(oldState)
gvk, err := k.gvkFromURN(urn)
if err != nil {
return nil, err
}
if isHelmRelease(urn) && !hasComputedValue(newInputs) {
if !k.clusterUnreachable {
return k.helmReleaseProvider.Diff(ctx, req)
}
return nil, fmt.Errorf("can't use Helm Release with unreachable cluster. Reason: %q", k.clusterUnreachableReason)
}
namespacedKind, err := clients.IsNamespacedKind(gvk, k.clientSet)
if err != nil {
if clients.IsNoNamespaceInfoErr(err) {
// This is probably a CustomResource without a registered CustomResourceDefinition.
// Since we can't tell for sure at this point, assume it is namespaced, and correct if
// required during the Create step.
namespacedKind = true
} else {
return nil, pkgerrors.Wrapf(err,
"API server returned error when asked if resource type %s is namespaced", gvk)
}
}
if namespacedKind {
// Explicitly set the "default" namespace if unset so that the diff ignores it.
oldInputs.SetNamespace(canonicalNamespace(oldInputs.GetNamespace()))
newInputs.SetNamespace(canonicalNamespace(newInputs.GetNamespace()))
} else {
// Clear the namespace if it was set erroneously.
oldInputs.SetNamespace("")
newInputs.SetNamespace("")
}
if oldInputs.GroupVersionKind().Empty() {
oldInputs.SetGroupVersionKind(gvk)
}
var patch []byte
var patchBase map[string]interface{}
// Always compute a client-side patch.
patch, err = k.inputPatch(oldInputs, newInputs)
if err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to check for changes in resource %s/%s", newInputs.GetNamespace(), newInputs.GetName())
}
patchBase = oldInputs.Object
patchObj := map[string]interface{}{}
if err = json.Unmarshal(patch, &patchObj); err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to check for changes in resource %s/%s because of an error serializing "+
"the JSON patch describing resource changes",
newInputs.GetNamespace(), newInputs.GetName())
}
// Try to compute a server-side patch.
ssPatch, ssPatchBase, ssPatchOk := k.tryServerSidePatch(oldInputs, newInputs, gvk)
// If the server-side patch succeeded, then merge that patch into the client-side patch and override any conflicts
// with the server-side values.
if ssPatchOk {
logger.V(1).Infof("calculated diffs for %s/%s using dry-run and inputs", newInputs.GetNamespace(), newInputs.GetName())
err = mergo.Merge(&patchBase, ssPatchBase, mergo.WithOverride)
if err != nil {
return nil, err
}
ssPatchObj := map[string]interface{}{}
if err = json.Unmarshal(ssPatch, &ssPatchObj); err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to check for changes in resource %s/%s because of an error serializing "+
"the JSON patch describing resource changes",
newInputs.GetNamespace(), newInputs.GetName())
}
err = mergo.Merge(&patchObj, ssPatchObj, mergo.WithOverride)
if err != nil {
return nil, err
}
} else {
logger.V(1).Infof("calculated diffs for %s/%s using inputs only", newInputs.GetNamespace(), newInputs.GetName())
}
// Pack up PB, ship response back.
hasChanges := pulumirpc.DiffResponse_DIFF_NONE
var changes, replaces []string
var detailedDiff map[string]*pulumirpc.PropertyDiff
if len(patchObj) != 0 {
hasChanges = pulumirpc.DiffResponse_DIFF_SOME
for k := range patchObj {
changes = append(changes, k)
}
forceNewFields := forceNewProperties(gvk)
if detailedDiff, err = convertPatchToDiff(patchObj, patchBase, newInputs.Object, oldInputs.Object, forceNewFields...); err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to check for changes in resource %s/%s because of an error "+
"converting JSON patch describing resource changes to a diff",
newInputs.GetNamespace(), newInputs.GetName())
}
for _, v := range detailedDiff {
v.InputDiff = true
}
for k, v := range detailedDiff {
switch v.Kind {
case pulumirpc.PropertyDiff_ADD_REPLACE, pulumirpc.PropertyDiff_DELETE_REPLACE, pulumirpc.PropertyDiff_UPDATE_REPLACE:
replaces = append(replaces, k)
}
}
}
if metadata.ReplaceUnready(newInputs) {
switch newInputs.GetKind() {
case "Job":
// Fetch current Job status and check point-in-time readiness. Errors are ignored.
if live, err := k.readLiveObject(newInputs); err == nil {
jobChecker := states.NewJobChecker()
job, err := clients.FromUnstructured(live)
if err == nil {
jobChecker.Update(job)
if !jobChecker.Ready() {
hasChanges = pulumirpc.DiffResponse_DIFF_SOME
replaces = append(replaces, `.metadata.annotations["pulumi.com/replaceUnready"]`)
}
}
}
default:
_ = k.host.Log(ctx, diag.Warning, urn, "replaceUnready annotation is not supported for this resource")
}
}
// Delete before replacement if we are forced to replace the old object, and the new version of
// that object MUST have the same name.
deleteBeforeReplace :=
// 1. We know resource must be replaced.
len(replaces) > 0 &&
// 2. Object is NOT autonamed (i.e., user manually named it, and therefore we can't
// auto-generate the name).
!metadata.IsAutonamed(newInputs) &&
// 3. The new, user-specified name is the same as the old name.
newInputs.GetName() == oldInputs.GetName() &&
// 4. The resource is being deployed to the same namespace (i.e., we aren't creating the
// object in a new namespace and then deleting the old one).
newInputs.GetNamespace() == oldInputs.GetNamespace()
return &pulumirpc.DiffResponse{
Changes: hasChanges,
Replaces: replaces,
Stables: []string{},
DeleteBeforeReplace: deleteBeforeReplace,
Diffs: changes,
DetailedDiff: detailedDiff,
HasDetailedDiff: true,
}, nil
}
// Create allocates a new instance of the provided resource and returns its unique ID afterwards.
// (The input ID must be blank.) If this call fails, the resource must not have been created (i.e.,
// it is "transactional").
func (k *kubeProvider) Create(
ctx context.Context, req *pulumirpc.CreateRequest,
) (*pulumirpc.CreateResponse, error) {
//
// Behavior as of v0.12.x: We take 1 input:
//
// 1. `req.Properties`, the new resource inputs submitted by the user, after having been returned
// by `Check`.
//
// This is used to create a new resource, and the computed values are returned. Importantly:
//
// * The return is formatted as a "checkpoint object", i.e., an object of the form
// {inputs: {...}, live: {...}}. This is important both for `Diff` and for `Update`. See
// comments in those methods for details.
//
urn := resource.URN(req.GetUrn())
if isHelmRelease(urn) && !req.GetPreview() {
if !k.clusterUnreachable {
return k.helmReleaseProvider.Create(ctx, req)
}
return nil, fmt.Errorf("can't create Helm Release with unreachable cluster. Reason: %q", k.clusterUnreachableReason)
}
label := fmt.Sprintf("%s.Create(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
// Except in the case of yamlRender mode, Create requires a connection to a k8s cluster, so bail out
// immediately if it is unreachable.
if !req.GetPreview() && k.clusterUnreachable && !k.yamlRenderMode {
return nil, fmt.Errorf("configured Kubernetes cluster is unreachable: %s", k.clusterUnreachableReason)
}
// Parse inputs
newResInputs, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: true,
})
if err != nil {
return nil, pkgerrors.Wrapf(err, "create failed because malformed resource inputs")
}
newInputs := propMapToUnstructured(newResInputs)
// If this is a preview and the input values contain unknowns, return them as-is. This is compatible with
// prior behavior implemented by the Pulumi engine. Similarly, if the server does not support server-side
// dry run, return the inputs as-is.
if req.GetPreview() &&
(hasComputedValue(newInputs) || !k.supportsDryRun(newInputs.GroupVersionKind())) {
logger.V(9).Infof("cannot preview Create(%v)", urn)
return &pulumirpc.CreateResponse{Id: "", Properties: req.GetProperties()}, nil
}
annotatedInputs, err := withLastAppliedConfig(newInputs)
if err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to create resource %s/%s because of an error generating the %s value in "+
"`.metadata.annotations`",
newInputs.GetNamespace(), newInputs.GetName(), lastAppliedConfigKey)
}
initialAPIVersion := newInputs.GetAPIVersion()
if k.yamlRenderMode {
if newResInputs.ContainsSecrets() {
_ = k.host.Log(ctx, diag.Warning, urn, fmt.Sprintf(
"rendered file %s contains a secret value in plaintext",
renderPathForResource(annotatedInputs, k.yamlDirectory)))
}
err := renderYaml(annotatedInputs, k.yamlDirectory)
if err != nil {
return nil, err
}
obj := checkpointObject(newInputs, annotatedInputs, newResInputs, initialAPIVersion)
inputsAndComputed, err := plugin.MarshalProperties(
obj, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.inputsAndComputed", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
_ = k.host.LogStatus(ctx, diag.Info, urn, fmt.Sprintf(
"rendered %s", renderPathForResource(annotatedInputs, k.yamlDirectory)))
return &pulumirpc.CreateResponse{
Id: fqObjName(annotatedInputs), Properties: inputsAndComputed,
}, nil
}
resources, err := k.getResources()
if err != nil {
return nil, pkgerrors.Wrapf(err, "Failed to fetch OpenAPI schema from the API server")
}
config := await.CreateConfig{
ProviderConfig: await.ProviderConfig{
Context: k.canceler.context,
Host: k.host,
URN: urn,
InitialAPIVersion: initialAPIVersion,
ClusterVersion: &k.k8sVersion,
ClientSet: k.clientSet,
DedupLogger: logging.NewLogger(k.canceler.context, k.host, urn),
Resources: resources,
},
Inputs: annotatedInputs,
Timeout: req.Timeout,
DryRun: req.GetPreview(),
}
initialized, awaitErr := await.Creation(config)
if awaitErr != nil {
if req.GetPreview() {
failedPreview := false
_, isPreviewErr := awaitErr.(await.PreviewError)
			if k.isDryRunDisabledError(awaitErr) || isPreviewErr {
failedPreview = true
}
if failedPreview {
logger.V(9).Infof("could not preview Create(%v): %v", urn, err)
return &pulumirpc.CreateResponse{Id: "", Properties: req.GetProperties()}, nil
}
}
if meta.IsNoMatchError(awaitErr) {
// If it's a "no match" error, this is probably a CustomResource with no corresponding
// CustomResourceDefinition. This usually happens if the CRD was not created, and we
// print a more useful error message in this case.
return nil, pkgerrors.Wrapf(
awaitErr, "creation of resource %s failed because the Kubernetes API server "+
"reported that the apiVersion for this resource does not exist. "+
"Verify that any required CRDs have been created", fqObjName(newInputs))
}
partialErr, isPartialErr := awaitErr.(await.PartialError)
if !isPartialErr {
// Object creation failed.
return nil, pkgerrors.Wrapf(
awaitErr,
"resource %s was not successfully created by the Kubernetes API server ", fqObjName(newInputs))
}
// Resource was created, but failed to become fully initialized.
initialized = partialErr.Object()
}
obj := checkpointObject(newInputs, initialized, newResInputs, initialAPIVersion)
inputsAndComputed, err := plugin.MarshalProperties(
obj, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.inputsAndComputed", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
if awaitErr != nil {
// Resource was created but failed to initialize. Return live version of object so it can be
// checkpointed.
return nil, partialError(
fqObjName(initialized),
pkgerrors.Wrapf(
awaitErr, "resource %s was successfully created, but the Kubernetes API server "+
"reported that it failed to fully initialize or become live", fqObjName(newInputs)),
inputsAndComputed,
nil)
}
// Invalidate the client cache if this was a CRD. This will require subsequent CR creations to
// refresh the cache, at which point the CRD definition will be present, so that it doesn't fail
// with an `errors.IsNotFound`.
if clients.IsCRD(newInputs) {
k.clientSet.RESTMapper.Reset()
k.invalidateResources()
}
id := ""
if !req.GetPreview() {
id = fqObjName(initialized)
}
return &pulumirpc.CreateResponse{Id: id, Properties: inputsAndComputed}, nil
}
// Read the current live state associated with a resource. Enough state must be included in the
// inputs to uniquely identify the resource; this is typically just the resource ID, but may also
// include some properties.
func (k *kubeProvider) Read(ctx context.Context, req *pulumirpc.ReadRequest) (*pulumirpc.ReadResponse, error) {
//
// Behavior as of v0.12.x: We take 1 input:
//
// 1. `req.Properties`, the new resource inputs submitted by the user, after having been persisted
// (e.g., by `Create` or `Update`).
//
// We use this information to read the live version of a Kubernetes resource. This is sometimes
// then checkpointed (e.g., in the case of `refresh`). Specifically:
//
// * The return is formatted as a "checkpoint object", i.e., an object of the form
// {inputs: {...}, live: {...}}. This is important both for `Diff` and for `Update`. See
// comments in those methods for details.
//
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Read(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
// If the cluster is unreachable, return an error.
if k.clusterUnreachable {
_ = k.host.Log(ctx, diag.Warning, urn, fmt.Sprintf(
"configured Kubernetes cluster is unreachable: %s", k.clusterUnreachableReason))
return nil, fmt.Errorf("failed to read resource state due to unreachable cluster. " +
"If the cluster has been deleted, you can edit the pulumi state to remove this resource")
}
if isHelmRelease(urn) {
contract.Assertf(k.helmReleaseProvider != nil, "helmReleaseProvider not initialized.")
return k.helmReleaseProvider.Read(ctx, req)
}
// Obtain new properties, create a Kubernetes `unstructured.Unstructured` that we can pass to the
// validation routines.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
oldInputsPM, err := plugin.UnmarshalProperties(req.GetInputs(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldInputs", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
oldInputs, oldLive := parseCheckpointObject(oldState)
if oldInputs.GroupVersionKind().Empty() {
if oldLive.GroupVersionKind().Empty() {
gvk, err := k.gvkFromURN(urn)
if err != nil {
return nil, err
}
oldInputs.SetGroupVersionKind(gvk)
} else {
oldInputs.SetGroupVersionKind(oldLive.GroupVersionKind())
}
}
namespace, name := parseFqName(req.GetId())
if name == "" {
return nil, fmt.Errorf(
"failed to read resource because of a failure to parse resource name from request ID: %s",
req.GetId())
}
if oldInputs.GetName() == "" {
oldInputs.SetName(name)
}
if oldInputs.GetNamespace() == "" {
oldInputs.SetNamespace(namespace)
}
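	// Recover the initial API version recorded for this resource; it is passed along to the await
	// machinery and checkpointed below.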
initialAPIVersion, err := initialAPIVersion(oldState, oldInputs)
if err != nil {
return nil, err
}
if k.yamlRenderMode {
// Return a new "checkpoint object".
state, err := plugin.MarshalProperties(
checkpointObject(oldInputs, oldLive, oldState, initialAPIVersion), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.state", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
inputs, err := plugin.MarshalProperties(oldInputsPM, plugin.MarshalOptions{
Label: label + ".inputs", KeepUnknowns: true, SkipNulls: true, KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
return &pulumirpc.ReadResponse{Id: req.GetId(), Properties: state, Inputs: inputs}, nil
}
resources, err := k.getResources()
if err != nil {
return nil, pkgerrors.Wrapf(err, "Failed to fetch OpenAPI schema from the API server")
}
config := await.ReadConfig{
ProviderConfig: await.ProviderConfig{
Context: k.canceler.context,
Host: k.host,
URN: urn,
InitialAPIVersion: initialAPIVersion,
ClientSet: k.clientSet,
DedupLogger: logging.NewLogger(k.canceler.context, k.host, urn),
Resources: resources,
},
Inputs: oldInputs,
Name: name,
}
liveObj, readErr := await.Read(config)
if readErr != nil {
logger.V(3).Infof("%v", readErr)
if meta.IsNoMatchError(readErr) {
// If it's a "no match" error, this is probably a CustomResource with no corresponding
// CustomResourceDefinition. This usually happens if the CRD was deleted, and it's safe
// to consider the CR to be deleted as well in this case.
return deleteResponse, nil
}
statusErr, ok := readErr.(*errors.StatusError)
if ok && statusErr.ErrStatus.Code == 404 {
// If it's a 404 error, this resource was probably deleted.
return deleteResponse, nil
}
if partialErr, ok := readErr.(await.PartialError); ok {
liveObj = partialErr.Object()
}
// If `liveObj == nil` at this point, it means we've encountered an error that is neither a
// 404, nor an `await.PartialError`. For example, the master could be unreachable. We
// should fail in this case.
if liveObj == nil {
return nil, readErr
}
// If we get here, resource successfully registered with the API server, but failed to
// initialize.
}
// Attempt to parse the inputs for this object. If parsing was unsuccessful, retain the old inputs.
liveInputs := parseLiveInputs(liveObj, oldInputs)
// TODO(lblackstone): not sure why this is needed
id := fqObjName(liveObj)
if reqID := req.GetId(); len(reqID) > 0 {
id = reqID
}
// Return a new "checkpoint object".
state, err := plugin.MarshalProperties(
checkpointObject(liveInputs, liveObj, oldInputsPM, initialAPIVersion), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.state", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
liveInputsPM := resource.NewPropertyMapFromMap(liveInputs.Object)
annotateSecrets(liveInputsPM, oldInputsPM)
inputs, err := plugin.MarshalProperties(liveInputsPM, plugin.MarshalOptions{
Label: label + ".inputs", KeepUnknowns: true, SkipNulls: true, KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
if readErr != nil {
// Resource was created but failed to initialize. Return live version of object so it can be
// checkpointed.
logger.V(3).Infof("%v", partialError(id, readErr, state, inputs))
return nil, partialError(id, readErr, state, inputs)
}
return &pulumirpc.ReadResponse{Id: id, Properties: state, Inputs: inputs}, nil
}
// Update updates an existing resource with new values. Currently this client supports the
// Kubernetes-standard three-way JSON patch. See references here[1] and here[2].
//
// nolint
// [1]: https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment
// nolint
// [2]: https://kubernetes.io/docs/concepts/overview/object-management-kubectl/declarative-config/#how-apply-calculates-differences-and-merges-changes
func (k *kubeProvider) Update(
ctx context.Context, req *pulumirpc.UpdateRequest,
) (*pulumirpc.UpdateResponse, error) {
//
// Behavior as of v0.12.x: We take 2 inputs:
//
// 1. req.News, the new resource inputs, i.e., the property bag coming from a custom resource like
// k8s.core.v1.Service
// 2. req.Olds, the old _state_ returned by a `Create` or an `Update`. The old state has the form
// {inputs: {...}, live: {...}}, and is a struct that contains the old inputs as well as the
// last computed value obtained from the Kubernetes API server.
//
// Unlike other providers, the update is computed as a three way merge between: (1) the new
// inputs, (2) the computed state returned by the API server, and (3) the old inputs. This is the
// main reason why the old state is an object with both the old inputs and the live version of the
// object.
//
//
// TREAD CAREFULLY. The semantics of a Kubernetes update are subtle and you should proceed to
// change them only if you understand them deeply.
//
// Briefly: when a user updates an existing resource definition (e.g., by modifying YAML), the API
// server must decide how to apply the changes inside it, to the version of the resource that it
// has stored in etcd. In Kubernetes, this decision turns out to be quite complex. `kubectl`
// currently uses the three-way "strategic merge" and falls back to the three-way JSON merge. We
// currently support the second, but eventually we'll have to support the first, too.
//
// (NOTE: This comment is scoped to the question of how to patch an existing resource, rather than
// how to recognize when a resource needs to be re-created from scratch.)
//
// There are several reasons for this complexity:
//
// * It's important not to clobber fields set or default-set by the server (e.g., NodePort,
// namespace, service type, etc.), or by out-of-band tooling like admission controllers
// (which, e.g., might do something like add a sidecar to a container list).
// * For example, consider a scenario where a user renames a container. It is a reasonable
// expectation the old version of the container gets destroyed when the update is applied. And
// if the update strategy is set to three-way JSON merge patching, it is.
// * But, consider if their administrator has set up (say) the Istio admission controller, which
// embeds a sidecar container in pods submitted to the API. This container would not be present
// in the YAML file representing that pod, but when an update is applied by the user, they would
// not want it to get destroyed. And so, when the strategy is set to three-way strategic
// merge, the container is not destroyed. (With this strategy, fields can have "merge keys" as
// part of their schema, which tells the API server how to merge each particular field.)
//
// What's worse, currently nearly all of this logic exists on the client rather than the
// server, though there is ongoing work to move it to the server.
//
// So the roadmap is:
//
// - [x] Implement `Update` using the three-way JSON merge strategy.
// - [x] Cause `Update` to default to the three-way JSON merge patch strategy. (This will require
// plumbing, because it expects nominal types representing the API schema, but the
// discovery client is completely dynamic.)
// - [ ] Support server-side apply, when it comes out.
//
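// Illustrative aside (not part of the provider logic): a tiny, hypothetical worked example of why
// the three-way merge matters. Field names and values below are made up.
//
//   old inputs: {"metadata": {"labels": {"app": "v1"}}}
//   live state: {"metadata": {"labels": {"app": "v1"}, "annotations": {"sidecar": "injected"}}}
//   new inputs: {"metadata": {"labels": {"app": "v2"}}}
//
// A three-way JSON merge patch computed from (old inputs, new inputs, live state) only touches the
// changed label, e.g. {"metadata": {"labels": {"app": "v2"}}}, so the server-populated "sidecar"
// annotation survives the update, whereas a naive two-way diff against the live state would also
// delete it.
//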
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Update(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
// Except in the case of yamlRender mode, Update requires a connection to a k8s cluster, so bail out
// immediately if it is unreachable.
if !req.GetPreview() && k.clusterUnreachable && !k.yamlRenderMode {
return nil, fmt.Errorf("configured Kubernetes cluster is unreachable: %s", k.clusterUnreachableReason)
}
// Obtain old properties, create a Kubernetes `unstructured.Unstructured`.
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Obtain new properties, create a Kubernetes `unstructured.Unstructured`.
newResInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: true,
})
if err != nil {
return nil, pkgerrors.Wrapf(err, "update failed because malformed resource inputs")
}
newInputs := propMapToUnstructured(newResInputs)
// If this is a preview and the input values contain unknowns, return them as-is. This is compatible with
// prior behavior implemented by the Pulumi engine. Similarly, if the server does not support server-side
// dry run, return the inputs as-is.
if req.GetPreview() &&
(hasComputedValue(newInputs) || !k.supportsDryRun(newInputs.GroupVersionKind())) {
logger.V(9).Infof("cannot preview Update(%v)", urn)
return &pulumirpc.UpdateResponse{Properties: req.News}, nil
}
if isHelmRelease(urn) {
if k.clusterUnreachable {
return nil, fmt.Errorf("can't update Helm Release with unreachable cluster. Reason: %q", k.clusterUnreachableReason)
}
return k.helmReleaseProvider.Update(ctx, req)
}
// Ignore old state; we'll get it from Kubernetes later.
oldInputs, _ := parseCheckpointObject(oldState)
annotatedInputs, err := withLastAppliedConfig(newInputs)
if err != nil {
return nil, pkgerrors.Wrapf(
err, "Failed to update resource %s/%s because of an error generating the %s value in "+
"`.metadata.annotations`",
newInputs.GetNamespace(), newInputs.GetName(), lastAppliedConfigKey)
}
initialAPIVersion, err := initialAPIVersion(oldState, oldInputs)
if err != nil {
return nil, err
}
if k.yamlRenderMode {
if newResInputs.ContainsSecrets() {
_ = k.host.LogStatus(ctx, diag.Warning, urn, fmt.Sprintf(
"rendered file %s contains a secret value in plaintext",
renderPathForResource(annotatedInputs, k.yamlDirectory)))
}
err := renderYaml(annotatedInputs, k.yamlDirectory)
if err != nil {
return nil, err
}
obj := checkpointObject(newInputs, annotatedInputs, newResInputs, initialAPIVersion)
inputsAndComputed, err := plugin.MarshalProperties(
obj, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.inputsAndComputed", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
_ = k.host.LogStatus(ctx, diag.Info, urn, fmt.Sprintf(
"rendered %s", renderPathForResource(annotatedInputs, k.yamlDirectory)))
return &pulumirpc.UpdateResponse{Properties: inputsAndComputed}, nil
}
resources, err := k.getResources()
if err != nil {
return nil, pkgerrors.Wrapf(err, "Failed to fetch OpenAPI schema from the API server")
}
config := await.UpdateConfig{
ProviderConfig: await.ProviderConfig{
Context: k.canceler.context,
Host: k.host,
URN: urn,
InitialAPIVersion: initialAPIVersion,
EnableReplaceCRD: k.enableReplaceCRD,
ClientSet: k.clientSet,
DedupLogger: logging.NewLogger(k.canceler.context, k.host, urn),
Resources: resources,
},
Previous: oldInputs,
Inputs: annotatedInputs,
Timeout: req.Timeout,
DryRun: req.GetPreview(),
}
// Apply update.
initialized, awaitErr := await.Update(config)
if awaitErr != nil {
if req.GetPreview() && k.isDryRunDisabledError(awaitErr) {
logger.V(9).Infof("could not preview Update(%v): %v", urn, awaitErr)
return &pulumirpc.UpdateResponse{Properties: req.News}, nil
}
if meta.IsNoMatchError(awaitErr) {
// If it's a "no match" error, this is probably a CustomResource with no corresponding
// CustomResourceDefinition. This usually happens if the CRD was not created, and we
// print a more useful error message in this case.
return nil, pkgerrors.Wrapf(
awaitErr, "update of resource %s failed because the Kubernetes API server "+
"reported that the apiVersion for this resource does not exist. "+
"Verify that any required CRDs have been created", fqObjName(newInputs))
}
var getErr error
initialized, getErr = k.readLiveObject(newInputs)
if getErr != nil {
// Object update/creation failed.
return nil, pkgerrors.Wrapf(
awaitErr, "update of resource %s failed because the Kubernetes API server "+
"reported that it failed to fully initialize or become live", fqObjName(newInputs))
}
// If we get here, resource successfully registered with the API server, but failed to
// initialize.
}
// Return a new "checkpoint object".
obj := checkpointObject(newInputs, initialized, newResInputs, initialAPIVersion)
inputsAndComputed, err := plugin.MarshalProperties(
obj, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.inputsAndComputed", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
if awaitErr != nil {
// Resource was updated/created but failed to initialize. Return live version of object so it
// can be checkpointed.
return nil, partialError(
fqObjName(initialized),
pkgerrors.Wrapf(
awaitErr, "the Kubernetes API server reported that %q failed to fully initialize "+
"or become live", fqObjName(newInputs)),
inputsAndComputed,
nil)
}
return &pulumirpc.UpdateResponse{Properties: inputsAndComputed}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (k *kubeProvider) Delete(ctx context.Context, req *pulumirpc.DeleteRequest) (*pbempty.Empty, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Delete(%s)", k.label(), urn)
logger.V(9).Infof("%s executing", label)
// TODO(hausdorff): Propagate other options, like grace period through flags.
// Obtain new properties, create a Kubernetes `unstructured.Unstructured`.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
if isHelmRelease(urn) {
if !k.clusterUnreachable {
return k.helmReleaseProvider.Delete(ctx, req)
}
return nil, fmt.Errorf("can't delete Helm Release with unreachable cluster. Reason: %q", k.clusterUnreachableReason)
}
_, current := parseCheckpointObject(oldState)
_, name := parseFqName(req.GetId())
if k.yamlRenderMode {
file := renderPathForResource(current, k.yamlDirectory)
err := os.Remove(file)
if err != nil {
// Most of the time, errors will be because the file was already deleted. In this case,
// the operation succeeds. It's also possible that deletion fails due to file permission if
// the user changed the directory out-of-band, so log the error to help debug this scenario.
logger.V(3).Infof("Failed to delete YAML file: %q - %v", file, err)
}
_ = k.host.LogStatus(ctx, diag.Info, urn, fmt.Sprintf("deleted %s", file))
return &pbempty.Empty{}, nil
}
if k.clusterUnreachable {
return nil, fmt.Errorf("configured Kubernetes cluster is unreachable: %s\n"+
"If the cluster has been deleted, you can edit the pulumi state to remove this resource",
k.clusterUnreachableReason)
}
initialAPIVersion, err := initialAPIVersion(oldState, &unstructured.Unstructured{})
if err != nil {
return nil, err
}
resources, err := k.getResources()
if err != nil {
return nil, pkgerrors.Wrapf(err, "Failed to fetch OpenAPI schema from the API server")
}
config := await.DeleteConfig{
ProviderConfig: await.ProviderConfig{
Context: k.canceler.context, // TODO: should this just be ctx from the args?
Host: k.host,
URN: urn,
InitialAPIVersion: initialAPIVersion,
ClientSet: k.clientSet,
DedupLogger: logging.NewLogger(k.canceler.context, k.host, urn),
Resources: resources,
},
Inputs: current,
Name: name,
Timeout: req.Timeout,
}
awaitErr := await.Deletion(config)
if awaitErr != nil {
if meta.IsNoMatchError(awaitErr) {
// If it's a "no match" error, this is probably a CustomResource with no corresponding
// CustomResourceDefinition. This usually happens if the CRD was deleted, and it's safe
// to consider the CR to be deleted as well in this case.
return &pbempty.Empty{}, nil
}
partialErr, isPartialErr := awaitErr.(await.PartialError)
if !isPartialErr {
// There was an error executing the delete operation. The resource is still present and tracked.
return nil, awaitErr
}
lastKnownState := partialErr.Object()
inputsAndComputed, err := plugin.MarshalProperties(
checkpointObject(current, lastKnownState, oldState, initialAPIVersion), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.inputsAndComputed", label),
KeepUnknowns: true,
SkipNulls: true,
KeepSecrets: k.enableSecrets,
})
if err != nil {
return nil, err
}
// Resource delete was issued, but failed to complete. Return live version of object so it can be
// checkpointed.
return nil, partialError(fqObjName(lastKnownState), awaitErr, inputsAndComputed, nil)
}
return &pbempty.Empty{}, nil
}
// GetPluginInfo returns generic information about this plugin, like its version.
func (k *kubeProvider) GetPluginInfo(context.Context, *pbempty.Empty) (*pulumirpc.PluginInfo, error) {
return &pulumirpc.PluginInfo{
Version: k.version,
}, nil
}
// Cancel signals the provider to gracefully shut down and abort any ongoing resource operations.
// Operations aborted in this way will return an error (e.g., `Update` and `Create` will return either a
// creation error or an initialization error). Since Cancel is advisory and non-blocking, it is up
// to the host to decide how long to wait after Cancel is called before (e.g.)
// hard-closing any gRPC connection.
func (k *kubeProvider) Cancel(context.Context, *pbempty.Empty) (*pbempty.Empty, error) {
k.canceler.cancel()
return &pbempty.Empty{}, nil
}
// --------------------------------------------------------------------------
// Private helpers.
// --------------------------------------------------------------------------
func (k *kubeProvider) label() string {
return fmt.Sprintf("Provider[%s]", k.name)
}
func (k *kubeProvider) gvkFromURN(urn resource.URN) (schema.GroupVersionKind, error) {
if string(urn.Type().Package()) != k.providerPackage {
return schema.GroupVersionKind{}, fmt.Errorf("unrecognized resource type: %q for this provider",
urn.Type())
}
// Emit GVK.
kind := string(urn.Type().Name())
gv := strings.Split(string(urn.Type().Module().Name()), "/")
if len(gv) != 2 {
return schema.GroupVersionKind{},
fmt.Errorf("apiVersion does not have both a group and a version: %q", urn.Type().Module().Name())
}
group, version := gv[0], gv[1]
if group == "core" {
group = ""
}
return schema.GroupVersionKind{
Group: group,
Version: version,
Kind: kind,
}, nil
}
func (k *kubeProvider) readLiveObject(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
rc, err := k.clientSet.ResourceClientForObject(obj)
if err != nil {
return nil, err
}
// Get the "live" version of the last submitted object. This is necessary because the server may
// have populated some fields automatically, updated status fields, and so on.
return rc.Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
}
func (k *kubeProvider) serverSidePatch(oldInputs, newInputs *unstructured.Unstructured,
) ([]byte, map[string]interface{}, error) {
client, err := k.clientSet.ResourceClient(oldInputs.GroupVersionKind(), oldInputs.GetNamespace())
if err != nil {
return nil, nil, err
}
liveObject, err := client.Get(context.TODO(), oldInputs.GetName(), metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
liveInputs := parseLiveInputs(liveObject, oldInputs)
resources, err := k.getResources()
if err != nil {
return nil, nil, err
}
patch, patchType, _, err := openapi.PatchForResourceUpdate(resources, liveInputs, newInputs, liveObject)
if err != nil {
return nil, nil, err
}
// If the new resource does not exist, we need to dry-run a Create rather than a Patch.
var newObject *unstructured.Unstructured
_, err = client.Get(context.TODO(), newInputs.GetName(), metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
newObject, err = client.Create(context.TODO(), newInputs, metav1.CreateOptions{
DryRun: []string{metav1.DryRunAll},
})
case newInputs.GetNamespace() != oldInputs.GetNamespace():
client, err := k.clientSet.ResourceClient(newInputs.GroupVersionKind(), newInputs.GetNamespace())
if err != nil {
return nil, nil, err
}
newObject, err = client.Create(context.TODO(), newInputs, metav1.CreateOptions{
DryRun: []string{metav1.DryRunAll},
})
if err != nil {
return nil, nil, err
}
case err == nil:
newObject, err = client.Patch(context.TODO(), newInputs.GetName(), patchType, patch, metav1.PatchOptions{
DryRun: []string{metav1.DryRunAll},
})
default:
return nil, nil, err
}
if err != nil {
return nil, nil, err
}
liveJSON, err := liveObject.MarshalJSON()
if err != nil {
return nil, nil, err
}
newJSON, err := newObject.MarshalJSON()
if err != nil {
return nil, nil, err
}
patch, err = jsonpatch.CreateMergePatch(liveJSON, newJSON)
if err != nil {
return nil, nil, err
}
return patch, liveObject.Object, nil
}
// inputPatch calculates a patch on the client-side by comparing old inputs to the current inputs.
func (k *kubeProvider) inputPatch(
oldInputs, newInputs *unstructured.Unstructured,
) ([]byte, error) {
oldInputsJSON, err := oldInputs.MarshalJSON()
if err != nil {
return nil, err
}
newInputsJSON, err := newInputs.MarshalJSON()
if err != nil {
return nil, err
}
return jsonpatch.CreateMergePatch(oldInputsJSON, newInputsJSON)
}
func (k *kubeProvider) supportsDryRun(gvk schema.GroupVersionKind) bool {
// Check to see if the configuration has explicitly disabled server-side dry run.
if !k.enableDryRun {
logger.V(9).Infof("dry run is disabled")
return false
}
// Ensure that the cluster is reachable and supports the server-side diff feature.
if k.clusterUnreachable || !openapi.SupportsDryRun(k.dryRunVerifier, gvk) {
logger.V(9).Infof("server cannot dry run %v", gvk)
return false
}
return true
}
func (k *kubeProvider) isDryRunDisabledError(err error) bool {
se, isStatusError := err.(*errors.StatusError)
if !isStatusError {
return false
}
return se.Status().Code == http.StatusBadRequest &&
(se.Status().Message == "the dryRun alpha feature is disabled" ||
se.Status().Message == "the dryRun beta feature is disabled" ||
strings.Contains(se.Status().Message, "does not support dry run"))
}
// tryServerSidePatch attempts to compute a server-side patch. Returns true iff the operation succeeded.
func (k *kubeProvider) tryServerSidePatch(oldInputs, newInputs *unstructured.Unstructured, gvk schema.GroupVersionKind,
) ([]byte, map[string]interface{}, bool) {
// If the resource's GVK changed, compute the patch using inputs.
if oldInputs.GroupVersionKind().String() != gvk.String() {
return nil, nil, false
}
// If we can't dry-run the new GVK, compute the patch using inputs.
if !k.supportsDryRun(gvk) {
return nil, nil, false
}
// TODO: Skipping server-side diff for resources with computed values is a hack. We will want to address this
// more granularly so that previews are as accurate as possible, but this is an easy workaround for a critical
// bug.
if hasComputedValue(newInputs) || hasComputedValue(oldInputs) {
return nil, nil, false
}
ssPatch, ssPatchBase, err := k.serverSidePatch(oldInputs, newInputs)
if k.isDryRunDisabledError(err) {
return nil, nil, false
}
if se, isStatusError := err.(*errors.StatusError); isStatusError {
// If a resource field is immutable, the server-side dry-run patch fails; fall back to the client-side patch.
if se.Status().Code == http.StatusUnprocessableEntity ||
strings.Contains(se.ErrStatus.Message, "field is immutable") {
return nil, nil, false
}
}
// The server-side patch succeeded.
return ssPatch, ssPatchBase, true
}
func mapReplStripSecrets(v resource.PropertyValue) (interface{}, bool) {
if v.IsSecret() {
return v.SecretValue().Element.MapRepl(nil, mapReplStripSecrets), true
}
return nil, false
}
func propMapToUnstructured(pm resource.PropertyMap) *unstructured.Unstructured {
return &unstructured.Unstructured{Object: pm.MapRepl(nil, mapReplStripSecrets)}
}
func getAnnotations(config *unstructured.Unstructured) map[string]string {
annotations := config.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
return annotations
}
// legacyInitialAPIVersion maintains backward compatibility with behavior introduced in the 1.2.0 release. This
// information is now stored in the checkpoint file and the annotation is no longer used by the provider.
func legacyInitialAPIVersion(oldConfig, newConfig *unstructured.Unstructured) (*unstructured.Unstructured, error) {
oldAnnotations := getAnnotations(oldConfig)
newAnnotations := getAnnotations(newConfig)
apiVersion, exists := oldAnnotations[metadata.AnnotationInitialAPIVersion]
if exists {
// Keep the annotation if it was already created previously to minimize further disruption
// to existing resources.
newAnnotations[metadata.AnnotationInitialAPIVersion] = apiVersion
}
if len(newConfig.GetAnnotations()) > 0 {
newConfig.SetAnnotations(newAnnotations)
}
return newConfig, nil
}
// initialAPIVersion retrieves the initialAPIVersion property from the checkpoint file and falls back to using
// the `pulumi.com/initialAPIVersion` annotation if that property is not present.
func initialAPIVersion(state resource.PropertyMap, oldConfig *unstructured.Unstructured) (string, error) {
if v, ok := state[initialAPIVersionKey]; ok {
return v.StringValue(), nil
}
oldAnnotations := getAnnotations(oldConfig)
apiVersion, exists := oldAnnotations[metadata.AnnotationInitialAPIVersion]
if exists {
return apiVersion, nil
}
return oldConfig.GetAPIVersion(), nil
}
func withLastAppliedConfig(config *unstructured.Unstructured) (*unstructured.Unstructured, error) {
// Serialize the inputs and add the last-applied-configuration annotation.
marshaled, err := config.MarshalJSON()
if err != nil {
return nil, err
}
// Deep copy the config before returning.
config = config.DeepCopy()
annotations := getAnnotations(config)
annotations[lastAppliedConfigKey] = string(marshaled)
config.SetAnnotations(annotations)
return config, nil
}
func checkpointObject(inputs, live *unstructured.Unstructured, fromInputs resource.PropertyMap, initialAPIVersion string) resource.PropertyMap {
object := resource.NewPropertyMapFromMap(live.Object)
inputsPM := resource.NewPropertyMapFromMap(inputs.Object)
annotateSecrets(object, fromInputs)
annotateSecrets(inputsPM, fromInputs)
// For secrets, if `stringData` is present in the inputs, the API server will have filled in `data` based on it by
// base64 encoding the values. We should mark any values that were secret in the `stringData` object
// as secrets in the `data` field as well.
if live.GetAPIVersion() == "v1" && live.GetKind() == "Secret" {
stringData, hasStringData := fromInputs["stringData"]
data, hasData := object["data"]
if hasStringData && hasData {
if stringData.IsSecret() && !data.IsSecret() {
object["data"] = resource.MakeSecret(data)
}
if stringData.IsObject() && data.IsObject() {
annotateSecrets(data.ObjectValue(), stringData.ObjectValue())
}
}
}
// Ensure that the annotation we add for lastAppliedConfig is treated as a secret if any of the inputs were secret
// (the value of this annotation is a string-ified JSON so marking the entire thing as a secret is really the best
// that we can do).
if fromInputs.ContainsSecrets() {
if _, has := object["metadata"]; has && object["metadata"].IsObject() {
metadata := object["metadata"].ObjectValue()
if _, has := metadata["annotations"]; has && metadata["annotations"].IsObject() {
annotations := metadata["annotations"].ObjectValue()
if lastAppliedConfig, has := annotations[lastAppliedConfigKey]; has && !lastAppliedConfig.IsSecret() {
annotations[lastAppliedConfigKey] = resource.MakeSecret(lastAppliedConfig)
}
}
}
}
object["__inputs"] = resource.NewObjectProperty(inputsPM)
object[initialAPIVersionKey] = resource.NewStringProperty(initialAPIVersion)
return object
}
func parseCheckpointObject(obj resource.PropertyMap) (oldInputs, live *unstructured.Unstructured) {
// Since we are converting everything to Unstructured objects, we need to strip out any secretness that
// may be nested deep within the object.
pm := obj.MapRepl(nil, mapReplStripSecrets)
//
// NOTE: Inputs are now stored in `__inputs` to allow output properties to work. The inputs and
// live properties used to be stored next to each other, in an object that looked like {live:
// (...), inputs: (...)}, but this broke output property resolution. See [1] for more information.
//
// [1]: https://github.com/pulumi/pulumi-kubernetes/issues/137
//
inputs, hasInputs := pm["inputs"]
liveMap, hasLive := pm["live"]
if !hasInputs || !hasLive {
liveMap = pm
inputs, hasInputs = pm["__inputs"]
if hasInputs {
delete(liveMap.(map[string]interface{}), "__inputs")
} else {
inputs = map[string]interface{}{}
}
}
oldInputs = &unstructured.Unstructured{Object: inputs.(map[string]interface{})}
live = &unstructured.Unstructured{Object: liveMap.(map[string]interface{})}
return
}
// partialError creates an error for resources that did not complete an operation in progress.
// The last known state of the object is included in the error so that it can be checkpointed.
func partialError(id string, err error, state *structpb.Struct, inputs *structpb.Struct) error {
reasons := []string{err.Error()}
err = pkgerrors.Cause(err)
if aggregate, isAggregate := err.(await.AggregatedError); isAggregate {
reasons = append(reasons, aggregate.SubErrors()...)
}
detail := pulumirpc.ErrorResourceInitFailed{
Id: id,
Properties: state,
Reasons: reasons,
Inputs: inputs,
}
return rpcerror.WithDetails(rpcerror.New(codes.Unknown, err.Error()), &detail)
}
// canonicalNamespace provides the canonical name for a namespace. Specifically, if the
// namespace is "" (the empty string), we report its canonical name as "default".
func canonicalNamespace(ns string) string {
if ns == "" {
return "default"
}
return ns
}
// deleteResponse causes the resource to be deleted from the state.
var deleteResponse = &pulumirpc.ReadResponse{Id: "", Properties: nil}
// parseLastAppliedConfig attempts to find and parse an annotation that records the last applied configuration for the
// given live object state.
func parseLastAppliedConfig(live *unstructured.Unstructured) *unstructured.Unstructured {
// If `kubectl.kubernetes.io/last-applied-configuration` metadata annotation is present, parse it into a real object
// and use it as the current set of live inputs. Otherwise, return nil.
if live == nil {
return nil
}
annotations := live.GetAnnotations()
if annotations == nil {
return nil
}
lastAppliedConfig, ok := annotations[lastAppliedConfigKey]
if !ok {
return nil
}
liveInputs := &unstructured.Unstructured{}
if err := liveInputs.UnmarshalJSON([]byte(lastAppliedConfig)); err != nil {
return nil
}
return liveInputs
}
// parseLiveInputs attempts to parse the provider inputs that produced the given live object out of the object's state.
// This is used by Read.
func parseLiveInputs(live, oldInputs *unstructured.Unstructured) *unstructured.Unstructured {
// First try to find and parse a `kubectl.kubernetes.io/last-applied-configuration` metadata anotation. If that
// succeeds, we are done.
if inputs := parseLastAppliedConfig(live); inputs != nil {
return inputs
}
// If no such annotation was present--or if parsing failed--either retain the old inputs if they exist, or
// attempt to propagate the live object's GVK, any Pulumi-generated autoname and its annotation, and return
// the result.
if oldInputs != nil && len(oldInputs.Object) > 0 {
return oldInputs
}
inputs := &unstructured.Unstructured{Object: map[string]interface{}{}}
inputs.SetGroupVersionKind(live.GroupVersionKind())
metadata.AdoptOldAutonameIfUnnamed(inputs, live)
return inputs
}
// convertPatchToDiff converts the given JSON merge patch to a Pulumi detailed diff.
func convertPatchToDiff(
patch, oldLiveState, newInputs, oldInputs map[string]interface{}, forceNewFields ...string,
) (map[string]*pulumirpc.PropertyDiff, error) {
contract.Require(len(patch) != 0, "len(patch) != 0")
contract.Require(oldLiveState != nil, "oldLiveState != nil")
pc := &patchConverter{
forceNew: forceNewFields,
diff: map[string]*pulumirpc.PropertyDiff{},
}
err := pc.addPatchMapToDiff(nil, patch, oldLiveState, newInputs, oldInputs, false)
return pc.diff, err
}
// makePatchSlice recursively processes the given path to build a nested value (of maps and slices) that is
// appropriately shaped for querying using a JSON path. We use this in addPatchValueToDiff when deciding whether or not a particular
// property causes a replacement.
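// For example (illustrative values):
//
//   makePatchSlice([]interface{}{"spec", "replicas"}, 3) => map[string]interface{}{"spec": map[string]interface{}{"replicas": 3}}
//   makePatchSlice([]interface{}{"containers", 0}, v)    => map[string]interface{}{"containers": []interface{}{v}}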
func makePatchSlice(path []interface{}, v interface{}) interface{} {
if len(path) == 0 {
return v
}
switch p := path[0].(type) {
case string:
return map[string]interface{}{
p: makePatchSlice(path[1:], v),
}
case int:
return []interface{}{makePatchSlice(path[1:], v)}
default:
contract.Failf("unexpected element type in path: %T", p)
return nil
}
}
// equalNumbers returns true if a and b are numeric values (int64 or float64) that are equal. Note that this
// comparison will fail if either value is not representable as a float64.
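// For example (illustrative): equalNumbers(int64(3), float64(3)) is true, since both sides convert to the
// same float64, while equalNumbers(int64(3), float64(3.5)) is false.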
func equalNumbers(a, b interface{}) bool {
aKind, bKind := reflect.TypeOf(a).Kind(), reflect.TypeOf(b).Kind()
if aKind == bKind {
return reflect.DeepEqual(a, b)
}
toFloat := func(v interface{}) (float64, bool) {
switch field := v.(type) {
case int64:
return float64(field), true
case float64:
return field, true
default:
return 0, false
}
}
aVal, aOk := toFloat(a)
bVal, bOk := toFloat(b)
return aOk && bOk && aVal == bVal
}
// patchConverter carries context for convertPatchToDiff.
type patchConverter struct {
forceNew []string
diff map[string]*pulumirpc.PropertyDiff
}
// addPatchValueToDiff adds the given patched value to the detailed diff. Either the patched value or the old value
// must not be nil.
//
// The particular difference that is recorded depends on the old and new values:
// - If the patched value is nil, the property is recorded as deleted
// - If the old value is nil, the property is recorded as added
// - If the types of the old and new values differ, the property is recorded as updated
// - If both values are maps, the maps are recursively compared on a per-property basis and added to the diff
// - If both values are arrays, the arrays are recursively compared on a per-element basis and added to the diff
// - If both values are primitives and the values differ, the property is recorded as updated
// - Otherwise, no diff is recorded.
//
// If a difference is present at the given path and the path matches one of the patterns in the database of
// force-new properties, the diff is amended to indicate that the resource needs to be replaced due to the change in
// this property.
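// Illustrative example (hypothetical values): given the merge patch {"spec": {"replicas": 3}} and an
// old live state of {"spec": {"replicas": 1}}, the diff records "spec.replicas" as an UPDATE; if
// "spec.replicas" also matched one of the force-new patterns, the entry would instead be recorded as
// UPDATE_REPLACE to signal that the change requires replacing the resource.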
func (pc *patchConverter) addPatchValueToDiff(
path []interface{}, v, old, newInput, oldInput interface{}, inArray bool,
) error {
contract.Assert(v != nil || old != nil)
// If there is no new input, then the only possible diff here is a delete. All other diffs must be diffs between
// old and new properties that are populated by the server. If there is also no old input, then there is no diff
// whatsoever.
if newInput == nil && (v != nil || oldInput == nil) {
return nil
}
var diffKind pulumirpc.PropertyDiff_Kind
inputDiff := false
if v == nil {
diffKind, inputDiff = pulumirpc.PropertyDiff_DELETE, true
} else if old == nil {
diffKind = pulumirpc.PropertyDiff_ADD
} else {
switch v := v.(type) {
case map[string]interface{}:
if oldMap, ok := old.(map[string]interface{}); ok {
newInputMap, _ := newInput.(map[string]interface{})
oldInputMap, _ := oldInput.(map[string]interface{})
return pc.addPatchMapToDiff(path, v, oldMap, newInputMap, oldInputMap, inArray)
}
diffKind = pulumirpc.PropertyDiff_UPDATE
case []interface{}:
if oldArray, ok := old.([]interface{}); ok {
newInputArray, _ := newInput.([]interface{})
oldInputArray, _ := oldInput.([]interface{})
return pc.addPatchArrayToDiff(path, v, oldArray, newInputArray, oldInputArray, inArray)
}
diffKind = pulumirpc.PropertyDiff_UPDATE
default:
if reflect.DeepEqual(v, old) || equalNumbers(v, old) {
// From RFC 7386 (the JSON Merge Patch spec):
//
// If the patch is anything other than an object, the result will always be to replace the entire
// target with the entire patch. Also, it is not possible to patch part of a target that is not an
// object, such as to replace just some of the values in an array.
//
// Because JSON merge patch does not allow array elements to be updated--instead, the array must be
// replaced in full--the patch we have is an overestimate of the properties that changed. As such, we
// only record updates for values that have in fact changed.
return nil
}
diffKind = pulumirpc.PropertyDiff_UPDATE
}
}
// Determine if this change causes a replace.
matches, err := openapi.PatchPropertiesChanged(makePatchSlice(path, v).(map[string]interface{}), pc.forceNew)
if err != nil {
return err
}
if len(matches) != 0 {
switch diffKind {
case pulumirpc.PropertyDiff_ADD:
diffKind = pulumirpc.PropertyDiff_ADD_REPLACE
case pulumirpc.PropertyDiff_DELETE:
diffKind = pulumirpc.PropertyDiff_DELETE_REPLACE
case pulumirpc.PropertyDiff_UPDATE:
diffKind = pulumirpc.PropertyDiff_UPDATE_REPLACE
}
}
pathStr := ""
for _, v := range path {
switch v := v.(type) {
case string:
if strings.ContainsAny(v, `."[]`) {
pathStr = fmt.Sprintf(`%s["%s"]`, pathStr, strings.ReplaceAll(v, `"`, `\"`))
} else if pathStr != "" {
pathStr = fmt.Sprintf("%s.%s", pathStr, v)
} else {
pathStr = v
}
case int:
pathStr = fmt.Sprintf("%s[%d]", pathStr, v)
}
}
pc.diff[pathStr] = &pulumirpc.PropertyDiff{Kind: diffKind, InputDiff: inputDiff}
return nil
}
// addPatchMapToDiff adds the diffs in the given patched map to the detailed diff.
//
// If this map is contained within an array, we do a little bit more work to detect deletes, as they are not recorded
// in the patch in this case (see the note in addPatchValueToDiff for more details).
func (pc *patchConverter) addPatchMapToDiff(
path []interface{}, m, old, newInput, oldInput map[string]interface{}, inArray bool,
) error {
if newInput == nil {
newInput = map[string]interface{}{}
}
if oldInput == nil {
oldInput = map[string]interface{}{}
}
for k, v := range m {
if err := pc.addPatchValueToDiff(append(path, k), v, old[k], newInput[k], oldInput[k], inArray); err != nil {
return err
}
}
if inArray {
for k, v := range old {
if _, ok := m[k]; ok {
continue
}
if err := pc.addPatchValueToDiff(append(path, k), nil, v, newInput[k], oldInput[k], inArray); err != nil {
return err
}
}
}
return nil
}
// addPatchArrayToDiff adds the diffs in the given patched array to the detailed diff.
func (pc *patchConverter) addPatchArrayToDiff(
path []interface{}, a, old, newInput, oldInput []interface{}, inArray bool,
) error {
at := func(arr []interface{}, i int) interface{} {
if i < len(arr) {
return arr[i]
}
return nil
}
var i int
for i = 0; i < len(a) && i < len(old); i++ {
err := pc.addPatchValueToDiff(append(path, i), a[i], old[i], at(newInput, i), at(oldInput, i), true)
if err != nil {
return err
}
}
if i < len(a) {
for ; i < len(a); i++ {
err := pc.addPatchValueToDiff(append(path, i), a[i], nil, at(newInput, i), at(oldInput, i), true)
if err != nil {
return err
}
}
} else {
for ; i < len(old); i++ {
err := pc.addPatchValueToDiff(append(path, i), nil, old[i], at(newInput, i), at(oldInput, i), true)
if err != nil {
return err
}
}
}
return nil
}
// annotateSecrets copies the "secretness" from the ins to the outs. If there are values with the same keys for the
// outs and the ins, if they are both objects, they are transformed recursively. Otherwise, if the value in the ins
// contains a secret, the entire out value is marked as a secret. This is very close to how we project secrets
// in the programming model, with one small difference, which is how we treat the case where both are objects. In the
// programming model, we would say the entire output object is a secret. Here, we actually recurse into it. We do this because
// we don't want a single secret value in a rich structure to taint the entire object. Doing so would mean things like
// the entire value in the deployment would be encrypted instead of a small chunk. It also means the entire property
// would be displayed as `[secret]` in the CLI instead of a small part.
//
// NOTE: This means that for an array, if any value in the input version is a secret, the entire output array is
// marked as a secret. This is actually a very nice result, because often arrays are treated like sets by providers
// and the order may not be preserved across an operation. This means we do end up encrypting the entire array
// but that's better than accidentally leaking a value which just moved to a different location.
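// Illustrative example (hypothetical values): with ins = {"data": {"password": secret("hunter2")}} and
// outs = {"data": {"password": "aHVudGVyMg=="}}, the walk recurses into "data" because both sides are
// objects, and then marks outs["data"]["password"] as a secret because the corresponding input value
// contains a secret.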
func annotateSecrets(outs, ins resource.PropertyMap) {
if outs == nil || ins == nil {
return
}
for key, inValue := range ins {
outValue, has := outs[key]
if !has {
continue
}
if outValue.IsObject() && inValue.IsObject() {
annotateSecrets(outValue.ObjectValue(), inValue.ObjectValue())
} else if !outValue.IsSecret() && inValue.ContainsSecrets() {
outs[key] = resource.MakeSecret(outValue)
}
}
}
// renderYaml marshals an Unstructured resource to YAML and writes it to the specified path on disk or returns an error.
func renderYaml(resource *unstructured.Unstructured, yamlDirectory string) error {
jsonBytes, err := resource.MarshalJSON()
if err != nil {
return pkgerrors.Wrapf(err, "failed to render YAML file: %q", yamlDirectory)
}
yamlBytes, err := yaml.JSONToYAML(jsonBytes)
if err != nil {
return pkgerrors.Wrapf(err, "failed to render YAML file: %q", yamlDirectory)
}
crdDirectory := filepath.Join(yamlDirectory, "0-crd")
manifestDirectory := filepath.Join(yamlDirectory, "1-manifest")
if _, err := os.Stat(crdDirectory); os.IsNotExist(err) {
err = os.MkdirAll(crdDirectory, 0700)
if err != nil {
return pkgerrors.Wrapf(err, "failed to create directory for rendered YAML: %q", crdDirectory)
}
}
if _, err := os.Stat(manifestDirectory); os.IsNotExist(err) {
err = os.MkdirAll(manifestDirectory, 0700)
if err != nil {
return pkgerrors.Wrapf(err, "failed to create directory for rendered YAML: %q", manifestDirectory)
}
}
path := renderPathForResource(resource, yamlDirectory)
err = ioutil.WriteFile(path, yamlBytes, 0600)
if err != nil {
return pkgerrors.Wrapf(err, "failed to write YAML file: %q", path)
}
return nil
}
// renderPathForResource determines the appropriate YAML render path depending on the resource kind.
func renderPathForResource(resource *unstructured.Unstructured, yamlDirectory string) string {
crdDirectory := filepath.Join(yamlDirectory, "0-crd")
manifestDirectory := filepath.Join(yamlDirectory, "1-manifest")
namespace := "default"
if "" != resource.GetNamespace() {
namespace = resource.GetNamespace()
}
fileName := fmt.Sprintf("%s-%s-%s.yaml", strings.ToLower(resource.GetKind()), namespace, resource.GetName())
var path string
if kinds.Kind(resource.GetKind()) == kinds.CustomResourceDefinition {
path = filepath.Join(crdDirectory, fileName)
} else {
path = filepath.Join(manifestDirectory, fileName)
}
return path
}
|
[
"\"PULUMI_K8S_CLIENT_BURST\"",
"\"PULUMI_K8S_CLIENT_QPS\""
] |
[] |
[
"PULUMI_K8S_CLIENT_QPS",
"PULUMI_K8S_CLIENT_BURST"
] |
[]
|
["PULUMI_K8S_CLIENT_QPS", "PULUMI_K8S_CLIENT_BURST"]
|
go
| 2 | 0 | |
cointop/cointop.go
|
package cointop
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"github.com/miguelmota/cointop/cointop/common/api"
"github.com/miguelmota/cointop/cointop/common/api/types"
"github.com/miguelmota/cointop/cointop/common/filecache"
"github.com/miguelmota/cointop/cointop/common/gizak/termui"
"github.com/miguelmota/cointop/cointop/common/humanize"
"github.com/miguelmota/cointop/cointop/common/pathutil"
"github.com/miguelmota/cointop/cointop/common/table"
"github.com/miguelmota/gocui"
"github.com/patrickmn/go-cache"
)
// TODO: clean up and optimize codebase
// ErrInvalidAPIChoice is error for invalid API choice
var ErrInvalidAPIChoice = errors.New("Invalid API choice")
// Views are all views in cointop
type Views struct {
Chart *ChartView
Table *TableView
TableHeader *TableHeaderView
Marketbar *MarketbarView
SearchField *SearchFieldView
Statusbar *StatusbarView
Help *HelpView
ConvertMenu *ConvertMenuView
Input *InputView
PortfolioUpdateMenu *PortfolioUpdateMenuView
}
// State is the state preferences of cointop
type State struct {
allCoins []*Coin
allCoinsSlugMap sync.Map
coins []*Coin
chartPoints [][]termui.Cell
currencyConversion string
convertMenuVisible bool
defaultView string
// DEPRECATED: favorites by 'symbol' is deprecated because of collisions.
favoritesBySymbol map[string]bool
favorites map[string]bool
filterByFavorites bool
helpVisible bool
hideMarketbar bool
hideChart bool
hideStatusbar bool
lastSelectedRowIndex int
page int
perPage int
portfolio *Portfolio
portfolioVisible bool
portfolioUpdateMenuVisible bool
refreshRate time.Duration
running bool
searchFieldVisible bool
selectedCoin *Coin
selectedChartRange string
shortcutKeys map[string]string
sortDesc bool
sortBy string
onlyTable bool
chartHeight int
}
// Cointop is the main cointop application struct
type Cointop struct {
g *gocui.Gui
ActionsMap map[string]bool
apiKeys *APIKeys
cache *cache.Cache
config config // toml config
configFilepath string
api api.Interface
apiChoice string
chartRanges []string
chartRangesMap map[string]time.Duration
colorschemeName string
colorscheme *Colorscheme
debug bool
filecache *filecache.FileCache
forceRefresh chan bool
limiter <-chan time.Time
maxTableWidth int
refreshMux sync.Mutex
refreshTicker *time.Ticker
saveMux sync.Mutex
State *State
table *table.Table
TableColumnOrder []string
Views *Views
}
// CoinMarketCap is API choice
var CoinMarketCap = "coinmarketcap"
// CoinGecko is API choice
var CoinGecko = "coingecko"
// PortfolioEntry is portfolio entry
type PortfolioEntry struct {
Coin string
Holdings float64
}
// Portfolio is portfolio structure
type Portfolio struct {
Entries map[string]*PortfolioEntry
}
// Config config options
type Config struct {
APIChoice string
CacheDir string
Colorscheme string
ConfigFilepath string
CoinMarketCapAPIKey string
NoPrompts bool
HideMarketbar bool
HideChart bool
HideStatusbar bool
NoCache bool
OnlyTable bool
RefreshRate *uint
PerPage uint
}
// APIKeys is api keys structure
type APIKeys struct {
cmc string
}
// DefaultPerPage ...
var DefaultPerPage uint = 100
// DefaultColorscheme ...
var DefaultColorscheme = "cointop"
// DefaultConfigFilepath ...
var DefaultConfigFilepath = "~/.config/cointop/config.toml"
// DefaultCacheDir ...
var DefaultCacheDir = filecache.DefaultCacheDir
// NewCointop initializes cointop
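// A minimal usage sketch (illustrative; assumes the package is imported as "cointop" and that the
// standard library "log" package is available):
//
//	ct, err := cointop.NewCointop(&cointop.Config{APIChoice: cointop.CoinGecko})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := ct.Run(); err != nil {
//		log.Fatal(err)
//	}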
func NewCointop(config *Config) (*Cointop, error) {
var debug bool
if os.Getenv("DEBUG") != "" {
debug = true
}
if config == nil {
config = &Config{}
}
configFilepath := DefaultConfigFilepath
if config.ConfigFilepath != "" {
configFilepath = config.ConfigFilepath
}
var fcache *filecache.FileCache
if !config.NoCache {
fcache = filecache.NewFileCache(&filecache.Config{
CacheDir: config.CacheDir,
})
}
perPage := DefaultPerPage
if config.PerPage != 0 {
perPage = config.PerPage
}
ct := &Cointop{
apiChoice: CoinGecko,
apiKeys: new(APIKeys),
forceRefresh: make(chan bool),
maxTableWidth: 175,
ActionsMap: ActionsMap(),
cache: cache.New(1*time.Minute, 2*time.Minute),
configFilepath: configFilepath,
chartRanges: ChartRanges(),
debug: debug,
chartRangesMap: ChartRangesMap(),
limiter: time.Tick(2 * time.Second),
filecache: fcache,
State: &State{
allCoins: []*Coin{},
currencyConversion: "USD",
// DEPRECATED: favorites by 'symbol' is deprecated because of collisions. Kept for backward compatibility.
favoritesBySymbol: make(map[string]bool),
favorites: make(map[string]bool),
hideMarketbar: config.HideMarketbar,
hideChart: config.HideChart,
hideStatusbar: config.HideStatusbar,
onlyTable: config.OnlyTable,
refreshRate: 60 * time.Second,
selectedChartRange: "7D",
shortcutKeys: DefaultShortcuts(),
sortBy: "rank",
page: 0,
perPage: int(perPage),
portfolio: &Portfolio{
Entries: make(map[string]*PortfolioEntry, 0),
},
chartHeight: 10,
},
TableColumnOrder: TableColumnOrder(),
Views: &Views{
Chart: NewChartView(),
Table: NewTableView(),
TableHeader: NewTableHeaderView(),
Marketbar: NewMarketbarView(),
SearchField: NewSearchFieldView(),
Statusbar: NewStatusbarView(),
Help: NewHelpView(),
ConvertMenu: NewConvertMenuView(),
Input: NewInputView(),
PortfolioUpdateMenu: NewPortfolioUpdateMenuView(),
},
}
err := ct.SetupConfig()
if err != nil {
return nil, err
}
ct.cache.Set("onlyTable", ct.State.onlyTable, cache.NoExpiration)
ct.cache.Set("hideMarketbar", ct.State.hideMarketbar, cache.NoExpiration)
ct.cache.Set("hideChart", ct.State.hideChart, cache.NoExpiration)
ct.cache.Set("hideStatusbar", ct.State.hideStatusbar, cache.NoExpiration)
if config.RefreshRate != nil {
ct.State.refreshRate = time.Duration(*config.RefreshRate) * time.Second
}
if ct.State.refreshRate == 0 {
ct.refreshTicker = time.NewTicker(time.Duration(1))
ct.refreshTicker.Stop()
} else {
ct.refreshTicker = time.NewTicker(ct.State.refreshRate)
}
// prompt for CoinMarketCap api key if not found
if config.CoinMarketCapAPIKey != "" {
ct.apiKeys.cmc = config.CoinMarketCapAPIKey
if err := ct.SaveConfig(); err != nil {
return nil, err
}
}
if config.Colorscheme != "" {
ct.colorschemeName = config.Colorscheme
}
colors, err := ct.getColorschemeColors()
if err != nil {
return nil, err
}
ct.colorscheme = NewColorscheme(colors)
if config.APIChoice != "" {
ct.apiChoice = config.APIChoice
if err := ct.SaveConfig(); err != nil {
return nil, err
}
}
if ct.apiChoice == CoinMarketCap && ct.apiKeys.cmc == "" {
apiKey := os.Getenv("CMC_PRO_API_KEY")
if apiKey == "" {
if !config.NoPrompts {
apiKey, err = ct.ReadAPIKeyFromStdin("CoinMarketCap Pro")
if err != nil {
return nil, err
}
ct.apiKeys.cmc = apiKey
}
} else {
ct.apiKeys.cmc = apiKey
}
if err := ct.SaveConfig(); err != nil {
return nil, err
}
}
if ct.apiChoice == CoinGecko {
ct.State.selectedChartRange = "1Y"
}
if ct.apiChoice == CoinMarketCap {
ct.api = api.NewCMC(ct.apiKeys.cmc)
} else if ct.apiChoice == CoinGecko {
ct.api = api.NewCG()
} else {
return nil, ErrInvalidAPIChoice
}
allCoinsSlugMap := make(map[string]*Coin)
coinscachekey := ct.CacheKey("allCoinsSlugMap")
if ct.filecache != nil {
ct.filecache.Get(coinscachekey, &allCoinsSlugMap)
}
for k, v := range allCoinsSlugMap {
ct.State.allCoinsSlugMap.Store(k, v)
}
ct.State.allCoinsSlugMap.Range(func(key, value interface{}) bool {
if coin, ok := value.(*Coin); ok {
ct.State.allCoins = append(ct.State.allCoins, coin)
}
return true
})
if len(ct.State.allCoins) > 1 {
max := len(ct.State.allCoins)
if max > 100 {
max = 100
}
ct.Sort(ct.State.sortBy, ct.State.sortDesc, ct.State.allCoins, false)
ct.State.coins = ct.State.allCoins[0:max]
}
// DEPRECATED: favorites by 'symbol' is deprecated because of collisions. Kept for backward compatibility.
// Here we're doing a lookup based on symbol and setting the favorite to the coin name instead of coin symbol.
ct.State.allCoinsSlugMap.Range(func(key, value interface{}) bool {
if coin, ok := value.(*Coin); ok {
for k := range ct.State.favoritesBySymbol {
if coin.Symbol == k {
ct.State.favorites[coin.Name] = true
delete(ct.State.favoritesBySymbol, k)
}
}
}
return true
})
var globaldata []float64
chartcachekey := ct.CacheKey(fmt.Sprintf("%s_%s", "globaldata", strings.Replace(ct.State.selectedChartRange, " ", "", -1)))
if ct.filecache != nil {
ct.filecache.Get(chartcachekey, &globaldata)
}
ct.cache.Set(chartcachekey, globaldata, 10*time.Second)
var market types.GlobalMarketData
marketcachekey := ct.CacheKey("market")
if ct.filecache != nil {
ct.filecache.Get(marketcachekey, &market)
}
ct.cache.Set(marketcachekey, market, 10*time.Second)
// TODO: notify offline status in status bar
/*
if err := ct.api.Ping(); err != nil {
return nil, err
}
*/
return ct, nil
}
// Run runs cointop
func (ct *Cointop) Run() error {
ct.debuglog("run()")
g, err := gocui.NewGui(gocui.Output256)
if err != nil {
return fmt.Errorf("new gocui: %v", err)
}
g.FgColor = ct.colorscheme.BaseFg()
g.BgColor = ct.colorscheme.BaseBg()
ct.g = g
defer g.Close()
g.InputEsc = true
g.Mouse = true
g.Highlight = true
g.SetManagerFunc(ct.layout)
if err := ct.Keybindings(g); err != nil {
return fmt.Errorf("keybindings: %v", err)
}
ct.State.running = true
if err := g.MainLoop(); err != nil && err != gocui.ErrQuit {
return fmt.Errorf("main loop: %v", err)
}
return nil
}
// IsRunning returns true if cointop is running
func (ct *Cointop) IsRunning() bool {
return ct.State.running
}
// PriceConfig is the config options for the price command
type PriceConfig struct {
Coin string
Currency string
APIChoice string
}
// PrintPrice outputs the current price of the coin
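// A minimal usage sketch (illustrative; the coin and currency values are placeholders):
//
//	err := cointop.PrintPrice(&cointop.PriceConfig{
//		Coin:      "bitcoin",
//		Currency:  "USD",
//		APIChoice: cointop.CoinGecko,
//	})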
func PrintPrice(config *PriceConfig) error {
var priceAPI api.Interface
if config.APIChoice == CoinMarketCap {
priceAPI = api.NewCMC("")
} else if config.APIChoice == CoinGecko {
priceAPI = api.NewCG()
} else {
return ErrInvalidAPIChoice
}
price, err := priceAPI.Price(config.Coin, config.Currency)
if err != nil {
return err
}
symbol := CurrencySymbol(config.Currency)
fmt.Fprintf(os.Stdout, "%s%s\n", symbol, humanize.Commaf(price))
return nil
}
// CleanConfig is the config for the clean function
type CleanConfig struct {
Log bool
CacheDir string
}
// Clean removes cache files
func Clean(config *CleanConfig) error {
if config == nil {
config = &CleanConfig{}
}
cacheCleaned := false
cacheDir := DefaultCacheDir
if config.CacheDir != "" {
cacheDir = config.CacheDir
}
if _, err := os.Stat(cacheDir); !os.IsNotExist(err) {
files, err := ioutil.ReadDir(cacheDir)
if err != nil {
return err
}
for _, f := range files {
if strings.HasPrefix(f.Name(), "fcache.") {
file := fmt.Sprintf("%s/%s", cacheDir, f.Name())
if config.Log {
fmt.Printf("removing %s\n", file)
}
if err := os.Remove(file); err != nil {
return err
}
cacheCleaned = true
}
}
}
if config.Log {
if cacheCleaned {
fmt.Println("cointop cache has been cleaned")
}
}
return nil
}
// ResetConfig is the config for the reset function
type ResetConfig struct {
Log bool
CacheDir string
}
// Reset removes configuration and cache files
func Reset(config *ResetConfig) error {
if config == nil {
config = &ResetConfig{}
}
if err := Clean(&CleanConfig{
CacheDir: config.CacheDir,
Log: config.Log,
}); err != nil {
return err
}
configDeleted := false
for _, configPath := range possibleConfigPaths {
normalizedPath := pathutil.NormalizePath(configPath)
if _, err := os.Stat(normalizedPath); !os.IsNotExist(err) {
if config.Log {
fmt.Printf("removing %s\n", normalizedPath)
}
if err := os.RemoveAll(normalizedPath); err != nil {
return err
}
configDeleted = true
}
}
if config.Log {
if configDeleted {
fmt.Println("cointop has been reset")
}
}
return nil
}
|
[
"\"DEBUG\"",
"\"CMC_PRO_API_KEY\""
] |
[] |
[
"CMC_PRO_API_KEY",
"DEBUG"
] |
[]
|
["CMC_PRO_API_KEY", "DEBUG"]
|
go
| 2 | 0 | |
external-deps/python-lsp-server/test/plugins/test_completion.py
|
# Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
import math
import os
import sys
from pathlib import Path
from typing import NamedTuple, Dict
import pytest
from pylsp import uris, lsp
from pylsp.workspace import Document
from pylsp.plugins.jedi_completion import pylsp_completions as pylsp_jedi_completions
from pylsp.plugins.jedi_completion import pylsp_completion_item_resolve as pylsp_jedi_completion_item_resolve
from pylsp.plugins.rope_completion import pylsp_completions as pylsp_rope_completions
from pylsp._utils import JEDI_VERSION
PY2 = sys.version[0] == '2'
LINUX = sys.platform.startswith('linux')
CI = os.environ.get('CI')
LOCATION = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__))
)
DOC_URI = uris.from_fs_path(__file__)
DOC = """import os
print os.path.isabs("/tmp")
def hello():
pass
def _a_hello():
pass
class Hello():
@property
def world(self):
return None
def everyone(self, a, b, c=None, d=2):
pass
print Hello().world
print Hello().every
def documented_hello():
\"\"\"Sends a polite greeting\"\"\"
pass
"""
def test_rope_import_completion(config, workspace):
com_position = {'line': 0, 'character': 7}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items is None
class TypeCase(NamedTuple):
document: str
position: dict
label: str
expected: lsp.CompletionItemKind
TYPE_CASES: Dict[str, TypeCase] = {
'variable': TypeCase(
document='test = 1\ntes',
position={'line': 1, 'character': 3},
label='test',
expected=lsp.CompletionItemKind.Variable
),
'function': TypeCase(
document='def test():\n pass\ntes',
position={'line': 2, 'character': 3},
label='test()',
expected=lsp.CompletionItemKind.Function
),
'keyword': TypeCase(
document='fro',
position={'line': 0, 'character': 3},
label='from',
expected=lsp.CompletionItemKind.Keyword
),
'file': TypeCase(
document='"' + __file__[:-2].replace('"', '\\"') + '"',
position={'line': 0, 'character': len(__file__) - 2},
label=Path(__file__).name + '"',
expected=lsp.CompletionItemKind.File
),
'module': TypeCase(
document='import statis',
position={'line': 0, 'character': 13},
label='statistics',
expected=lsp.CompletionItemKind.Module
),
'class': TypeCase(
document='KeyErr',
position={'line': 0, 'character': 6},
label='KeyError',
expected=lsp.CompletionItemKind.Class
),
'property': TypeCase(
document=(
'class A:\n'
' @property\n'
' def test(self):\n'
' pass\n'
'A().tes'
),
position={'line': 4, 'character': 5},
label='test',
expected=lsp.CompletionItemKind.Property
)
}
@pytest.mark.parametrize('case', list(TYPE_CASES.values()), ids=list(TYPE_CASES.keys()))
def test_jedi_completion_type(case, config, workspace):
# property support was introduced in 0.18
if case.expected == lsp.CompletionItemKind.Property and JEDI_VERSION.startswith('0.17'):
return
doc = Document(DOC_URI, workspace, case.document)
items = pylsp_jedi_completions(config, doc, case.position)
items = {i['label']: i for i in items}
assert items[case.label]['kind'] == case.expected
def test_jedi_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
labels = [i['label'] for i in items]
assert 'isfile(path)' in labels
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_item_resolve(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c for c in completions}
documented_hello_item = items['documented_hello()']
assert 'documentation' not in documented_hello_item
assert 'detail' not in documented_hello_item
resolved_documented_hello = pylsp_jedi_completion_item_resolve(
completion_item=documented_hello_item,
document=doc
)
assert 'Sends a polite greeting' in resolved_documented_hello['documentation']
def test_jedi_completion_with_fuzzy_enabled(config, workspace):
# Over 'i' in os.path.isabs(...)
config.update({'plugins': {'jedi_completion': {'fuzzy': True}}})
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
expected = 'commonprefix(m)'
if JEDI_VERSION == '0.18.0':
expected = 'commonprefix(list)'
assert items[0]['label'] == expected
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_resolve_at_most(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
# Do not resolve any labels
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isabs' in labels
# Resolve all items
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isfile(path)' in labels
def test_rope_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
workspace.put_document(DOC_URI, source=DOC)
doc = workspace.get_document(DOC_URI)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items
assert items[0]['label'] == 'isabs'
def test_jedi_completion_ordering(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# And that 'hidden' functions come after unhidden ones
assert items['hello()'] < items['_a_hello()']
def test_jedi_property_completion(config, workspace):
# Over the 'w' in 'print Hello().world'
com_position = {'line': 18, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# Ensure we can complete the 'world' property
assert 'world' in list(items.keys())[0]
def test_jedi_method_completion(config, workspace):
# Over the 'y' in 'print Hello().every'
com_position = {'line': 20, 'character': 19}
doc = Document(DOC_URI, workspace, DOC)
config.capabilities['textDocument'] = {'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
# Ensure we only generate snippets for positional args
assert everyone_method['insertTextFormat'] == lsp.InsertTextFormat.Snippet
assert everyone_method['insertText'] == 'everyone(${1:a}, ${2:b})$0'
# Disable param snippets
config.update({'plugins': {'jedi_completion': {'include_params': False}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
assert 'insertTextFormat' not in everyone_method
assert everyone_method['insertText'] == 'everyone'
@pytest.mark.skipif(PY2 or (sys.platform.startswith('linux') and os.environ.get('CI') is not None),
reason="Test in Python 3 and not on CIs on Linux because wheels don't work on them.")
def test_pyqt_completion(config, workspace):
# Over 'QA' in 'from PyQt5.QtWidgets import QApplication'
doc_pyqt = "from PyQt5.QtWidgets import QA"
com_position = {'line': 0, 'character': len(doc_pyqt)}
doc = Document(DOC_URI, workspace, doc_pyqt)
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions is not None
def test_numpy_completions(config, workspace):
doc_numpy = "import numpy as np; np."
com_position = {'line': 0, 'character': len(doc_numpy)}
doc = Document(DOC_URI, workspace, doc_numpy)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('array' in i['label'] for i in items)
def test_pandas_completions(config, workspace):
doc_pandas = "import pandas as pd; pd."
com_position = {'line': 0, 'character': len(doc_pandas)}
doc = Document(DOC_URI, workspace, doc_pandas)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('DataFrame' in i['label'] for i in items)
def test_matplotlib_completions(config, workspace):
doc_mpl = "import matplotlib.pyplot as plt; plt."
com_position = {'line': 0, 'character': len(doc_mpl)}
doc = Document(DOC_URI, workspace, doc_mpl)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('plot' in i['label'] for i in items)
def test_snippets_completion(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
com_position = {'line': 0, 'character': 35}
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict($0)'
assert completions[0]['insertTextFormat'] == lsp.InsertTextFormat.Snippet
def test_snippets_completion_at_most(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
assert not completions[0].get('insertTextFormat', None)
def test_completion_with_class_objects(config, workspace):
doc_text = 'class FOOBAR(Object): pass\nFOOB'
com_position = {'line': 1, 'character': 4}
doc = Document(DOC_URI, workspace, doc_text)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {
'include_params': True,
'include_class_objects': True,
}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert len(completions) == 2
assert completions[0]['label'] == 'FOOBAR'
assert completions[0]['kind'] == lsp.CompletionItemKind.Class
assert completions[1]['label'] == 'FOOBAR object'
assert completions[1]['kind'] == lsp.CompletionItemKind.TypeParameter
def test_snippet_parsing(config, workspace):
doc = 'divmod'
completion_position = {'line': 0, 'character': 6}
doc = Document(DOC_URI, workspace, doc)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, completion_position)
out = 'divmod(${1:x}, ${2:y})$0'
if JEDI_VERSION == '0.18.0':
out = 'divmod(${1:a}, ${2:b})$0'
assert completions[0]['insertText'] == out
def test_multiline_import_snippets(config, workspace):
document = 'from datetime import(\n date,\n datetime)\na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multiline_snippets(config, workspace):
document = 'from datetime import\\\n date,\\\n datetime \na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multistatement_snippet(config, workspace):
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
document = 'a = 1; from datetime import date'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
document = 'from math import fmod; a = fmod'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'fmod(${1:x}, ${2:y})$0'
def test_jedi_completion_extra_paths(tmpdir, workspace):
# Create a tempfile with some content and pass to extra_paths
temp_doc_content = '''
def spam():
pass
'''
p = tmpdir.mkdir("extra_path")
extra_paths = [str(p)]
p = p.join("foo.py")
p.write(temp_doc_content)
# Content of doc to test completion
doc_content = """import foo
foo.s"""
doc = Document(DOC_URI, workspace, doc_content)
# After 'foo.s' without extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra paths
settings = {'pylsp': {'plugins': {'jedi': {'extra_paths': extra_paths}}}}
doc.update_config(settings)
# After 'foo.s' with extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'spam()'
@pytest.mark.skipif(PY2 or not LINUX or not CI, reason="tested on linux and python 3 only")
def test_jedi_completion_environment(workspace):
# Content of doc to test completion
doc_content = '''import logh
'''
doc = Document(DOC_URI, workspace, doc_content)
# After 'import logh' with default environment
com_position = {'line': 0, 'character': 11}
assert os.path.isdir('/tmp/pyenv/')
settings = {'pylsp': {'plugins': {'jedi': {'environment': None}}}}
doc.update_config(settings)
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra environment
env_path = '/tmp/pyenv/bin/python'
settings = {'pylsp': {'plugins': {'jedi': {'environment': env_path}}}}
doc.update_config(settings)
# After 'import logh' with new environment
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'loghub'
resolved = pylsp_jedi_completion_item_resolve(completions[0], doc)
assert 'changelog generator' in resolved['documentation'].lower()
def test_document_path_completions(tmpdir, workspace_other_root_path):
# Create a dummy module out of the workspace's root_path and try to get
# completions for it in another file placed next to it.
module_content = '''
def foo():
pass
'''
p = tmpdir.join("mymodule.py")
p.write(module_content)
# Content of doc to test completion
doc_content = """import mymodule
mymodule.f"""
doc_path = str(tmpdir) + os.path.sep + 'myfile.py'
doc_uri = uris.from_fs_path(doc_path)
doc = Document(doc_uri, workspace_other_root_path, doc_content)
com_position = {'line': 1, 'character': 10}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'foo()'
|
[] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
python
| 1 | 0 | |
pkg/lib/editor/editor.go
|
// @title Config Tool Editor API
// @version 0.0
// @contact.name Jonathan King
// @contact.email [email protected]
// @BasePath /api/v1
// @securityDefinitions.basic BasicAuth
// @schemes http
package editor
import (
"crypto/tls"
"errors"
"io/ioutil"
"log"
"mime"
"net/http"
"os"
auth "github.com/abbot/go-http-auth"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
_ "github.com/quay/config-tool/docs"
httpSwagger "github.com/swaggo/http-swagger"
"golang.org/x/crypto/bcrypt"
)
// ServerOptions holds information regarding the set up of the config-tool server
type ServerOptions struct {
username string
password string
port string
configPath string
staticContentPath string
operatorEndpoint string
readOnlyFieldGroups []string
podNamespace string // Optional
podName string // Optional
publicKeyPath string
privateKeyPath string
}
// ConfigBundle is the current state of the config bundle on the server. It may be read from a path on disk and then edited through the API.
type ConfigBundle struct {
Config map[string]interface{} `json:"config.yaml" yaml:"config.yaml"`
Certificates map[string][]byte `json:"certs,omitempty" yaml:"certs,omitempty"`
ManagedFieldGroups []string `json:"managedFieldGroups,omitempty" yaml:"managedFieldGroups,omitempty"`
}
// RunConfigEditor runs the configuration editor server.
func RunConfigEditor(password, configPath, operatorEndpoint string, readOnlyFieldGroups []string) {
// FIX THIS
publicKeyPath := os.Getenv("CONFIG_TOOL_PUBLIC_KEY")
privateKeyPath := os.Getenv("CONFIG_TOOL_PRIVATE_KEY")
staticContentPath, exists := os.LookupEnv("CONFIG_EDITOR_STATIC_CONTENT_PATH")
if !exists {
staticContentPath = "pkg/lib/editor/static"
}
podNamespace := os.Getenv("MY_POD_NAMESPACE")
podName := os.Getenv("MY_POD_NAME")
if operatorEndpoint != "" && (podNamespace == "" || podName == "") {
panic("If you would like to use operator reconfiguration features you must specify your namespace and pod name") // FIXME (jonathan) - come up with better error message
}
if readOnlyFieldGroups == nil {
readOnlyFieldGroups = []string{}
}
opts := &ServerOptions{
username: "quayconfig", // FIXME (jonathan) - add option to change username
password: password,
port: "8080", // FIXME (jonathan) - add option to change port
configPath: configPath,
staticContentPath: staticContentPath,
operatorEndpoint: operatorEndpoint,
readOnlyFieldGroups: readOnlyFieldGroups,
podNamespace: podNamespace,
podName: podName,
publicKeyPath: publicKeyPath,
privateKeyPath: privateKeyPath,
}
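	// Set up HTTP basic auth: the configured password is bcrypt-hashed once and every request's credentials are checked against that hash.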
hashed, _ := bcrypt.GenerateFromPassword([]byte(opts.password), 5)
authenticator := auth.NewBasicAuthenticator(opts.username, func(user, realm string) string {
if user == opts.username {
return string(hashed)
}
return ""
})
mime.AddExtensionType(".css", "text/css; charset=utf-8")
mime.AddExtensionType(".js", "application/javascript; charset=utf-8")
if opts.operatorEndpoint != "" {
log.Printf("Using Operator Endpoint: " + opts.operatorEndpoint)
}
r := chi.NewRouter()
if debug := os.Getenv("DEBUGLOG"); debug != "" {
r.Use(middleware.RequestID)
r.Use(middleware.RealIP)
r.Use(middleware.Logger)
r.Use(middleware.Recoverer)
}
// Function handlers
r.Get("/", rootHandler(opts))
r.Get("/api/v1/config", auth.JustCheck(authenticator, getMountedConfigBundle(opts)))
r.Post("/api/v1/config/validate", auth.JustCheck(authenticator, validateConfigBundle(opts)))
r.Post("/api/v1/config/download", auth.JustCheck(authenticator, downloadConfigBundle(opts)))
r.Post("/api/v1/config/operator", auth.JustCheck(authenticator, commitToOperator(opts)))
r.Get("/swagger/*", httpSwagger.Handler(
httpSwagger.URL("/docs/swagger.json"), // FIXME(jonathan) - This can eventually be changed to the github link to this file.
))
// File handlers
r.Get("/static/*", func(w http.ResponseWriter, r *http.Request) {
fs := http.StripPrefix("/static/", http.FileServer(http.Dir(opts.staticContentPath)))
fs.ServeHTTP(w, r)
})
r.Get("/docs/*", func(w http.ResponseWriter, r *http.Request) {
fs := http.StripPrefix("/docs/", http.FileServer(http.Dir("docs")))
fs.ServeHTTP(w, r)
})
// Create server base
s := &http.Server{
Addr: ":" + opts.port,
Handler: r,
}
// Try to load TLS
tlsConfig, err := loadTLS(opts.publicKeyPath, opts.privateKeyPath)
if err != nil {
log.Printf("An error occurred loading TLS: " + err.Error() + ". Server falling back to HTTP.")
log.Printf("Running the configuration editor with HTTP on port %v with username %s", opts.port, opts.username)
log.Fatal(s.ListenAndServe())
} else {
s.TLSConfig = tlsConfig
log.Printf("Running the configuration editor with HTTPS on port %v with username %s", opts.port, opts.username)
log.Fatal(s.ListenAndServeTLS("", ""))
}
}
// loadTLS attempts to create a TLS config from the given public and private key paths. It returns an error if it fails to create a Config.
func loadTLS(publicKeyPath, privateKeyPath string) (*tls.Config, error) {
if publicKeyPath == "" {
return nil, errors.New("No public key provided for HTTPS")
}
if privateKeyPath == "" {
return nil, errors.New("No private key provided for HTTPS")
}
crt, err := ioutil.ReadFile(publicKeyPath)
if err != nil {
return nil, errors.New("Could not open public key: " + publicKeyPath)
}
key, err := ioutil.ReadFile(privateKeyPath)
if err != nil {
return nil, errors.New("Could not open private key: " + privateKeyPath)
}
cert, err := tls.X509KeyPair(crt, key)
if err != nil {
return nil, errors.New("Could not load X509 key pair: " + err.Error())
}
return &tls.Config{
Certificates: []tls.Certificate{cert}}, nil
}
|
[
"\"CONFIG_TOOL_PUBLIC_KEY\"",
"\"CONFIG_TOOL_PRIVATE_KEY\"",
"\"MY_POD_NAMESPACE\"",
"\"MY_POD_NAME\"",
"\"DEBUGLOG\""
] |
[] |
[
"CONFIG_TOOL_PRIVATE_KEY",
"MY_POD_NAMESPACE",
"CONFIG_TOOL_PUBLIC_KEY",
"MY_POD_NAME",
"DEBUGLOG"
] |
[]
|
["CONFIG_TOOL_PRIVATE_KEY", "MY_POD_NAMESPACE", "CONFIG_TOOL_PUBLIC_KEY", "MY_POD_NAME", "DEBUGLOG"]
|
go
| 5 | 0 | |
storage/remote/queue_manager_test.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"context"
"fmt"
"io/ioutil"
"math"
"net/url"
"os"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/go-kit/kit/log"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/tsdb/record"
)
const defaultFlushDeadline = 1 * time.Minute
func newHighestTimestampMetric() *maxTimestamp {
return &maxTimestamp{
Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
}),
}
}
func TestSampleDelivery(t *testing.T) {
// Let's create an even number of send batches so we don't run into the
// batch timeout case.
n := config.DefaultQueueConfig.MaxSamplesPerSend * 2
samples, series := createTimeseries(n, n)
c := NewTestWriteClient()
c.expectSamples(samples[:len(samples)/2], series)
queueConfig := config.DefaultQueueConfig
queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
queueConfig.MaxShards = 1
queueConfig.Capacity = len(samples)
queueConfig.MaxSamplesPerSend = len(samples) / 2
dir, err := ioutil.TempDir("", "TestSampleDeliver")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline)
defer s.Close()
writeConfig := config.DefaultRemoteWriteConfig
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
&writeConfig,
},
}
// We need to set URL's so that metric creation doesn't panic.
writeConfig.URL = &common_config.URL{
URL: &url.URL{
Host: "http://test-storage.com",
},
}
writeConfig.QueueConfig = queueConfig
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
require.NoError(t, err)
qm := s.rws.queues[hash]
qm.SetClient(c)
qm.StoreSeries(series, 0)
qm.Append(samples[:len(samples)/2])
c.waitForExpectedSamples(t)
c.expectSamples(samples[len(samples)/2:], series)
qm.Append(samples[len(samples)/2:])
c.waitForExpectedSamples(t)
}
func TestSampleDeliveryTimeout(t *testing.T) {
// Let's send one less sample than batch size, and wait the timeout duration
n := 9
samples, series := createTimeseries(n, n)
c := NewTestWriteClient()
cfg := config.DefaultQueueConfig
cfg.MaxShards = 1
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
dir, err := ioutil.TempDir("", "TestSampleDeliveryTimeout")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
// Send the samples twice, waiting for the samples in the meantime.
c.expectSamples(samples, series)
m.Append(samples)
c.waitForExpectedSamples(t)
c.expectSamples(samples, series)
m.Append(samples)
c.waitForExpectedSamples(t)
}
func TestSampleDeliveryOrder(t *testing.T) {
ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]record.RefSample, 0, n)
series := make([]record.RefSeries, 0, n)
for i := 0; i < n; i++ {
name := fmt.Sprintf("test_metric_%d", i%ts)
samples = append(samples, record.RefSample{
Ref: uint64(i),
T: int64(i),
V: float64(i),
})
series = append(series, record.RefSeries{
Ref: uint64(i),
Labels: labels.Labels{labels.Label{Name: "__name__", Value: name}},
})
}
c := NewTestWriteClient()
c.expectSamples(samples, series)
dir, err := ioutil.TempDir("", "TestSampleDeliveryOrder")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline, newPool(), nil)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
// These should be received by the client.
m.Append(samples)
c.waitForExpectedSamples(t)
}
func TestShutdown(t *testing.T) {
deadline := 1 * time.Second
c := NewTestBlockedWriteClient()
dir, err := ioutil.TempDir("", "TestShutdown")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, deadline, newPool(), newHighestTimestampMetric())
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0)
m.Start()
// Append blocks to guarantee delivery, so we do it in the background.
go func() {
m.Append(samples)
}()
time.Sleep(100 * time.Millisecond)
// Test to ensure that Stop doesn't block.
start := time.Now()
m.Stop()
// The samples will never be delivered, so duration should
// be at least equal to deadline, otherwise the flush deadline
// was not respected.
duration := time.Since(start)
if duration > time.Duration(deadline+(deadline/10)) {
t.Errorf("Took too long to shutdown: %s > %s", duration, deadline)
}
if duration < time.Duration(deadline) {
t.Errorf("Shutdown occurred before flush deadline: %s < %s", duration, deadline)
}
}
func TestSeriesReset(t *testing.T) {
c := NewTestBlockedWriteClient()
deadline := 5 * time.Second
numSegments := 4
numSeries := 25
dir, err := ioutil.TempDir("", "TestSeriesReset")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, deadline, newPool(), newHighestTimestampMetric())
for i := 0; i < numSegments; i++ {
series := []record.RefSeries{}
for j := 0; j < numSeries; j++ {
series = append(series, record.RefSeries{Ref: uint64((i * 100) + j), Labels: labels.Labels{{Name: "a", Value: "a"}}})
}
m.StoreSeries(series, i)
}
require.Equal(t, numSegments*numSeries, len(m.seriesLabels))
m.SeriesReset(2)
require.Equal(t, numSegments*numSeries/2, len(m.seriesLabels))
}
func TestReshard(t *testing.T) {
size := 10 // Make bigger to find more races.
nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries)
c := NewTestWriteClient()
c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig
cfg.MaxShards = 1
dir, err := ioutil.TempDir("", "TestReshard")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
go func() {
for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
require.True(t, sent, "samples not sent")
time.Sleep(100 * time.Millisecond)
}
}()
for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ {
m.shards.stop()
m.shards.start(i)
time.Sleep(100 * time.Millisecond)
}
c.waitForExpectedSamples(t)
}
func TestReshardRaceWithStop(t *testing.T) {
c := NewTestWriteClient()
var m *QueueManager
h := sync.Mutex{}
h.Lock()
go func() {
for {
metrics := newQueueManagerMetrics(nil, "", "")
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
m.Start()
h.Unlock()
h.Lock()
m.Stop()
}
}()
for i := 1; i < 100; i++ {
h.Lock()
m.reshardChan <- i
h.Unlock()
}
}
func TestReleaseNoninternedString(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestWriteClient()
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
m.Start()
for i := 1; i < 1000; i++ {
m.StoreSeries([]record.RefSeries{
{
Ref: uint64(i),
Labels: labels.Labels{
labels.Label{
Name: "asdf",
Value: fmt.Sprintf("%d", i),
},
},
},
}, 0)
m.SeriesReset(1)
}
metric := client_testutil.ToFloat64(noReferenceReleases)
require.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
}
func TestShouldReshard(t *testing.T) {
type testcase struct {
startingShards int
samplesIn, samplesOut, lastSendTimestamp int64
expectedToReshard bool
}
cases := []testcase{
{
// Resharding shouldn't take place if the last successful send was > batch send deadline*2 seconds ago.
startingShards: 10,
samplesIn: 1000,
samplesOut: 10,
lastSendTimestamp: time.Now().Unix() - int64(3*time.Duration(config.DefaultQueueConfig.BatchSendDeadline)/time.Second),
expectedToReshard: false,
},
{
startingShards: 5,
samplesIn: 1000,
samplesOut: 10,
lastSendTimestamp: time.Now().Unix(),
expectedToReshard: true,
},
}
for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestWriteClient()
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
m.numShards = c.startingShards
m.samplesIn.incr(c.samplesIn)
m.samplesOut.incr(c.samplesOut)
m.lastSendTimestamp.Store(c.lastSendTimestamp)
m.Start()
desiredShards := m.calculateDesiredShards()
shouldReshard := m.shouldReshard(desiredShards)
m.Stop()
require.Equal(t, c.expectedToReshard, shouldReshard)
}
}
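// createTimeseries generates numSeries reference series named test_metric_<i>, each with numSamples samples, for use by the tests above.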
func createTimeseries(numSamples, numSeries int) ([]record.RefSample, []record.RefSeries) {
samples := make([]record.RefSample, 0, numSamples)
series := make([]record.RefSeries, 0, numSeries)
for i := 0; i < numSeries; i++ {
name := fmt.Sprintf("test_metric_%d", i)
for j := 0; j < numSamples; j++ {
samples = append(samples, record.RefSample{
Ref: uint64(i),
T: int64(j),
V: float64(i),
})
}
series = append(series, record.RefSeries{
Ref: uint64(i),
Labels: labels.Labels{{Name: "__name__", Value: name}},
})
}
return samples, series
}
func getSeriesNameFromRef(r record.RefSeries) string {
for _, l := range r.Labels {
if l.Name == "__name__" {
return l.Value
}
}
return ""
}
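// TestWriteClient is an in-memory WriteClient that records every sample it receives so tests can compare them against the expected set.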
type TestWriteClient struct {
receivedSamples map[string][]prompb.Sample
expectedSamples map[string][]prompb.Sample
withWaitGroup bool
wg sync.WaitGroup
mtx sync.Mutex
buf []byte
}
func NewTestWriteClient() *TestWriteClient {
return &TestWriteClient{
withWaitGroup: true,
receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{},
}
}
func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.RefSeries) {
if !c.withWaitGroup {
return
}
c.mtx.Lock()
defer c.mtx.Unlock()
c.expectedSamples = map[string][]prompb.Sample{}
c.receivedSamples = map[string][]prompb.Sample{}
for _, s := range ss {
seriesName := getSeriesNameFromRef(series[s.Ref])
c.expectedSamples[seriesName] = append(c.expectedSamples[seriesName], prompb.Sample{
Timestamp: s.T,
Value: s.V,
})
}
c.wg.Add(len(ss))
}
func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) {
if !c.withWaitGroup {
return
}
c.wg.Wait()
c.mtx.Lock()
defer c.mtx.Unlock()
for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
}
}
func (c *TestWriteClient) expectSampleCount(numSamples int) {
if !c.withWaitGroup {
return
}
c.mtx.Lock()
defer c.mtx.Unlock()
c.wg.Add(numSamples)
}
func (c *TestWriteClient) waitForExpectedSampleCount() {
if !c.withWaitGroup {
return
}
c.wg.Wait()
}
func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
c.mtx.Lock()
defer c.mtx.Unlock()
// nil buffers are ok for snappy, ignore cast error.
if c.buf != nil {
c.buf = c.buf[:cap(c.buf)]
}
reqBuf, err := snappy.Decode(c.buf, req)
c.buf = reqBuf
if err != nil {
return err
}
var reqProto prompb.WriteRequest
if err := proto.Unmarshal(reqBuf, &reqProto); err != nil {
return err
}
count := 0
for _, ts := range reqProto.Timeseries {
var seriesName string
labels := labelProtosToLabels(ts.Labels)
for _, label := range labels {
if label.Name == "__name__" {
seriesName = label.Value
}
}
for _, sample := range ts.Samples {
count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
}
}
if c.withWaitGroup {
c.wg.Add(-count)
}
return nil
}
func (c *TestWriteClient) Name() string {
return "testwriteclient"
}
func (c *TestWriteClient) Endpoint() string {
return "http://test-remote.com/1234"
}
// TestBlockingWriteClient is a queue_manager WriteClient which will block
// on any calls to Store(), until the request's Context is cancelled, at which
// point the `numCalls` property will contain a count of how many times Store()
// was called.
type TestBlockingWriteClient struct {
numCalls atomic.Uint64
}
func NewTestBlockedWriteClient() *TestBlockingWriteClient {
return &TestBlockingWriteClient{}
}
func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte) error {
c.numCalls.Inc()
<-ctx.Done()
return nil
}
func (c *TestBlockingWriteClient) NumCalls() uint64 {
return c.numCalls.Load()
}
func (c *TestBlockingWriteClient) Name() string {
return "testblockingwriteclient"
}
func (c *TestBlockingWriteClient) Endpoint() string {
return "http://test-remote-blocking.com/1234"
}
func BenchmarkSampleDelivery(b *testing.B) {
// Let's create an even number of send batches so we don't run into the
// batch timeout case.
n := config.DefaultQueueConfig.MaxSamplesPerSend * 10
samples, series := createTimeseries(n, n)
c := NewTestWriteClient()
cfg := config.DefaultQueueConfig
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
cfg.MaxShards = 1
dir, err := ioutil.TempDir("", "BenchmarkSampleDelivery")
require.NoError(b, err)
defer os.RemoveAll(dir)
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
m.StoreSeries(series, 0)
// These should be received by the client.
m.Start()
defer m.Stop()
b.ResetTimer()
for i := 0; i < b.N; i++ {
c.expectSampleCount(len(samples))
m.Append(samples)
c.waitForExpectedSampleCount()
}
// Do not include shutdown
b.StopTimer()
}
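// BenchmarkStartup replays an existing WAL, located via the WALDIR environment variable, through the watcher to measure startup cost; it returns early when WALDIR is unset.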
func BenchmarkStartup(b *testing.B) {
dir := os.Getenv("WALDIR")
if dir == "" {
return
}
// Find the second largest segment; we will replay up to this.
// (Second largest as WALWatcher will start tailing the largest).
dirents, err := ioutil.ReadDir(dir)
require.NoError(b, err)
var segments []int
for _, dirent := range dirents {
		if i, err := strconv.Atoi(dirent.Name()); err == nil {
segments = append(segments, i)
}
}
sort.Ints(segments)
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
logger = log.With(logger, "caller", log.DefaultCaller)
for n := 0; n < b.N; n++ {
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestBlockedWriteClient()
m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration),
config.DefaultQueueConfig, nil, nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric())
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run()
require.NoError(b, err)
}
}
func TestProcessExternalLabels(t *testing.T) {
for _, tc := range []struct {
labels labels.Labels
externalLabels labels.Labels
expected labels.Labels
}{
// Test adding labels at the end.
{
labels: labels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{{Name: "c", Value: "d"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
},
// Test adding labels at the beginning.
{
labels: labels.Labels{{Name: "c", Value: "d"}},
externalLabels: labels.Labels{{Name: "a", Value: "b"}},
expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}},
},
// Test we don't override existing labels.
{
labels: labels.Labels{{Name: "a", Value: "b"}},
externalLabels: labels.Labels{{Name: "a", Value: "c"}},
expected: labels.Labels{{Name: "a", Value: "b"}},
},
} {
require.Equal(t, tc.expected, processExternalLabels(tc.labels, tc.externalLabels))
}
}
func TestCalculateDesiredShards(t *testing.T) {
c := NewTestWriteClient()
cfg := config.DefaultQueueConfig
dir, err := ioutil.TempDir("", "TestCalculateDesiredShards")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric())
// Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual
// processing.
m.Start()
m.Stop()
inputRate := int64(50000)
var pendingSamples int64
// Two minute startup, no samples are sent.
startedAt := time.Now().Add(-2 * time.Minute)
// helper function for adding samples.
addSamples := func(s int64, ts time.Duration) {
pendingSamples += s
samplesIn.incr(s)
samplesIn.tick()
m.highestRecvTimestamp.Set(float64(startedAt.Add(ts).Unix()))
}
// helper function for sending samples.
sendSamples := func(s int64, ts time.Duration) {
pendingSamples -= s
m.samplesOut.incr(s)
m.samplesOutDuration.incr(int64(m.numShards) * int64(shardUpdateDuration))
// highest sent is how far back pending samples would be at our input rate.
highestSent := startedAt.Add(ts - time.Duration(pendingSamples/inputRate)*time.Second)
m.metrics.highestSentTimestamp.Set(float64(highestSent.Unix()))
m.lastSendTimestamp.Store(time.Now().Unix())
}
ts := time.Duration(0)
for ; ts < 120*time.Second; ts += shardUpdateDuration {
addSamples(inputRate*int64(shardUpdateDuration/time.Second), ts)
m.numShards = m.calculateDesiredShards()
require.Equal(t, 1, m.numShards)
}
// Assume 100ms per request, or 10 requests per second per shard.
// Shard calculation should never drop below barely keeping up.
minShards := int(inputRate) / cfg.MaxSamplesPerSend / 10
// This test should never go above 200 shards, that would be more resources than needed.
maxShards := 200
for ; ts < 15*time.Minute; ts += shardUpdateDuration {
sin := inputRate * int64(shardUpdateDuration/time.Second)
addSamples(sin, ts)
sout := int64(m.numShards*cfg.MaxSamplesPerSend) * int64(shardUpdateDuration/(100*time.Millisecond))
// You can't send samples that don't exist so cap at the number of pending samples.
if sout > pendingSamples {
sout = pendingSamples
}
sendSamples(sout, ts)
t.Log("desiredShards", m.numShards, "pendingSamples", pendingSamples)
m.numShards = m.calculateDesiredShards()
require.GreaterOrEqual(t, m.numShards, minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
require.LessOrEqual(t, m.numShards, maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
}
require.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
}
func TestQueueManagerMetrics(t *testing.T) {
reg := prometheus.NewPedanticRegistry()
metrics := newQueueManagerMetrics(reg, "name", "http://localhost:1234")
// Make sure metrics pass linting.
problems, err := client_testutil.GatherAndLint(reg)
require.NoError(t, err)
require.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems)
// Make sure all metrics were unregistered. A failure here means you need
// unregister a metric in `queueManagerMetrics.unregister()`.
metrics.unregister()
err = client_testutil.GatherAndCompare(reg, strings.NewReader(""))
require.NoError(t, err)
}
|
[
"\"WALDIR\""
] |
[] |
[
"WALDIR"
] |
[]
|
["WALDIR"]
|
go
| 1 | 0 | |
pkg/provider/retriever/ghcr.go
|
package retriever
import (
"context"
"fmt"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/aserto-dev/aserto-idp/pkg/x"
"github.com/containerd/containerd/remotes/docker"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/pkg/errors"
"oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/oras"
)
var defaultRepoAddress = "ghcr.io/aserto-dev"
type GhcrRetriever struct {
Store *content.File
RemoteStoreLocation string
LocalStoreLocation string
extension string
}
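// NewGhcrRetriever creates a retriever pointing at the plugin repository for the current OS and architecture; the registry address defaults to ghcr.io/aserto-dev and can be overridden with IDP_PLUGIN_REPO_ADDRESS.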
func NewGhcrRetriever() *GhcrRetriever {
opSys := runtime.GOOS
ext := ""
if opSys == "windows" {
ext = ".exe"
}
	repoAddress := os.Getenv("IDP_PLUGIN_REPO_ADDRESS")
	if repoAddress == "" {
		repoAddress = defaultRepoAddress
	}
	return &GhcrRetriever{
		extension:           ext,
		RemoteStoreLocation: fmt.Sprintf("%s/aserto-idp-plugins_%s_%s", repoAddress, opSys, runtime.GOARCH),
}
}
func (o *GhcrRetriever) Connect() error {
homeDir, err := os.UserHomeDir()
if err != nil {
return err
}
o.LocalStoreLocation = filepath.Join(homeDir, ".aserto", "idpplugins")
err = os.MkdirAll(o.LocalStoreLocation, 0777)
if err != nil {
return err
}
file := content.NewFile(o.LocalStoreLocation)
o.Store = file
return nil
}
func (o *GhcrRetriever) Disconnect() {
}
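// List returns the available plugin tags from the remote repository, mapping a 404 from the registry to an empty result.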
func (o *GhcrRetriever) List() ([]string, error) {
repoName := o.RemoteStoreLocation
repo, err := name.NewRepository(repoName)
if err != nil {
return nil, errors.Wrapf(err, "invalid repo name [%s]", repoName)
}
tags, err := remote.List(repo)
if err != nil {
if tErr, ok := err.(*transport.Error); ok {
switch tErr.StatusCode {
case http.StatusUnauthorized:
return nil, errors.Wrap(err, "authentication to docker registry failed")
case http.StatusNotFound:
return []string{}, nil
}
}
return nil, errors.Wrap(err, "failed to list tags from registry")
}
return tags, nil
}
func (o *GhcrRetriever) Download(pluginName, version string) error {
vers := strings.Split(version, ".")
if vers[0] != IdpMajVersion() {
return errors.New("incompatible version was provided for download; abort...") //nolint : revive : tbd
}
plgName := x.PluginPrefix + pluginName + o.extension
destFilePath := filepath.Join(o.LocalStoreLocation, plgName)
_, err := os.Stat(destFilePath)
if err == nil {
er := os.Remove(destFilePath)
if er != nil {
return errors.Wrap(err, "failed to remove old binary file")
}
}
ref := fmt.Sprintf("%s:%s-%s", o.RemoteStoreLocation, pluginName, version)
err = o.pull(ref)
if err != nil {
return err
}
err = os.Chmod(destFilePath, 0777)
if err != nil {
return errors.Wrapf(err, "failed to provide rights to output file [%s]", destFilePath)
}
return nil
}
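// pull copies the artifact referenced by ref from the remote registry into the local file store using ORAS, restricting the transfer to the expected layer and config media types.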
func (o *GhcrRetriever) pull(ref string) error {
resolver := docker.NewResolver(docker.ResolverOptions{
Hosts: func(s string) ([]docker.RegistryHost, error) {
client := &http.Client{}
return []docker.RegistryHost{
{
Host: s,
Scheme: "https",
Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush,
Client: client,
Path: "/v2",
Authorizer: docker.NewDockerAuthorizer(
docker.WithAuthClient(client)),
},
}, nil
},
})
allowedMediaTypes := []string{"application/vnd.unknown.layer.v1+txt", "application/vnd.unknown.config.v1+json"}
opts := []oras.CopyOpt{
oras.WithAllowedMediaTypes(allowedMediaTypes),
oras.WithAdditionalCachedMediaTypes(allowedMediaTypes...),
}
_, err := oras.Copy(context.Background(), resolver, ref, o.Store, "", opts...)
if err != nil {
return errors.Wrapf(err, "download for '%s' failed", ref)
}
return nil
}
|
[
"\"IDP_PLUGIN_REPO_ADDRESS\""
] |
[] |
[
"IDP_PLUGIN_REPO_ADDRESS"
] |
[]
|
["IDP_PLUGIN_REPO_ADDRESS"]
|
go
| 1 | 0 | |
train/t2m2/run.py
|
import random
from random import shuffle
import numpy as np
from datetime import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from typing import Any, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
target_city = 'ANTWERP'
other_city_list = ['ANTWERP', 'BANGKOK', 'BARCELONA', 'MOSCOW', 'BERLIN', 'CHICAGO', 'ISTANBUL', 'MELBOURNE', ]
input_train_data_folder_path = '../../0_data/' + target_city + '/' + 'training'
input_static_data_path = '../../0_data/' + target_city + '/' + target_city + "_static.h5"
out_dir = 'output'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
SEED = int(time.time())
num_train_file = 180
num_frame_per_day = 288
num_frame_before = 12
num_frame_sequence = 24
num_frame_out = 6
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height = 495
width = 436
num_channel = 8
num_channel_out = 8
num_channel_static = 9
visual_input_channels = 105
visual_output_channels = 48
vector_input_channels = 1
num_epoch_to_train = 100000000
save_per_iteration = 5000
global_step_start = 0
initial_checkpoint = None
initial_checkpoint_optimizer = None
LEARNING_RATE = 3e-4
batch_size = 2
batch_size_val = 1
num_thread = 2
num_groups = 8
EPS = 1e-12
np.set_printoptions(precision=8)
NUM_INPUT_CHANNEL = visual_input_channels
NUM_OUTPUT_CHANNEL = visual_output_channels
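# Collect the .h5 files in a folder and split them by year: 2019 files (used for training) and 2020 files (used for validation).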
def get_data_filepath_list_by_year(input_data_folder_path):
data_filepath_list_1 = []
data_filepath_list_2 = []
for filename in os.listdir(input_data_folder_path):
if filename.split('.')[-1] != 'h5':
continue
if filename.startswith('2019'):
data_filepath_list_1.append(os.path.join(input_data_folder_path, filename))
elif filename.startswith('2020'):
data_filepath_list_2.append(os.path.join(input_data_folder_path, filename))
else:
print('Error - Unknown data year\t', filename)
exit(-1)
data_filepath_list_1 = sorted(data_filepath_list_1)
data_filepath_list_2 = sorted(data_filepath_list_2)
return data_filepath_list_1, data_filepath_list_2
class Deconv3x3Block(nn.Sequential):
def __init__(self,
in_size: int,
h_size: int, ) -> None:
super(Deconv3x3Block, self).__init__()
self.add_module('deconv', nn.ConvTranspose2d(in_size, h_size, kernel_size=3, stride=2, padding=1, bias=True))
self.add_module('elu', nn.ELU(inplace=True))
self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class Conv1x1Block(nn.Sequential):
def __init__(self,
in_size: int,
h_size: int, ) -> None:
super(Conv1x1Block, self).__init__()
self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=1, stride=1, padding=0, bias=True))
class Conv3x3Block(nn.Sequential):
def __init__(self,
in_size: int,
h_size: int, ) -> None:
super(Conv3x3Block, self).__init__()
self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=3, stride=1, padding=1, bias=True))
self.add_module('elu', nn.ELU(inplace=True))
self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class AvgBlock(nn.Sequential):
def __init__(self,
kernel_size: int,
stride: int,
padding: int) -> None:
super(AvgBlock, self).__init__()
self.add_module('pool', nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class MaxBlock(nn.Sequential):
def __init__(self,
kernel_size: int,
stride: int,
padding: int) -> None:
super(MaxBlock, self).__init__()
self.add_module('pool', nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class DownBlock(nn.Module):
def __init__(self,
in_size: int,
h_size: int,
out_size: int,
do_pool: int = True):
super(DownBlock, self).__init__()
self.do_pool = do_pool
in_size_cum = in_size
self.conv_1 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
in_size_cum += h_size
self.conv_3 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
in_size_cum += h_size
self.conv_2 = Conv1x1Block( in_size=in_size_cum, h_size=out_size)
def forward(self, x):
batch_size = len(x)
if self.do_pool:
x = F.interpolate(x, scale_factor=0.7, mode='bilinear', align_corners=False, recompute_scale_factor=None)
x_list = []
x_list.append(x)
x = self.conv_1(x)
x_list.append(x)
x = torch.cat(x_list, 1)
x = self.conv_3(x)
x_list.append(x)
x = torch.cat(x_list, 1)
x = self.conv_2(x)
return x
def cuda(self, ):
super(DownBlock, self).cuda()
self.conv_1.cuda()
self.conv_3.cuda()
self.conv_2.cuda()
return self
class UpBlock(nn.Module):
def __init__(self,
in_size: int,
in_size_2: int,
h_size: int,
out_size: int,
):
super(UpBlock, self).__init__()
self.deconv = Conv3x3Block( in_size=in_size, h_size=h_size)
self.out_conv = Conv3x3Block( in_size=h_size + in_size_2, h_size=out_size)
def forward(self, x1, x2):
x1 = self.deconv(x1)
x1 = F.interpolate(x1, size=x2.size()[2:4], scale_factor=None, mode='bilinear', align_corners=False, recompute_scale_factor=None)
x = torch.cat([x2, x1], dim=1)
return self.out_conv(x)
def cuda(self, ):
super(UpBlock, self).cuda()
self.deconv.cuda()
self.out_conv.cuda()
return self
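# NetA: a U-Net style encoder-decoder built from DownBlocks (bilinear downscaling by a factor of 0.7) and UpBlocks with skip connections, ending in a 3x3 output convolution and a sigmoid.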
class NetA(nn.Module):
def __init__(self,):
super(NetA, self).__init__()
self.block0 = DownBlock(in_size=NUM_INPUT_CHANNEL, h_size=128, out_size=128, do_pool=False)
self.block1 = DownBlock(in_size=128, h_size=128, out_size=128,)
self.block2 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block3 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block4 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block5 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block6 = DownBlock(in_size=128, h_size=128, out_size=128,)
self.block7 = DownBlock(in_size=128, h_size=128, out_size=128,)
self.block20 = Conv3x3Block(in_size=128, h_size=128)
self.block16 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block15 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block14 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block13 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block12 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block11 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
self.block10 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
self.out_conv = nn.Sequential(nn.Conv2d(128*1, NUM_OUTPUT_CHANNEL, kernel_size=3, stride=1, padding=1, bias=True))
if 1:
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
batch_size = len(x)
x0 = self.block0(x)
x1 = self.block1(x0)
x2 = self.block2(x1)
x3 = self.block3(x2)
x4 = self.block4(x3)
x5 = self.block5(x4)
x6 = self.block6(x5)
x7 = self.block7(x6)
x = self.block20(x7)
x = self.block16(x, x6)
x = self.block15(x, x5)
x = self.block14(x, x4)
x = self.block13(x, x3)
x = self.block12(x, x2)
x = self.block11(x, x1)
x = self.block10(x, x0)
x = self.out_conv(x)
x = torch.sigmoid(x)
return x
def cuda(self, ):
super(NetA, self).cuda()
self.block0.cuda()
self.block1.cuda()
self.block2.cuda()
self.block3.cuda()
self.block4.cuda()
self.block5.cuda()
self.block6.cuda()
self.block7.cuda()
self.block20.cuda()
self.block16.cuda()
self.block15.cuda()
self.block14.cuda()
self.block13.cuda()
self.block12.cuda()
self.block11.cuda()
self.block10.cuda()
self.out_conv.cuda()
return self
if __name__ == '__main__':
    if initial_checkpoint is None:
assert global_step_start == 0
else:
assert global_step_start > 0
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
try:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
except Exception:
print('out_dir not made')
net = NetA().cuda()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=LEARNING_RATE)
loss_func2 = nn.MSELoss()
if initial_checkpoint is not None:
print('Loading ', initial_checkpoint)
state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict, strict=True)
optimizer_state_dict_ = torch.load(initial_checkpoint_optimizer, map_location=lambda storage, loc: storage)
optimizer_state_dict = optimizer_state_dict_['optimizer']
optimizer.load_state_dict(optimizer_state_dict)
static_data = None
if 1:
file_path = input_static_data_path
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = np.asarray(fr[a_group_key], np.uint8)
static_data = data[np.newaxis,:,:,:]
static_data = static_data.astype(np.float32)
static_data = static_data / 255.0
static_data_list = []
if 1:
for other_city in other_city_list:
file_path = '../../0_data/' + other_city + '/' + other_city + "_static.h5"
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = np.asarray(fr[a_group_key], np.uint8)
static_data_ = data[np.newaxis,:,:,:]
static_data_ = static_data_.astype(np.float32)
static_data_ = static_data_ / 255.0
static_data_list.append(static_data_)
train_static_data_index_list = []
train_data_filepath_list, val_data_filepath_list = get_data_filepath_list_by_year(input_train_data_folder_path)
target_city_i = other_city_list.index(target_city)
for _ in range(len(train_data_filepath_list)):
train_static_data_index_list.append(target_city_i)
for o, other_city in enumerate(other_city_list):
if o == target_city_i:
continue
train_data_filepath_list_one, _ = get_data_filepath_list_by_year('../../0_data/' + other_city + '/' + 'training')
for _ in range(len(train_data_filepath_list_one)):
train_static_data_index_list.append(o)
train_data_filepath_list += train_data_filepath_list_one
train_set = []
for i in range(len(train_data_filepath_list)):
for j in range(num_sequence_per_day):
train_set.append( (i,j) )
num_iteration_per_epoch = int(len(train_set) / batch_size)
print('num_iteration_per_epoch:', num_iteration_per_epoch)
assert num_iteration_per_epoch > 10
val_set = []
val_skip_k = 0
val_skip_ratio = 5
for i in range(len(val_data_filepath_list)):
for j in range(0, num_sequence_per_day, num_frame_sequence):
val_skip_k += 1
if val_skip_k % val_skip_ratio == 0:
val_set.append( (i,j) )
num_val_iteration_per_epoch = int(len(val_set) / batch_size_val)
print('num_val_iteration_per_epoch:', num_val_iteration_per_epoch)
train_input_queue = queue.Queue()
train_output_queue = queue.Queue()
def load_train_multithread():
while True:
if train_input_queue.empty() or train_output_queue.qsize() > 8:
time.sleep(0.1)
continue
i_j_list = train_input_queue.get()
train_orig_data_batch_list = []
train_data_batch_list = []
train_data_mask_list = []
train_stat_batch_list = []
train_static_data_batch_list = []
for train_i_j in i_j_list:
(i,j) = train_i_j
file_path = train_data_filepath_list[i]
train_static_data_batch_list.append(static_data_list[train_static_data_index_list[i]])
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = fr[a_group_key]
train_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])
train_data_batch = np.concatenate(train_data_batch_list, axis=0)
train_static_data_batch = np.concatenate(train_static_data_batch_list,axis=0)
input_data = train_data_batch[:,:num_frame_before ,:,:,:]
orig_label = train_data_batch[:, num_frame_before:,:,:,:num_channel_out]
true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:] ), axis=1)
input_data = input_data.astype(np.float32)
true_label = true_label.astype(np.float32)
input_data = input_data / 255.0
true_label = true_label / 255.0
flip_dr = np.random.randint(0,2)
if flip_dr == 1:
input_data_flipped = copy.deepcopy(input_data)
input_data_flipped[:,:,:,:,4:8] = input_data[:,:,:,:,0:4]
input_data_flipped[:,:,:,:,0:4] = input_data[:,:,:,:,4:8]
input_data = input_data_flipped[:,:,::-1,::-1,:]
true_label_flipped = copy.deepcopy(true_label)
true_label_flipped[:,:,:,:,4:8] = true_label[:,:,:,:,0:4]
true_label_flipped[:,:,:,:,0:4] = true_label[:,:,:,:,4:8]
true_label = true_label_flipped[:,:,::-1,::-1,:]
train_static_data_batch_flipped = copy.deepcopy(train_static_data_batch)
train_static_data_batch_flipped[:,5:9,:,:] = train_static_data_batch[:,1:5,:,:]
train_static_data_batch_flipped[:,1:5,:,:] = train_static_data_batch[:,5:9,:,:]
train_static_data_batch = train_static_data_batch_flipped[:,:,::-1,::-1]
input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size, -1, height, width))
true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size, -1, height, width))
input_data = np.concatenate((input_data, train_static_data_batch), axis=1)
train_output_queue.put( (input_data, true_label) )
thread_list = []
assert num_thread > 0
for i in range(num_thread):
t = threading.Thread(target=load_train_multithread)
t.start()
net.train()
sum_train_loss = 0.0
sum_train_iter = 0
global_step = global_step_start
for epoch in range(num_epoch_to_train):
np.random.shuffle(train_set)
for a in range(num_iteration_per_epoch):
i_j_list = []
for train_i_j in train_set[a * batch_size : (a+1) * batch_size]:
i_j_list.append(train_i_j)
train_input_queue.put(i_j_list)
for a in range(num_iteration_per_epoch):
if global_step % save_per_iteration == 0:
net.eval()
state_dict_0 = copy.deepcopy(net.state_dict())
torch.save(state_dict_0, out_dir + '/%09d_model.pth' % (global_step))
torch.save(
{
'optimizer': optimizer.state_dict(),
'global_step': global_step,
'epoch': epoch,
},
out_dir + '/%09d_optimizer.pth' % (global_step))
                eval_loss_list = [0]
with torch.no_grad():
for a in range(num_val_iteration_per_epoch):
val_orig_data_batch_list = []
val_data_batch_list = []
val_data_mask_list = []
val_stat_batch_list = []
for i_j in val_set[a * batch_size_val : (a+1) * batch_size_val]:
(i,j) = i_j
file_path = val_data_filepath_list[i]
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = fr[a_group_key]
val_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])
val_data_batch = np.concatenate(val_data_batch_list, axis=0)
input_data = val_data_batch[:,:num_frame_before ,:,:,:]
orig_label = val_data_batch[:, num_frame_before:,:,:,:num_channel_out]
true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:]), axis=1)
input_data = input_data.astype(np.float32)
true_label = true_label.astype(np.float32)
input_data = input_data / 255.0
true_label = true_label / 255.0
input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size_val, -1, height, width))
true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size_val, -1, height, width))
input_data = np.concatenate((input_data,np.repeat(static_data, batch_size_val, axis=0)), axis=1)
input = torch.from_numpy(input_data).float().cuda()
target = torch.from_numpy(true_label).float().cuda()
prediction = net(input)
loss = loss_func2(prediction, target)
eval_loss_list.append(loss.item())
avg_train_loss = sum_train_loss / (float(sum_train_iter)+EPS)
sum_train_loss = 0.0
sum_train_iter = 0
print('global_step:', global_step, '\t', 'epoch:', epoch, \
'\t', 'train_loss:', avg_train_loss, \
'\t', 'eval_loss:', np.mean(eval_loss_list), \
'\t', datetime.now(), )
debug_out = open('res.txt', 'a')
debug_out.write(str(global_step))
debug_out.write('\t')
debug_out.write('%.8f' % float(avg_train_loss))
debug_out.write('\t')
debug_out.write('%.8f' % float(np.mean(eval_loss_list)))
debug_out.write('\n')
debug_out.close()
net.train()
while train_output_queue.empty():
time.sleep(0.1)
(input_data, true_label) = train_output_queue.get()
optimizer.zero_grad()
input = torch.from_numpy(input_data).float().cuda()
target = torch.from_numpy(true_label).float().cuda()
prediction = net(input)
loss = loss_func2(prediction, target)
sum_train_iter += 1
sum_train_loss += loss.item()
loss.backward()
optimizer.step()
global_step += 1
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
emtmp.py
|
import smtplib
import imghdr
import os
from email.message import EmailMessage

MY_EMAIL = os.environ.get('MY_EMAIL')


def send_email(to="[email protected]"):
    message = EmailMessage()
    message['Subject'] = "Swapnil Sagar CyberGeeks"
    message['From'] = MY_EMAIL
    message['To'] = to
    message.set_content('Welcome to Cybergeek')
    # HTML body rendered from the local template file
    with open('template.html') as template_file:
        html_message = template_file.read()
    message.add_alternative(html_message, subtype='html')
    # To add an attachment:
    # with open('blurb_01.png', 'rb') as attach_file:
    #     image_name = attach_file.name
    #     image_type = imghdr.what(attach_file.name)
    #     image_data = attach_file.read()
    #     message.add_attachment(image_data, maintype="image", subtype=image_type, filename=image_name)
    with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
        smtp.login(os.environ.get('MY_EMAIL'), os.environ.get('MY_PASS'))
        smtp.send_message(message)


send_email(MY_EMAIL)
|
[] |
[] |
[
"MY_PASS",
"MY_EMAIL"
] |
[]
|
["MY_PASS", "MY_EMAIL"]
|
python
| 2 | 0 | |
drive/v3/drive-gen.go
|
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package drive provides access to the Drive API.
//
// For product documentation, see: https://developers.google.com/drive/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/drive/v3"
// ...
// ctx := context.Background()
// driveService, err := drive.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes:
//
// driveService, err := drive.NewService(ctx, option.WithScopes(drive.DriveScriptsScope))
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// driveService, err := drive.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// driveService, err := drive.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package drive // import "google.golang.org/api/drive/v3"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "drive:v3"
const apiName = "drive"
const apiVersion = "v3"
const basePath = "https://www.googleapis.com/drive/v3/"
// OAuth2 scopes used by this API.
const (
// See, edit, create, and delete all of your Google Drive files
DriveScope = "https://www.googleapis.com/auth/drive"
// View and manage its own configuration data in your Google Drive
DriveAppdataScope = "https://www.googleapis.com/auth/drive.appdata"
// View and manage Google Drive files and folders that you have opened
// or created with this app
DriveFileScope = "https://www.googleapis.com/auth/drive.file"
// View and manage metadata of files in your Google Drive
DriveMetadataScope = "https://www.googleapis.com/auth/drive.metadata"
// View metadata for files in your Google Drive
DriveMetadataReadonlyScope = "https://www.googleapis.com/auth/drive.metadata.readonly"
// View the photos, videos and albums in your Google Photos
DrivePhotosReadonlyScope = "https://www.googleapis.com/auth/drive.photos.readonly"
// See and download all your Google Drive files
DriveReadonlyScope = "https://www.googleapis.com/auth/drive.readonly"
// Modify your Google Apps Script scripts' behavior
DriveScriptsScope = "https://www.googleapis.com/auth/drive.scripts"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/drive.appdata",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.metadata",
"https://www.googleapis.com/auth/drive.metadata.readonly",
"https://www.googleapis.com/auth/drive.photos.readonly",
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/drive.scripts",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.About = NewAboutService(s)
s.Changes = NewChangesService(s)
s.Channels = NewChannelsService(s)
s.Comments = NewCommentsService(s)
s.Drives = NewDrivesService(s)
s.Files = NewFilesService(s)
s.Permissions = NewPermissionsService(s)
s.Replies = NewRepliesService(s)
s.Revisions = NewRevisionsService(s)
s.Teamdrives = NewTeamdrivesService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
About *AboutService
Changes *ChangesService
Channels *ChannelsService
Comments *CommentsService
Drives *DrivesService
Files *FilesService
Permissions *PermissionsService
Replies *RepliesService
Revisions *RevisionsService
Teamdrives *TeamdrivesService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewAboutService(s *Service) *AboutService {
rs := &AboutService{s: s}
return rs
}
type AboutService struct {
s *Service
}
func NewChangesService(s *Service) *ChangesService {
rs := &ChangesService{s: s}
return rs
}
type ChangesService struct {
s *Service
}
func NewChannelsService(s *Service) *ChannelsService {
rs := &ChannelsService{s: s}
return rs
}
type ChannelsService struct {
s *Service
}
func NewCommentsService(s *Service) *CommentsService {
rs := &CommentsService{s: s}
return rs
}
type CommentsService struct {
s *Service
}
func NewDrivesService(s *Service) *DrivesService {
rs := &DrivesService{s: s}
return rs
}
type DrivesService struct {
s *Service
}
func NewFilesService(s *Service) *FilesService {
rs := &FilesService{s: s}
return rs
}
type FilesService struct {
s *Service
}
func NewPermissionsService(s *Service) *PermissionsService {
rs := &PermissionsService{s: s}
return rs
}
type PermissionsService struct {
s *Service
}
func NewRepliesService(s *Service) *RepliesService {
rs := &RepliesService{s: s}
return rs
}
type RepliesService struct {
s *Service
}
func NewRevisionsService(s *Service) *RevisionsService {
rs := &RevisionsService{s: s}
return rs
}
type RevisionsService struct {
s *Service
}
func NewTeamdrivesService(s *Service) *TeamdrivesService {
rs := &TeamdrivesService{s: s}
return rs
}
type TeamdrivesService struct {
s *Service
}
// About: Information about the user, the user's Drive, and system
// capabilities.
type About struct {
// AppInstalled: Whether the user has installed the requesting app.
AppInstalled bool `json:"appInstalled,omitempty"`
// CanCreateDrives: Whether the user can create shared drives.
CanCreateDrives bool `json:"canCreateDrives,omitempty"`
// CanCreateTeamDrives: Deprecated - use canCreateDrives instead.
CanCreateTeamDrives bool `json:"canCreateTeamDrives,omitempty"`
// DriveThemes: A list of themes that are supported for shared drives.
DriveThemes []*AboutDriveThemes `json:"driveThemes,omitempty"`
// ExportFormats: A map of source MIME type to possible targets for all
// supported exports.
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
// FolderColorPalette: The currently supported folder colors as RGB hex
// strings.
FolderColorPalette []string `json:"folderColorPalette,omitempty"`
// ImportFormats: A map of source MIME type to possible targets for all
// supported imports.
ImportFormats map[string][]string `json:"importFormats,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#about".
Kind string `json:"kind,omitempty"`
// MaxImportSizes: A map of maximum import sizes by MIME type, in bytes.
MaxImportSizes map[string]string `json:"maxImportSizes,omitempty"`
// MaxUploadSize: The maximum upload size in bytes.
MaxUploadSize int64 `json:"maxUploadSize,omitempty,string"`
// StorageQuota: The user's storage quota limits and usage. All fields
// are measured in bytes.
StorageQuota *AboutStorageQuota `json:"storageQuota,omitempty"`
// TeamDriveThemes: Deprecated - use driveThemes instead.
TeamDriveThemes []*AboutTeamDriveThemes `json:"teamDriveThemes,omitempty"`
// User: The authenticated user.
User *User `json:"user,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AppInstalled") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppInstalled") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *About) MarshalJSON() ([]byte, error) {
type NoMethod About
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
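// Illustrative note (not part of the generated API): ForceSendFields is how a
// caller sends a zero value that the omitempty tags above would otherwise drop.
// A minimal sketch from the caller's side, assuming srv is a *drive.Service and
// fileId names an existing file (both placeholders):
//
//   f := &drive.File{Starred: false}
//   f.ForceSendFields = []string{"Starred"} // serialize the false value instead of omitting it
//   _, err := srv.Files.Update(fileId, f).Do()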
type AboutDriveThemes struct {
// BackgroundImageLink: A link to this theme's background image.
BackgroundImageLink string `json:"backgroundImageLink,omitempty"`
// ColorRgb: The color of this theme as an RGB hex string.
ColorRgb string `json:"colorRgb,omitempty"`
// Id: The ID of the theme.
Id string `json:"id,omitempty"`
// ForceSendFields is a list of field names (e.g. "BackgroundImageLink")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BackgroundImageLink") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AboutDriveThemes) MarshalJSON() ([]byte, error) {
type NoMethod AboutDriveThemes
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AboutStorageQuota: The user's storage quota limits and usage. All
// fields are measured in bytes.
type AboutStorageQuota struct {
// Limit: The usage limit, if applicable. This will not be present if
// the user has unlimited storage.
Limit int64 `json:"limit,omitempty,string"`
// Usage: The total usage across all services.
Usage int64 `json:"usage,omitempty,string"`
// UsageInDrive: The usage by all files in Google Drive.
UsageInDrive int64 `json:"usageInDrive,omitempty,string"`
// UsageInDriveTrash: The usage by trashed files in Google Drive.
UsageInDriveTrash int64 `json:"usageInDriveTrash,omitempty,string"`
// ForceSendFields is a list of field names (e.g. "Limit") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Limit") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AboutStorageQuota) MarshalJSON() ([]byte, error) {
type NoMethod AboutStorageQuota
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type AboutTeamDriveThemes struct {
// BackgroundImageLink: Deprecated - use driveThemes/backgroundImageLink
// instead.
BackgroundImageLink string `json:"backgroundImageLink,omitempty"`
// ColorRgb: Deprecated - use driveThemes/colorRgb instead.
ColorRgb string `json:"colorRgb,omitempty"`
// Id: Deprecated - use driveThemes/id instead.
Id string `json:"id,omitempty"`
// ForceSendFields is a list of field names (e.g. "BackgroundImageLink")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BackgroundImageLink") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AboutTeamDriveThemes) MarshalJSON() ([]byte, error) {
type NoMethod AboutTeamDriveThemes
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Change: A change to a file or shared drive.
type Change struct {
// ChangeType: The type of the change. Possible values are file and
// drive.
ChangeType string `json:"changeType,omitempty"`
// Drive: The updated state of the shared drive. Present if the
// changeType is drive, the user is still a member of the shared drive,
// and the shared drive has not been deleted.
Drive *Drive `json:"drive,omitempty"`
// DriveId: The ID of the shared drive associated with this change.
DriveId string `json:"driveId,omitempty"`
// File: The updated state of the file. Present if the type is file and
// the file has not been removed from this list of changes.
File *File `json:"file,omitempty"`
// FileId: The ID of the file which has changed.
FileId string `json:"fileId,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#change".
Kind string `json:"kind,omitempty"`
// Removed: Whether the file or shared drive has been removed from this
// list of changes, for example by deletion or loss of access.
Removed bool `json:"removed,omitempty"`
// TeamDrive: Deprecated - use drive instead.
TeamDrive *TeamDrive `json:"teamDrive,omitempty"`
// TeamDriveId: Deprecated - use driveId instead.
TeamDriveId string `json:"teamDriveId,omitempty"`
// Time: The time of this change (RFC 3339 date-time).
Time string `json:"time,omitempty"`
// Type: Deprecated - use changeType instead.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "ChangeType") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ChangeType") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Change) MarshalJSON() ([]byte, error) {
type NoMethod Change
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ChangeList: A list of changes for a user.
type ChangeList struct {
// Changes: The list of changes. If nextPageToken is populated, then
// this list may be incomplete and an additional page of results should
// be fetched.
Changes []*Change `json:"changes,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#changeList".
Kind string `json:"kind,omitempty"`
// NewStartPageToken: The starting page token for future changes. This
// will be present only if the end of the current changes list has been
// reached.
NewStartPageToken string `json:"newStartPageToken,omitempty"`
// NextPageToken: The page token for the next page of changes. This will
// be absent if the end of the changes list has been reached. If the
// token is rejected for any reason, it should be discarded, and
// pagination should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Changes") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Changes") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ChangeList) MarshalJSON() ([]byte, error) {
type NoMethod ChangeList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Channel: A notification channel used to watch for resource changes.
type Channel struct {
// Address: The address where notifications are delivered for this
// channel.
Address string `json:"address,omitempty"`
// Expiration: Date and time of notification channel expiration,
// expressed as a Unix timestamp, in milliseconds. Optional.
Expiration int64 `json:"expiration,omitempty,string"`
// Id: A UUID or similar unique string that identifies this channel.
Id string `json:"id,omitempty"`
// Kind: Identifies this as a notification channel used to watch for
// changes to a resource, which is "api#channel".
Kind string `json:"kind,omitempty"`
// Params: Additional parameters controlling delivery channel behavior.
// Optional.
Params map[string]string `json:"params,omitempty"`
// Payload: A Boolean value to indicate whether payload is wanted.
// Optional.
Payload bool `json:"payload,omitempty"`
// ResourceId: An opaque ID that identifies the resource being watched
// on this channel. Stable across different API versions.
ResourceId string `json:"resourceId,omitempty"`
// ResourceUri: A version-specific identifier for the watched resource.
ResourceUri string `json:"resourceUri,omitempty"`
// Token: An arbitrary string delivered to the target address with each
// notification delivered over this channel. Optional.
Token string `json:"token,omitempty"`
// Type: The type of delivery mechanism used for this channel.
Type string `json:"type,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Address") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Address") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Channel) MarshalJSON() ([]byte, error) {
type NoMethod Channel
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
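// Illustrative note: a Channel like this is what a caller might pass to
// srv.Changes.Watch or srv.Files.Watch; the id and address below are
// placeholders, not values defined by this API.
//
//   ch := &drive.Channel{
//       Id:      "my-unique-channel-id", // any unique string, e.g. a UUID
//       Type:    "web_hook",
//       Address: "https://example.com/drive/notifications",
//   }
//   _, err := srv.Changes.Watch(startPageToken, ch).Do()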
// Comment: A comment on a file.
type Comment struct {
// Anchor: A region of the document represented as a JSON string. See
// anchor documentation for details on how to define and interpret
// anchor properties.
Anchor string `json:"anchor,omitempty"`
// Author: The author of the comment. The author's email address and
// permission ID will not be populated.
Author *User `json:"author,omitempty"`
// Content: The plain text content of the comment. This field is used
// for setting the content, while htmlContent should be displayed.
Content string `json:"content,omitempty"`
// CreatedTime: The time at which the comment was created (RFC 3339
// date-time).
CreatedTime string `json:"createdTime,omitempty"`
// Deleted: Whether the comment has been deleted. A deleted comment has
// no content.
Deleted bool `json:"deleted,omitempty"`
// HtmlContent: The content of the comment with HTML formatting.
HtmlContent string `json:"htmlContent,omitempty"`
// Id: The ID of the comment.
Id string `json:"id,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#comment".
Kind string `json:"kind,omitempty"`
// ModifiedTime: The last time the comment or any of its replies was
// modified (RFC 3339 date-time).
ModifiedTime string `json:"modifiedTime,omitempty"`
// QuotedFileContent: The file content to which the comment refers,
// typically within the anchor region. For a text file, for example,
// this would be the text at the location of the comment.
QuotedFileContent *CommentQuotedFileContent `json:"quotedFileContent,omitempty"`
// Replies: The full list of replies to the comment in chronological
// order.
Replies []*Reply `json:"replies,omitempty"`
// Resolved: Whether the comment has been resolved by one of its
// replies.
Resolved bool `json:"resolved,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Anchor") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Anchor") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Comment) MarshalJSON() ([]byte, error) {
type NoMethod Comment
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CommentQuotedFileContent: The file content to which the comment
// refers, typically within the anchor region. For a text file, for
// example, this would be the text at the location of the comment.
type CommentQuotedFileContent struct {
// MimeType: The MIME type of the quoted content.
MimeType string `json:"mimeType,omitempty"`
// Value: The quoted content itself. This is interpreted as plain text
// if set through the API.
Value string `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "MimeType") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MimeType") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CommentQuotedFileContent) MarshalJSON() ([]byte, error) {
type NoMethod CommentQuotedFileContent
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CommentList: A list of comments on a file.
type CommentList struct {
// Comments: The list of comments. If nextPageToken is populated, then
// this list may be incomplete and an additional page of results should
// be fetched.
Comments []*Comment `json:"comments,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#commentList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of comments. This
// will be absent if the end of the comments list has been reached. If
// the token is rejected for any reason, it should be discarded, and
// pagination should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Comments") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Comments") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CommentList) MarshalJSON() ([]byte, error) {
type NoMethod CommentList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Drive: Representation of a shared drive.
type Drive struct {
// BackgroundImageFile: An image file and cropping parameters from which
// a background image for this shared drive is set. This is a write only
// field; it can only be set on drive.drives.update requests that don't
// set themeId. When specified, all fields of the backgroundImageFile
// must be set.
BackgroundImageFile *DriveBackgroundImageFile `json:"backgroundImageFile,omitempty"`
// BackgroundImageLink: A short-lived link to this shared drive's
// background image.
BackgroundImageLink string `json:"backgroundImageLink,omitempty"`
// Capabilities: Capabilities the current user has on this shared drive.
Capabilities *DriveCapabilities `json:"capabilities,omitempty"`
// ColorRgb: The color of this shared drive as an RGB hex string. It can
// only be set on a drive.drives.update request that does not set
// themeId.
ColorRgb string `json:"colorRgb,omitempty"`
// CreatedTime: The time at which the shared drive was created (RFC 3339
// date-time).
CreatedTime string `json:"createdTime,omitempty"`
// Hidden: Whether the shared drive is hidden from default view.
Hidden bool `json:"hidden,omitempty"`
// Id: The ID of this shared drive which is also the ID of the top level
// folder of this shared drive.
Id string `json:"id,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#drive".
Kind string `json:"kind,omitempty"`
// Name: The name of this shared drive.
Name string `json:"name,omitempty"`
// Restrictions: A set of restrictions that apply to this shared drive
// or items inside this shared drive.
Restrictions *DriveRestrictions `json:"restrictions,omitempty"`
// ThemeId: The ID of the theme from which the background image and
// color will be set. The set of possible driveThemes can be retrieved
// from a drive.about.get response. When not specified on a
// drive.drives.create request, a random theme is chosen from which the
// background image and color are set. This is a write-only field; it
// can only be set on requests that don't set colorRgb or
// backgroundImageFile.
ThemeId string `json:"themeId,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "BackgroundImageFile")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BackgroundImageFile") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Drive) MarshalJSON() ([]byte, error) {
type NoMethod Drive
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// DriveBackgroundImageFile: An image file and cropping parameters from
// which a background image for this shared drive is set. This is a
// write only field; it can only be set on drive.drives.update requests
// that don't set themeId. When specified, all fields of the
// backgroundImageFile must be set.
type DriveBackgroundImageFile struct {
// Id: The ID of an image file in Google Drive to use for the background
// image.
Id string `json:"id,omitempty"`
// Width: The width of the cropped image in the closed range of 0 to 1.
// This value represents the width of the cropped image divided by the
// width of the entire image. The height is computed by applying a width
// to height aspect ratio of 80 to 9. The resulting image must be at
// least 1280 pixels wide and 144 pixels high.
Width float64 `json:"width,omitempty"`
// XCoordinate: The X coordinate of the upper left corner of the
// cropping area in the background image. This is a value in the closed
// range of 0 to 1. This value represents the horizontal distance from
// the left side of the entire image to the left side of the cropping
// area divided by the width of the entire image.
XCoordinate float64 `json:"xCoordinate,omitempty"`
// YCoordinate: The Y coordinate of the upper left corner of the
// cropping area in the background image. This is a value in the closed
// range of 0 to 1. This value represents the vertical distance from the
// top side of the entire image to the top side of the cropping area
// divided by the height of the entire image.
YCoordinate float64 `json:"yCoordinate,omitempty"`
// ForceSendFields is a list of field names (e.g. "Id") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Id") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DriveBackgroundImageFile) MarshalJSON() ([]byte, error) {
type NoMethod DriveBackgroundImageFile
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *DriveBackgroundImageFile) UnmarshalJSON(data []byte) error {
type NoMethod DriveBackgroundImageFile
var s1 struct {
Width gensupport.JSONFloat64 `json:"width"`
XCoordinate gensupport.JSONFloat64 `json:"xCoordinate"`
YCoordinate gensupport.JSONFloat64 `json:"yCoordinate"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Width = float64(s1.Width)
s.XCoordinate = float64(s1.XCoordinate)
s.YCoordinate = float64(s1.YCoordinate)
return nil
}
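// Illustrative note: per the field docs above, Width, XCoordinate and
// YCoordinate are fractions of the source image. A sketch of a full-width crop
// anchored at the top-left corner (imageFileId, driveId and srv are
// placeholders):
//
//   bg := &drive.DriveBackgroundImageFile{
//       Id:    imageFileId, // an image file already stored in Drive
//       Width: 1.0,
//   }
//   // the zero-valued coordinates would be dropped by omitempty, so force-send them
//   bg.ForceSendFields = []string{"XCoordinate", "YCoordinate"}
//   d := &drive.Drive{BackgroundImageFile: bg}
//   _, err := srv.Drives.Update(driveId, d).Do()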
// DriveCapabilities: Capabilities the current user has on this shared
// drive.
type DriveCapabilities struct {
// CanAddChildren: Whether the current user can add children to folders
// in this shared drive.
CanAddChildren bool `json:"canAddChildren,omitempty"`
// CanChangeCopyRequiresWriterPermissionRestriction: Whether the current
// user can change the copyRequiresWriterPermission restriction of this
// shared drive.
CanChangeCopyRequiresWriterPermissionRestriction bool `json:"canChangeCopyRequiresWriterPermissionRestriction,omitempty"`
// CanChangeDomainUsersOnlyRestriction: Whether the current user can
// change the domainUsersOnly restriction of this shared drive.
CanChangeDomainUsersOnlyRestriction bool `json:"canChangeDomainUsersOnlyRestriction,omitempty"`
// CanChangeDriveBackground: Whether the current user can change the
// background of this shared drive.
CanChangeDriveBackground bool `json:"canChangeDriveBackground,omitempty"`
// CanChangeDriveMembersOnlyRestriction: Whether the current user can
// change the driveMembersOnly restriction of this shared drive.
CanChangeDriveMembersOnlyRestriction bool `json:"canChangeDriveMembersOnlyRestriction,omitempty"`
// CanComment: Whether the current user can comment on files in this
// shared drive.
CanComment bool `json:"canComment,omitempty"`
// CanCopy: Whether the current user can copy files in this shared
// drive.
CanCopy bool `json:"canCopy,omitempty"`
// CanDeleteChildren: Whether the current user can delete children from
// folders in this shared drive.
CanDeleteChildren bool `json:"canDeleteChildren,omitempty"`
// CanDeleteDrive: Whether the current user can delete this shared
// drive. Attempting to delete the shared drive may still fail if there
// are untrashed items inside the shared drive.
CanDeleteDrive bool `json:"canDeleteDrive,omitempty"`
// CanDownload: Whether the current user can download files in this
// shared drive.
CanDownload bool `json:"canDownload,omitempty"`
// CanEdit: Whether the current user can edit files in this shared drive
CanEdit bool `json:"canEdit,omitempty"`
// CanListChildren: Whether the current user can list the children of
// folders in this shared drive.
CanListChildren bool `json:"canListChildren,omitempty"`
// CanManageMembers: Whether the current user can add members to this
// shared drive or remove them or change their role.
CanManageMembers bool `json:"canManageMembers,omitempty"`
// CanReadRevisions: Whether the current user can read the revisions
// resource of files in this shared drive.
CanReadRevisions bool `json:"canReadRevisions,omitempty"`
// CanRename: Whether the current user can rename files or folders in
// this shared drive.
CanRename bool `json:"canRename,omitempty"`
// CanRenameDrive: Whether the current user can rename this shared
// drive.
CanRenameDrive bool `json:"canRenameDrive,omitempty"`
// CanShare: Whether the current user can share files or folders in this
// shared drive.
CanShare bool `json:"canShare,omitempty"`
// CanTrashChildren: Whether the current user can trash children from
// folders in this shared drive.
CanTrashChildren bool `json:"canTrashChildren,omitempty"`
// ForceSendFields is a list of field names (e.g. "CanAddChildren") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CanAddChildren") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *DriveCapabilities) MarshalJSON() ([]byte, error) {
type NoMethod DriveCapabilities
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// DriveRestrictions: A set of restrictions that apply to this shared
// drive or items inside this shared drive.
type DriveRestrictions struct {
// AdminManagedRestrictions: Whether administrative privileges on this
// shared drive are required to modify restrictions.
AdminManagedRestrictions bool `json:"adminManagedRestrictions,omitempty"`
// CopyRequiresWriterPermission: Whether the options to copy, print, or
// download files inside this shared drive, should be disabled for
// readers and commenters. When this restriction is set to true, it will
// override the similarly named field to true for any file inside this
// shared drive.
CopyRequiresWriterPermission bool `json:"copyRequiresWriterPermission,omitempty"`
// DomainUsersOnly: Whether access to this shared drive and items inside
// this shared drive is restricted to users of the domain to which this
// shared drive belongs. This restriction may be overridden by other
// sharing policies controlled outside of this shared drive.
DomainUsersOnly bool `json:"domainUsersOnly,omitempty"`
// DriveMembersOnly: Whether access to items inside this shared drive is
// restricted to its members.
DriveMembersOnly bool `json:"driveMembersOnly,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AdminManagedRestrictions") to unconditionally include in API
// requests. By default, fields with empty values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AdminManagedRestrictions")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *DriveRestrictions) MarshalJSON() ([]byte, error) {
type NoMethod DriveRestrictions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// DriveList: A list of shared drives.
type DriveList struct {
// Drives: The list of shared drives. If nextPageToken is populated,
// then this list may be incomplete and an additional page of results
// should be fetched.
Drives []*Drive `json:"drives,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#driveList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of shared drives.
// This will be absent if the end of the list has been reached. If the
// token is rejected for any reason, it should be discarded, and
// pagination should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Drives") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Drives") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DriveList) MarshalJSON() ([]byte, error) {
type NoMethod DriveList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// File: The metadata for a file.
type File struct {
// AppProperties: A collection of arbitrary key-value pairs which are
// private to the requesting app.
// Entries with null values are cleared in update and copy requests.
AppProperties map[string]string `json:"appProperties,omitempty"`
// Capabilities: Capabilities the current user has on this file. Each
// capability corresponds to a fine-grained action that a user may take.
Capabilities *FileCapabilities `json:"capabilities,omitempty"`
// ContentHints: Additional information about the content of the file.
// These fields are never populated in responses.
ContentHints *FileContentHints `json:"contentHints,omitempty"`
// CopyRequiresWriterPermission: Whether the options to copy, print, or
// download this file, should be disabled for readers and commenters.
CopyRequiresWriterPermission bool `json:"copyRequiresWriterPermission,omitempty"`
// CreatedTime: The time at which the file was created (RFC 3339
// date-time).
CreatedTime string `json:"createdTime,omitempty"`
// Description: A short description of the file.
Description string `json:"description,omitempty"`
// DriveId: ID of the shared drive the file resides in. Only populated
// for items in shared drives.
DriveId string `json:"driveId,omitempty"`
// ExplicitlyTrashed: Whether the file has been explicitly trashed, as
// opposed to recursively trashed from a parent folder.
ExplicitlyTrashed bool `json:"explicitlyTrashed,omitempty"`
// ExportLinks: Links for exporting Google Docs to specific formats.
ExportLinks map[string]string `json:"exportLinks,omitempty"`
// FileExtension: The final component of fullFileExtension. This is only
// available for files with binary content in Google Drive.
FileExtension string `json:"fileExtension,omitempty"`
// FolderColorRgb: The color for a folder as an RGB hex string. The
// supported colors are published in the folderColorPalette field of the
// About resource.
// If an unsupported color is specified, the closest color in the
// palette will be used instead.
FolderColorRgb string `json:"folderColorRgb,omitempty"`
// FullFileExtension: The full file extension extracted from the name
// field. May contain multiple concatenated extensions, such as
// "tar.gz". This is only available for files with binary content in
// Google Drive.
// This is automatically updated when the name field changes, however it
// is not cleared if the new name does not contain a valid extension.
FullFileExtension string `json:"fullFileExtension,omitempty"`
// HasAugmentedPermissions: Whether there are permissions directly on
// this file. This field is only populated for items in shared drives.
HasAugmentedPermissions bool `json:"hasAugmentedPermissions,omitempty"`
// HasThumbnail: Whether this file has a thumbnail. This does not
// indicate whether the requesting app has access to the thumbnail. To
// check access, look for the presence of the thumbnailLink field.
HasThumbnail bool `json:"hasThumbnail,omitempty"`
// HeadRevisionId: The ID of the file's head revision. This is currently
// only available for files with binary content in Google Drive.
HeadRevisionId string `json:"headRevisionId,omitempty"`
// IconLink: A static, unauthenticated link to the file's icon.
IconLink string `json:"iconLink,omitempty"`
// Id: The ID of the file.
Id string `json:"id,omitempty"`
// ImageMediaMetadata: Additional metadata about image media, if
// available.
ImageMediaMetadata *FileImageMediaMetadata `json:"imageMediaMetadata,omitempty"`
// IsAppAuthorized: Whether the file was created or opened by the
// requesting app.
IsAppAuthorized bool `json:"isAppAuthorized,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#file".
Kind string `json:"kind,omitempty"`
// LastModifyingUser: The last user to modify the file.
LastModifyingUser *User `json:"lastModifyingUser,omitempty"`
// Md5Checksum: The MD5 checksum for the content of the file. This is
// only applicable to files with binary content in Google Drive.
Md5Checksum string `json:"md5Checksum,omitempty"`
// MimeType: The MIME type of the file.
// Google Drive will attempt to automatically detect an appropriate
// value from uploaded content if no value is provided. The value cannot
// be changed unless a new revision is uploaded.
// If a file is created with a Google Doc MIME type, the uploaded
// content will be imported if possible. The supported import formats
// are published in the About resource.
MimeType string `json:"mimeType,omitempty"`
// ModifiedByMe: Whether the file has been modified by this user.
ModifiedByMe bool `json:"modifiedByMe,omitempty"`
// ModifiedByMeTime: The last time the file was modified by the user
// (RFC 3339 date-time).
ModifiedByMeTime string `json:"modifiedByMeTime,omitempty"`
// ModifiedTime: The last time the file was modified by anyone (RFC 3339
// date-time).
// Note that setting modifiedTime will also update modifiedByMeTime for
// the user.
ModifiedTime string `json:"modifiedTime,omitempty"`
// Name: The name of the file. This is not necessarily unique within a
// folder. Note that for immutable items such as the top level folders
// of shared drives, My Drive root folder, and Application Data folder
// the name is constant.
Name string `json:"name,omitempty"`
// OriginalFilename: The original filename of the uploaded content if
// available, or else the original value of the name field. This is only
// available for files with binary content in Google Drive.
OriginalFilename string `json:"originalFilename,omitempty"`
// OwnedByMe: Whether the user owns the file. Not populated for items in
// shared drives.
OwnedByMe bool `json:"ownedByMe,omitempty"`
// Owners: The owners of the file. Currently, only certain legacy files
// may have more than one owner. Not populated for items in shared
// drives.
Owners []*User `json:"owners,omitempty"`
// Parents: The IDs of the parent folders which contain the file.
// If not specified as part of a create request, the file will be placed
// directly in the user's My Drive folder. If not specified as part of a
// copy request, the file will inherit any discoverable parents of the
// source file. Update requests must use the addParents and
// removeParents parameters to modify the parents list.
Parents []string `json:"parents,omitempty"`
// PermissionIds: List of permission IDs for users with access to this
// file.
PermissionIds []string `json:"permissionIds,omitempty"`
// Permissions: The full list of permissions for the file. This is only
// available if the requesting user can share the file. Not populated
// for items in shared drives.
Permissions []*Permission `json:"permissions,omitempty"`
// Properties: A collection of arbitrary key-value pairs which are
// visible to all apps.
// Entries with null values are cleared in update and copy requests.
Properties map[string]string `json:"properties,omitempty"`
// QuotaBytesUsed: The number of storage quota bytes used by the file.
// This includes the head revision as well as previous revisions with
// keepForever enabled.
QuotaBytesUsed int64 `json:"quotaBytesUsed,omitempty,string"`
// Shared: Whether the file has been shared. Not populated for items in
// shared drives.
Shared bool `json:"shared,omitempty"`
// SharedWithMeTime: The time at which the file was shared with the
// user, if applicable (RFC 3339 date-time).
SharedWithMeTime string `json:"sharedWithMeTime,omitempty"`
// SharingUser: The user who shared the file with the requesting user,
// if applicable.
SharingUser *User `json:"sharingUser,omitempty"`
// Size: The size of the file's content in bytes. This is only
// applicable to files with binary content in Google Drive.
Size int64 `json:"size,omitempty,string"`
// Spaces: The list of spaces which contain the file. The currently
// supported values are 'drive', 'appDataFolder' and 'photos'.
Spaces []string `json:"spaces,omitempty"`
// Starred: Whether the user has starred the file.
Starred bool `json:"starred,omitempty"`
// TeamDriveId: Deprecated - use driveId instead.
TeamDriveId string `json:"teamDriveId,omitempty"`
// ThumbnailLink: A short-lived link to the file's thumbnail, if
// available. Typically lasts on the order of hours. Only populated when
// the requesting app can access the file's content.
ThumbnailLink string `json:"thumbnailLink,omitempty"`
// ThumbnailVersion: The thumbnail version for use in thumbnail cache
// invalidation.
ThumbnailVersion int64 `json:"thumbnailVersion,omitempty,string"`
// Trashed: Whether the file has been trashed, either explicitly or from
// a trashed parent folder. Only the owner may trash a file, and other
// users cannot see files in the owner's trash.
Trashed bool `json:"trashed,omitempty"`
// TrashedTime: The time that the item was trashed (RFC 3339 date-time).
// Only populated for items in shared drives.
TrashedTime string `json:"trashedTime,omitempty"`
// TrashingUser: If the file has been explicitly trashed, the user who
// trashed it. Only populated for items in shared drives.
TrashingUser *User `json:"trashingUser,omitempty"`
// Version: A monotonically increasing version number for the file. This
// reflects every change made to the file on the server, even those not
// visible to the user.
Version int64 `json:"version,omitempty,string"`
// VideoMediaMetadata: Additional metadata about video media. This may
// not be available immediately upon upload.
VideoMediaMetadata *FileVideoMediaMetadata `json:"videoMediaMetadata,omitempty"`
// ViewedByMe: Whether the file has been viewed by this user.
ViewedByMe bool `json:"viewedByMe,omitempty"`
// ViewedByMeTime: The last time the file was viewed by the user (RFC
// 3339 date-time).
ViewedByMeTime string `json:"viewedByMeTime,omitempty"`
// ViewersCanCopyContent: Deprecated - use copyRequiresWriterPermission
// instead.
ViewersCanCopyContent bool `json:"viewersCanCopyContent,omitempty"`
// WebContentLink: A link for downloading the content of the file in a
// browser. This is only available for files with binary content in
// Google Drive.
WebContentLink string `json:"webContentLink,omitempty"`
// WebViewLink: A link for opening the file in a relevant Google editor
// or viewer in a browser.
WebViewLink string `json:"webViewLink,omitempty"`
// WritersCanShare: Whether users with only writer permission can modify
// the file's permissions. Not populated for items in shared drives.
WritersCanShare bool `json:"writersCanShare,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AppProperties") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppProperties") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *File) MarshalJSON() ([]byte, error) {
type NoMethod File
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// FileCapabilities: Capabilities the current user has on this file.
// Each capability corresponds to a fine-grained action that a user may
// take.
type FileCapabilities struct {
// CanAddChildren: Whether the current user can add children to this
// folder. This is always false when the item is not a folder.
CanAddChildren bool `json:"canAddChildren,omitempty"`
// CanAddMyDriveParent: Whether the current user can add a parent for
// the item without removing an existing parent in the same request. Not
// populated for shared drive files.
CanAddMyDriveParent bool `json:"canAddMyDriveParent,omitempty"`
// CanChangeCopyRequiresWriterPermission: Whether the current user can
// change the copyRequiresWriterPermission restriction of this file.
CanChangeCopyRequiresWriterPermission bool `json:"canChangeCopyRequiresWriterPermission,omitempty"`
// CanChangeViewersCanCopyContent: Deprecated
CanChangeViewersCanCopyContent bool `json:"canChangeViewersCanCopyContent,omitempty"`
// CanComment: Whether the current user can comment on this file.
CanComment bool `json:"canComment,omitempty"`
// CanCopy: Whether the current user can copy this file. For an item in
// a shared drive, whether the current user can copy non-folder
// descendants of this item, or this item itself if it is not a folder.
CanCopy bool `json:"canCopy,omitempty"`
// CanDelete: Whether the current user can delete this file.
CanDelete bool `json:"canDelete,omitempty"`
// CanDeleteChildren: Whether the current user can delete children of
// this folder. This is false when the item is not a folder. Only
// populated for items in shared drives.
CanDeleteChildren bool `json:"canDeleteChildren,omitempty"`
// CanDownload: Whether the current user can download this file.
CanDownload bool `json:"canDownload,omitempty"`
// CanEdit: Whether the current user can edit this file. Other factors
// may limit the type of changes a user can make to a file. For example,
// see canChangeCopyRequiresWriterPermission or canModifyContent.
CanEdit bool `json:"canEdit,omitempty"`
// CanListChildren: Whether the current user can list the children of
// this folder. This is always false when the item is not a folder.
CanListChildren bool `json:"canListChildren,omitempty"`
// CanModifyContent: Whether the current user can modify the content of
// this file.
CanModifyContent bool `json:"canModifyContent,omitempty"`
// CanMoveChildrenOutOfDrive: Whether the current user can move children
// of this folder outside of the shared drive. This is false when the
// item is not a folder. Only populated for items in shared drives.
CanMoveChildrenOutOfDrive bool `json:"canMoveChildrenOutOfDrive,omitempty"`
// CanMoveChildrenOutOfTeamDrive: Deprecated - use
// canMoveChildrenOutOfDrive instead.
CanMoveChildrenOutOfTeamDrive bool `json:"canMoveChildrenOutOfTeamDrive,omitempty"`
// CanMoveChildrenWithinDrive: Whether the current user can move
// children of this folder within the shared drive. This is false when
// the item is not a folder. Only populated for items in shared drives.
CanMoveChildrenWithinDrive bool `json:"canMoveChildrenWithinDrive,omitempty"`
// CanMoveChildrenWithinTeamDrive: Deprecated - use
// canMoveChildrenWithinDrive instead.
CanMoveChildrenWithinTeamDrive bool `json:"canMoveChildrenWithinTeamDrive,omitempty"`
// CanMoveItemIntoTeamDrive: Deprecated - use canMoveItemOutOfDrive
// instead.
CanMoveItemIntoTeamDrive bool `json:"canMoveItemIntoTeamDrive,omitempty"`
// CanMoveItemOutOfDrive: Whether the current user can move this item
// outside of this drive by changing its parent. Note that a request to
// change the parent of the item may still fail depending on the new
// parent that is being added.
CanMoveItemOutOfDrive bool `json:"canMoveItemOutOfDrive,omitempty"`
// CanMoveItemOutOfTeamDrive: Deprecated - use canMoveItemOutOfDrive
// instead.
CanMoveItemOutOfTeamDrive bool `json:"canMoveItemOutOfTeamDrive,omitempty"`
// CanMoveItemWithinDrive: Whether the current user can move this item
// within this shared drive. Note that a request to change the parent of
// the item may still fail depending on the new parent that is being
// added. Only populated for items in shared drives.
CanMoveItemWithinDrive bool `json:"canMoveItemWithinDrive,omitempty"`
// CanMoveItemWithinTeamDrive: Deprecated - use canMoveItemWithinDrive
// instead.
CanMoveItemWithinTeamDrive bool `json:"canMoveItemWithinTeamDrive,omitempty"`
// CanMoveTeamDriveItem: Deprecated - use canMoveItemWithinDrive or
// canMoveItemOutOfDrive instead.
CanMoveTeamDriveItem bool `json:"canMoveTeamDriveItem,omitempty"`
// CanReadDrive: Whether the current user can read the shared drive to
// which this file belongs. Only populated for items in shared drives.
CanReadDrive bool `json:"canReadDrive,omitempty"`
// CanReadRevisions: Whether the current user can read the revisions
// resource of this file. For a shared drive item, whether revisions of
// non-folder descendants of this item, or this item itself if it is not
// a folder, can be read.
CanReadRevisions bool `json:"canReadRevisions,omitempty"`
// CanReadTeamDrive: Deprecated - use canReadDrive instead.
CanReadTeamDrive bool `json:"canReadTeamDrive,omitempty"`
// CanRemoveChildren: Whether the current user can remove children from
// this folder. This is always false when the item is not a folder. For
// a folder in a shared drive, use canDeleteChildren or canTrashChildren
// instead.
CanRemoveChildren bool `json:"canRemoveChildren,omitempty"`
// CanRemoveMyDriveParent: Whether the current user can remove a parent
// from the item without adding another parent in the same request. Not
// populated for shared drive files.
CanRemoveMyDriveParent bool `json:"canRemoveMyDriveParent,omitempty"`
// CanRename: Whether the current user can rename this file.
CanRename bool `json:"canRename,omitempty"`
// CanShare: Whether the current user can modify the sharing settings
// for this file.
CanShare bool `json:"canShare,omitempty"`
// CanTrash: Whether the current user can move this file to trash.
CanTrash bool `json:"canTrash,omitempty"`
// CanTrashChildren: Whether the current user can trash children of this
// folder. This is false when the item is not a folder. Only populated
// for items in shared drives.
CanTrashChildren bool `json:"canTrashChildren,omitempty"`
// CanUntrash: Whether the current user can restore this file from
// trash.
CanUntrash bool `json:"canUntrash,omitempty"`
// ForceSendFields is a list of field names (e.g. "CanAddChildren") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CanAddChildren") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *FileCapabilities) MarshalJSON() ([]byte, error) {
type NoMethod FileCapabilities
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// FileContentHints: Additional information about the content of the
// file. These fields are never populated in responses.
type FileContentHints struct {
// IndexableText: Text to be indexed for the file to improve fullText
// queries. This is limited to 128KB in length and may contain HTML
// elements.
IndexableText string `json:"indexableText,omitempty"`
// Thumbnail: A thumbnail for the file. This will only be used if Google
// Drive cannot generate a standard thumbnail.
Thumbnail *FileContentHintsThumbnail `json:"thumbnail,omitempty"`
// ForceSendFields is a list of field names (e.g. "IndexableText") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "IndexableText") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *FileContentHints) MarshalJSON() ([]byte, error) {
type NoMethod FileContentHints
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// FileContentHintsThumbnail: A thumbnail for the file. This will only
// be used if Google Drive cannot generate a standard thumbnail.
type FileContentHintsThumbnail struct {
// Image: The thumbnail data encoded with URL-safe Base64 (RFC 4648
// section 5).
Image string `json:"image,omitempty"`
// MimeType: The MIME type of the thumbnail.
MimeType string `json:"mimeType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Image") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Image") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *FileContentHintsThumbnail) MarshalJSON() ([]byte, error) {
type NoMethod FileContentHintsThumbnail
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// FileImageMediaMetadata: Additional metadata about image media, if
// available.
type FileImageMediaMetadata struct {
// Aperture: The aperture used to create the photo (f-number).
Aperture float64 `json:"aperture,omitempty"`
// CameraMake: The make of the camera used to create the photo.
CameraMake string `json:"cameraMake,omitempty"`
// CameraModel: The model of the camera used to create the photo.
CameraModel string `json:"cameraModel,omitempty"`
// ColorSpace: The color space of the photo.
ColorSpace string `json:"colorSpace,omitempty"`
// ExposureBias: The exposure bias of the photo (APEX value).
ExposureBias float64 `json:"exposureBias,omitempty"`
// ExposureMode: The exposure mode used to create the photo.
ExposureMode string `json:"exposureMode,omitempty"`
// ExposureTime: The length of the exposure, in seconds.
ExposureTime float64 `json:"exposureTime,omitempty"`
// FlashUsed: Whether a flash was used to create the photo.
FlashUsed bool `json:"flashUsed,omitempty"`
// FocalLength: The focal length used to create the photo, in
// millimeters.
FocalLength float64 `json:"focalLength,omitempty"`
// Height: The height of the image in pixels.
Height int64 `json:"height,omitempty"`
// IsoSpeed: The ISO speed used to create the photo.
IsoSpeed int64 `json:"isoSpeed,omitempty"`
// Lens: The lens used to create the photo.
Lens string `json:"lens,omitempty"`
// Location: Geographic location information stored in the image.
Location *FileImageMediaMetadataLocation `json:"location,omitempty"`
// MaxApertureValue: The smallest f-number of the lens at the focal
// length used to create the photo (APEX value).
MaxApertureValue float64 `json:"maxApertureValue,omitempty"`
// MeteringMode: The metering mode used to create the photo.
MeteringMode string `json:"meteringMode,omitempty"`
// Rotation: The rotation in clockwise degrees from the image's original
// orientation.
Rotation int64 `json:"rotation,omitempty"`
// Sensor: The type of sensor used to create the photo.
Sensor string `json:"sensor,omitempty"`
// SubjectDistance: The distance to the subject of the photo, in meters.
SubjectDistance int64 `json:"subjectDistance,omitempty"`
// Time: The date and time the photo was taken (EXIF DateTime).
Time string `json:"time,omitempty"`
// WhiteBalance: The white balance mode used to create the photo.
WhiteBalance string `json:"whiteBalance,omitempty"`
// Width: The width of the image in pixels.
Width int64 `json:"width,omitempty"`
// ForceSendFields is a list of field names (e.g. "Aperture") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Aperture") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *FileImageMediaMetadata) MarshalJSON() ([]byte, error) {
type NoMethod FileImageMediaMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *FileImageMediaMetadata) UnmarshalJSON(data []byte) error {
type NoMethod FileImageMediaMetadata
var s1 struct {
Aperture gensupport.JSONFloat64 `json:"aperture"`
ExposureBias gensupport.JSONFloat64 `json:"exposureBias"`
ExposureTime gensupport.JSONFloat64 `json:"exposureTime"`
FocalLength gensupport.JSONFloat64 `json:"focalLength"`
MaxApertureValue gensupport.JSONFloat64 `json:"maxApertureValue"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Aperture = float64(s1.Aperture)
s.ExposureBias = float64(s1.ExposureBias)
s.ExposureTime = float64(s1.ExposureTime)
s.FocalLength = float64(s1.FocalLength)
s.MaxApertureValue = float64(s1.MaxApertureValue)
return nil
}
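// exampleDecodeImageMetadata is an illustrative sketch: it shows why the
// custom UnmarshalJSON above routes the float fields through
// gensupport.JSONFloat64, which accepts the value whether the server
// serializes it as a JSON number or as a quoted string. The input literal
// and helper name are assumptions for illustration only.
func exampleDecodeImageMetadata() (*FileImageMediaMetadata, error) {
	data := []byte(`{"aperture": "2.8", "exposureTime": 0.008, "width": 4032}`)
	var m FileImageMediaMetadata
	if err := json.Unmarshal(data, &m); err != nil {
		return nil, err
	}
	return &m, nil // m.Aperture == 2.8 even though it arrived as a string
}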
// FileImageMediaMetadataLocation: Geographic location information
// stored in the image.
type FileImageMediaMetadataLocation struct {
// Altitude: The altitude stored in the image.
Altitude float64 `json:"altitude,omitempty"`
// Latitude: The latitude stored in the image.
Latitude float64 `json:"latitude,omitempty"`
// Longitude: The longitude stored in the image.
Longitude float64 `json:"longitude,omitempty"`
// ForceSendFields is a list of field names (e.g. "Altitude") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Altitude") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *FileImageMediaMetadataLocation) MarshalJSON() ([]byte, error) {
type NoMethod FileImageMediaMetadataLocation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *FileImageMediaMetadataLocation) UnmarshalJSON(data []byte) error {
type NoMethod FileImageMediaMetadataLocation
var s1 struct {
Altitude gensupport.JSONFloat64 `json:"altitude"`
Latitude gensupport.JSONFloat64 `json:"latitude"`
Longitude gensupport.JSONFloat64 `json:"longitude"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Altitude = float64(s1.Altitude)
s.Latitude = float64(s1.Latitude)
s.Longitude = float64(s1.Longitude)
return nil
}
// FileVideoMediaMetadata: Additional metadata about video media. This
// may not be available immediately upon upload.
type FileVideoMediaMetadata struct {
// DurationMillis: The duration of the video in milliseconds.
DurationMillis int64 `json:"durationMillis,omitempty,string"`
// Height: The height of the video in pixels.
Height int64 `json:"height,omitempty"`
// Width: The width of the video in pixels.
Width int64 `json:"width,omitempty"`
// ForceSendFields is a list of field names (e.g. "DurationMillis") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DurationMillis") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *FileVideoMediaMetadata) MarshalJSON() ([]byte, error) {
type NoMethod FileVideoMediaMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// FileList: A list of files.
type FileList struct {
// Files: The list of files. If nextPageToken is populated, then this
// list may be incomplete and an additional page of results should be
// fetched.
Files []*File `json:"files,omitempty"`
// IncompleteSearch: Whether the search process was incomplete. If true,
// then some search results may be missing, since not all documents were
// searched. This may occur when searching multiple drives with the
// "allDrives" corpora, but not all corpora could be searched. When this
// happens, it is suggested that clients narrow their query by choosing a
// different corpus such as "user" or "drive".
IncompleteSearch bool `json:"incompleteSearch,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#fileList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of files. This will
// be absent if the end of the files list has been reached. If the token
// is rejected for any reason, it should be discarded, and pagination
// should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Files") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Files") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *FileList) MarshalJSON() ([]byte, error) {
type NoMethod FileList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GeneratedIds: A list of generated file IDs which can be provided in
// create requests.
type GeneratedIds struct {
// Ids: The IDs generated for the requesting user in the specified
// space.
Ids []string `json:"ids,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#generatedIds".
Kind string `json:"kind,omitempty"`
// Space: The type of file that can be created with these IDs.
Space string `json:"space,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Ids") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Ids") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GeneratedIds) MarshalJSON() ([]byte, error) {
type NoMethod GeneratedIds
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Permission: A permission for a file. A permission grants a user,
// group, domain or the world access to a file or a folder hierarchy.
type Permission struct {
// AllowFileDiscovery: Whether the permission allows the file to be
// discovered through search. This is only applicable for permissions of
// type domain or anyone.
AllowFileDiscovery bool `json:"allowFileDiscovery,omitempty"`
// Deleted: Whether the account associated with this permission has been
// deleted. This field only pertains to user and group permissions.
Deleted bool `json:"deleted,omitempty"`
// DisplayName: The "pretty" name of the value of the permission. The
// following is a list of examples for each type of permission:
// - user - User's full name, as defined for their Google account, such
// as "Joe Smith."
// - group - Name of the Google Group, such as "The Company
// Administrators."
// - domain - String domain name, such as "thecompany.com."
// - anyone - No displayName is present.
DisplayName string `json:"displayName,omitempty"`
// Domain: The domain to which this permission refers.
Domain string `json:"domain,omitempty"`
// EmailAddress: The email address of the user or group to which this
// permission refers.
EmailAddress string `json:"emailAddress,omitempty"`
// ExpirationTime: The time at which this permission will expire (RFC
// 3339 date-time). Expiration times have the following restrictions:
//
// - They can only be set on user and group permissions
// - The time must be in the future
// - The time cannot be more than a year in the future
ExpirationTime string `json:"expirationTime,omitempty"`
// Id: The ID of this permission. This is a unique identifier for the
// grantee, and is published in User resources as permissionId. IDs
// should be treated as opaque values.
Id string `json:"id,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#permission".
Kind string `json:"kind,omitempty"`
// PermissionDetails: Details of whether the permissions on this shared
// drive item are inherited or directly on this item. This is an
// output-only field which is present only for shared drive items.
PermissionDetails []*PermissionPermissionDetails `json:"permissionDetails,omitempty"`
// PhotoLink: A link to the user's profile photo, if available.
PhotoLink string `json:"photoLink,omitempty"`
// Role: The role granted by this permission. While new values may be
// supported in the future, the following are currently allowed:
// - owner
// - organizer
// - fileOrganizer
// - writer
// - commenter
// - reader
Role string `json:"role,omitempty"`
// TeamDrivePermissionDetails: Deprecated - use permissionDetails
// instead.
TeamDrivePermissionDetails []*PermissionTeamDrivePermissionDetails `json:"teamDrivePermissionDetails,omitempty"`
// Type: The type of the grantee. Valid values are:
// - user
// - group
// - domain
// - anyone When creating a permission, if type is user or group, you
// must provide an emailAddress for the user or group. When type is
// domain, you must provide a domain. There isn't extra information
// required for an anyone type.
Type string `json:"type,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AllowFileDiscovery")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllowFileDiscovery") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Permission) MarshalJSON() ([]byte, error) {
type NoMethod Permission
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type PermissionPermissionDetails struct {
// Inherited: Whether this permission is inherited. This field is always
// populated. This is an output-only field.
Inherited bool `json:"inherited,omitempty"`
// InheritedFrom: The ID of the item from which this permission is
// inherited. This is an output-only field and is only populated for
// members of the shared drive.
InheritedFrom string `json:"inheritedFrom,omitempty"`
// PermissionType: The permission type for this user. While new values
// may be added in future, the following are currently possible:
// - file
// - member
PermissionType string `json:"permissionType,omitempty"`
// Role: The primary role for this user. While new values may be added
// in the future, the following are currently possible:
// - organizer
// - fileOrganizer
// - writer
// - commenter
// - reader
Role string `json:"role,omitempty"`
// ForceSendFields is a list of field names (e.g. "Inherited") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Inherited") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *PermissionPermissionDetails) MarshalJSON() ([]byte, error) {
type NoMethod PermissionPermissionDetails
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type PermissionTeamDrivePermissionDetails struct {
// Inherited: Deprecated - use permissionDetails/inherited instead.
Inherited bool `json:"inherited,omitempty"`
// InheritedFrom: Deprecated - use permissionDetails/inheritedFrom
// instead.
InheritedFrom string `json:"inheritedFrom,omitempty"`
// Role: Deprecated - use permissionDetails/role instead.
Role string `json:"role,omitempty"`
// TeamDrivePermissionType: Deprecated - use
// permissionDetails/permissionType instead.
TeamDrivePermissionType string `json:"teamDrivePermissionType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Inherited") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Inherited") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *PermissionTeamDrivePermissionDetails) MarshalJSON() ([]byte, error) {
type NoMethod PermissionTeamDrivePermissionDetails
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// PermissionList: A list of permissions for a file.
type PermissionList struct {
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#permissionList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of permissions. This
// field will be absent if the end of the permissions list has been
// reached. If the token is rejected for any reason, it should be
// discarded, and pagination should be restarted from the first page of
// results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Permissions: The list of permissions. If nextPageToken is populated,
// then this list may be incomplete and an additional page of results
// should be fetched.
Permissions []*Permission `json:"permissions,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *PermissionList) MarshalJSON() ([]byte, error) {
type NoMethod PermissionList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Reply: A reply to a comment on a file.
type Reply struct {
// Action: The action the reply performed to the parent comment. Valid
// values are:
// - resolve
// - reopen
Action string `json:"action,omitempty"`
// Author: The author of the reply. The author's email address and
// permission ID will not be populated.
Author *User `json:"author,omitempty"`
// Content: The plain text content of the reply. This field is used for
// setting the content, while htmlContent should be displayed. This is
// required on creates if no action is specified.
Content string `json:"content,omitempty"`
// CreatedTime: The time at which the reply was created (RFC 3339
// date-time).
CreatedTime string `json:"createdTime,omitempty"`
// Deleted: Whether the reply has been deleted. A deleted reply has no
// content.
Deleted bool `json:"deleted,omitempty"`
// HtmlContent: The content of the reply with HTML formatting.
HtmlContent string `json:"htmlContent,omitempty"`
// Id: The ID of the reply.
Id string `json:"id,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#reply".
Kind string `json:"kind,omitempty"`
// ModifiedTime: The last time the reply was modified (RFC 3339
// date-time).
ModifiedTime string `json:"modifiedTime,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Action") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Action") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Reply) MarshalJSON() ([]byte, error) {
type NoMethod Reply
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ReplyList: A list of replies to a comment on a file.
type ReplyList struct {
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#replyList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of replies. This will
// be absent if the end of the replies list has been reached. If the
// token is rejected for any reason, it should be discarded, and
// pagination should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Replies: The list of replies. If nextPageToken is populated, then
// this list may be incomplete and an additional page of results should
// be fetched.
Replies []*Reply `json:"replies,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ReplyList) MarshalJSON() ([]byte, error) {
type NoMethod ReplyList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Revision: The metadata for a revision to a file.
type Revision struct {
// ExportLinks: Links for exporting Google Docs to specific formats.
ExportLinks map[string]string `json:"exportLinks,omitempty"`
// Id: The ID of the revision.
Id string `json:"id,omitempty"`
// KeepForever: Whether to keep this revision forever, even if it is no
// longer the head revision. If not set, the revision will be
// automatically purged 30 days after newer content is uploaded. This
// can be set on a maximum of 200 revisions for a file.
// This field is only applicable to files with binary content in Drive.
KeepForever bool `json:"keepForever,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#revision".
Kind string `json:"kind,omitempty"`
// LastModifyingUser: The last user to modify this revision.
LastModifyingUser *User `json:"lastModifyingUser,omitempty"`
// Md5Checksum: The MD5 checksum of the revision's content. This is only
// applicable to files with binary content in Drive.
Md5Checksum string `json:"md5Checksum,omitempty"`
// MimeType: The MIME type of the revision.
MimeType string `json:"mimeType,omitempty"`
// ModifiedTime: The last time the revision was modified (RFC 3339
// date-time).
ModifiedTime string `json:"modifiedTime,omitempty"`
// OriginalFilename: The original filename used to create this revision.
// This is only applicable to files with binary content in Drive.
OriginalFilename string `json:"originalFilename,omitempty"`
// PublishAuto: Whether subsequent revisions will be automatically
// republished. This is only applicable to Google Docs.
PublishAuto bool `json:"publishAuto,omitempty"`
// Published: Whether this revision is published. This is only
// applicable to Google Docs.
Published bool `json:"published,omitempty"`
// PublishedOutsideDomain: Whether this revision is published outside
// the domain. This is only applicable to Google Docs.
PublishedOutsideDomain bool `json:"publishedOutsideDomain,omitempty"`
// Size: The size of the revision's content in bytes. This is only
// applicable to files with binary content in Drive.
Size int64 `json:"size,omitempty,string"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "ExportLinks") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExportLinks") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Revision) MarshalJSON() ([]byte, error) {
type NoMethod Revision
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// RevisionList: A list of revisions of a file.
type RevisionList struct {
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#revisionList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of revisions. This
// will be absent if the end of the revisions list has been reached. If
// the token is rejected for any reason, it should be discarded, and
// pagination should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Revisions: The list of revisions. If nextPageToken is populated, then
// this list may be incomplete and an additional page of results should
// be fetched.
Revisions []*Revision `json:"revisions,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RevisionList) MarshalJSON() ([]byte, error) {
type NoMethod RevisionList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type StartPageToken struct {
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#startPageToken".
Kind string `json:"kind,omitempty"`
// StartPageToken: The starting page token for listing changes.
StartPageToken string `json:"startPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *StartPageToken) MarshalJSON() ([]byte, error) {
type NoMethod StartPageToken
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TeamDrive: Deprecated: use the drive collection instead.
type TeamDrive struct {
// BackgroundImageFile: An image file and cropping parameters from which
// a background image for this Team Drive is set. This is a write-only
// field; it can only be set on drive.teamdrives.update requests that
// don't set themeId. When specified, all fields of the
// backgroundImageFile must be set.
BackgroundImageFile *TeamDriveBackgroundImageFile `json:"backgroundImageFile,omitempty"`
// BackgroundImageLink: A short-lived link to this Team Drive's
// background image.
BackgroundImageLink string `json:"backgroundImageLink,omitempty"`
// Capabilities: Capabilities the current user has on this Team Drive.
Capabilities *TeamDriveCapabilities `json:"capabilities,omitempty"`
// ColorRgb: The color of this Team Drive as an RGB hex string. It can
// only be set on a drive.teamdrives.update request that does not set
// themeId.
ColorRgb string `json:"colorRgb,omitempty"`
// CreatedTime: The time at which the Team Drive was created (RFC 3339
// date-time).
CreatedTime string `json:"createdTime,omitempty"`
// Id: The ID of this Team Drive which is also the ID of the top level
// folder of this Team Drive.
Id string `json:"id,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#teamDrive".
Kind string `json:"kind,omitempty"`
// Name: The name of this Team Drive.
Name string `json:"name,omitempty"`
// Restrictions: A set of restrictions that apply to this Team Drive or
// items inside this Team Drive.
Restrictions *TeamDriveRestrictions `json:"restrictions,omitempty"`
// ThemeId: The ID of the theme from which the background image and
// color will be set. The set of possible teamDriveThemes can be
// retrieved from a drive.about.get response. When not specified on a
// drive.teamdrives.create request, a random theme is chosen from which
// the background image and color are set. This is a write-only field;
// it can only be set on requests that don't set colorRgb or
// backgroundImageFile.
ThemeId string `json:"themeId,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "BackgroundImageFile")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BackgroundImageFile") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *TeamDrive) MarshalJSON() ([]byte, error) {
type NoMethod TeamDrive
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TeamDriveBackgroundImageFile: An image file and cropping parameters
// from which a background image for this Team Drive is set. This is a
// write-only field; it can only be set on drive.teamdrives.update
// requests that don't set themeId. When specified, all fields of the
// backgroundImageFile must be set.
type TeamDriveBackgroundImageFile struct {
// Id: The ID of an image file in Drive to use for the background image.
Id string `json:"id,omitempty"`
// Width: The width of the cropped image in the closed range of 0 to 1.
// This value represents the width of the cropped image divided by the
// width of the entire image. The height is computed by applying a width
// to height aspect ratio of 80 to 9. The resulting image must be at
// least 1280 pixels wide and 144 pixels high.
Width float64 `json:"width,omitempty"`
// XCoordinate: The X coordinate of the upper left corner of the
// cropping area in the background image. This is a value in the closed
// range of 0 to 1. This value represents the horizontal distance from
// the left side of the entire image to the left side of the cropping
// area divided by the width of the entire image.
XCoordinate float64 `json:"xCoordinate,omitempty"`
// YCoordinate: The Y coordinate of the upper left corner of the
// cropping area in the background image. This is a value in the closed
// range of 0 to 1. This value represents the vertical distance from the
// top side of the entire image to the top side of the cropping area
// divided by the height of the entire image.
YCoordinate float64 `json:"yCoordinate,omitempty"`
// ForceSendFields is a list of field names (e.g. "Id") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Id") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TeamDriveBackgroundImageFile) MarshalJSON() ([]byte, error) {
type NoMethod TeamDriveBackgroundImageFile
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *TeamDriveBackgroundImageFile) UnmarshalJSON(data []byte) error {
type NoMethod TeamDriveBackgroundImageFile
var s1 struct {
Width gensupport.JSONFloat64 `json:"width"`
XCoordinate gensupport.JSONFloat64 `json:"xCoordinate"`
YCoordinate gensupport.JSONFloat64 `json:"yCoordinate"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Width = float64(s1.Width)
s.XCoordinate = float64(s1.XCoordinate)
s.YCoordinate = float64(s1.YCoordinate)
return nil
}
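// exampleBackgroundCrop is an illustrative sketch of the cropping
// parameters documented above: for a 2560-pixel-wide source image, a
// Width of 0.5 selects a 1280-pixel strip, which at the fixed 80:9 aspect
// ratio yields exactly the 144-pixel minimum height. The image ID
// parameter and the dimensions are assumptions for illustration only.
func exampleBackgroundCrop(imageFileID string) *TeamDriveBackgroundImageFile {
	return &TeamDriveBackgroundImageFile{
		Id:          imageFileID,
		Width:       0.5,  // 2560 px * 0.5 = 1280 px wide, the documented minimum
		XCoordinate: 0.25, // crop starts a quarter of the way across the image
		YCoordinate: 0.0,  // crop starts at the top edge
	}
}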
// TeamDriveCapabilities: Capabilities the current user has on this Team
// Drive.
type TeamDriveCapabilities struct {
// CanAddChildren: Whether the current user can add children to folders
// in this Team Drive.
CanAddChildren bool `json:"canAddChildren,omitempty"`
// CanChangeCopyRequiresWriterPermissionRestriction: Whether the current
// user can change the copyRequiresWriterPermission restriction of this
// Team Drive.
CanChangeCopyRequiresWriterPermissionRestriction bool `json:"canChangeCopyRequiresWriterPermissionRestriction,omitempty"`
// CanChangeDomainUsersOnlyRestriction: Whether the current user can
// change the domainUsersOnly restriction of this Team Drive.
CanChangeDomainUsersOnlyRestriction bool `json:"canChangeDomainUsersOnlyRestriction,omitempty"`
// CanChangeTeamDriveBackground: Whether the current user can change the
// background of this Team Drive.
CanChangeTeamDriveBackground bool `json:"canChangeTeamDriveBackground,omitempty"`
// CanChangeTeamMembersOnlyRestriction: Whether the current user can
// change the teamMembersOnly restriction of this Team Drive.
CanChangeTeamMembersOnlyRestriction bool `json:"canChangeTeamMembersOnlyRestriction,omitempty"`
// CanComment: Whether the current user can comment on files in this
// Team Drive.
CanComment bool `json:"canComment,omitempty"`
// CanCopy: Whether the current user can copy files in this Team Drive.
CanCopy bool `json:"canCopy,omitempty"`
// CanDeleteChildren: Whether the current user can delete children from
// folders in this Team Drive.
CanDeleteChildren bool `json:"canDeleteChildren,omitempty"`
// CanDeleteTeamDrive: Whether the current user can delete this Team
// Drive. Attempting to delete the Team Drive may still fail if there
// are untrashed items inside the Team Drive.
CanDeleteTeamDrive bool `json:"canDeleteTeamDrive,omitempty"`
// CanDownload: Whether the current user can download files in this Team
// Drive.
CanDownload bool `json:"canDownload,omitempty"`
// CanEdit: Whether the current user can edit files in this Team Drive.
CanEdit bool `json:"canEdit,omitempty"`
// CanListChildren: Whether the current user can list the children of
// folders in this Team Drive.
CanListChildren bool `json:"canListChildren,omitempty"`
// CanManageMembers: Whether the current user can add members to this
// Team Drive, remove them, or change their role.
CanManageMembers bool `json:"canManageMembers,omitempty"`
// CanReadRevisions: Whether the current user can read the revisions
// resource of files in this Team Drive.
CanReadRevisions bool `json:"canReadRevisions,omitempty"`
// CanRemoveChildren: Deprecated - use canDeleteChildren or
// canTrashChildren instead.
CanRemoveChildren bool `json:"canRemoveChildren,omitempty"`
// CanRename: Whether the current user can rename files or folders in
// this Team Drive.
CanRename bool `json:"canRename,omitempty"`
// CanRenameTeamDrive: Whether the current user can rename this Team
// Drive.
CanRenameTeamDrive bool `json:"canRenameTeamDrive,omitempty"`
// CanShare: Whether the current user can share files or folders in this
// Team Drive.
CanShare bool `json:"canShare,omitempty"`
// CanTrashChildren: Whether the current user can trash children from
// folders in this Team Drive.
CanTrashChildren bool `json:"canTrashChildren,omitempty"`
// ForceSendFields is a list of field names (e.g. "CanAddChildren") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CanAddChildren") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *TeamDriveCapabilities) MarshalJSON() ([]byte, error) {
type NoMethod TeamDriveCapabilities
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TeamDriveRestrictions: A set of restrictions that apply to this Team
// Drive or items inside this Team Drive.
type TeamDriveRestrictions struct {
// AdminManagedRestrictions: Whether administrative privileges on this
// Team Drive are required to modify restrictions.
AdminManagedRestrictions bool `json:"adminManagedRestrictions,omitempty"`
// CopyRequiresWriterPermission: Whether the options to copy, print, or
// download files inside this Team Drive should be disabled for readers
// and commenters. When this restriction is set to true, it will
// override the similarly named field to true for any file inside this
// Team Drive.
CopyRequiresWriterPermission bool `json:"copyRequiresWriterPermission,omitempty"`
// DomainUsersOnly: Whether access to this Team Drive and items inside
// this Team Drive is restricted to users of the domain to which this
// Team Drive belongs. This restriction may be overridden by other
// sharing policies controlled outside of this Team Drive.
DomainUsersOnly bool `json:"domainUsersOnly,omitempty"`
// TeamMembersOnly: Whether access to items inside this Team Drive is
// restricted to members of this Team Drive.
TeamMembersOnly bool `json:"teamMembersOnly,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AdminManagedRestrictions") to unconditionally include in API
// requests. By default, fields with empty values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AdminManagedRestrictions")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *TeamDriveRestrictions) MarshalJSON() ([]byte, error) {
type NoMethod TeamDriveRestrictions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TeamDriveList: A list of Team Drives.
type TeamDriveList struct {
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#teamDriveList".
Kind string `json:"kind,omitempty"`
// NextPageToken: The page token for the next page of Team Drives. This
// will be absent if the end of the Team Drives list has been reached.
// If the token is rejected for any reason, it should be discarded, and
// pagination should be restarted from the first page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// TeamDrives: The list of Team Drives. If nextPageToken is populated,
// then this list may be incomplete and an additional page of results
// should be fetched.
TeamDrives []*TeamDrive `json:"teamDrives,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TeamDriveList) MarshalJSON() ([]byte, error) {
type NoMethod TeamDriveList
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// User: Information about a Drive user.
type User struct {
// DisplayName: A plain text displayable name for this user.
DisplayName string `json:"displayName,omitempty"`
// EmailAddress: The email address of the user. This may not be present
// in certain contexts if the user has not made their email address
// visible to the requester.
EmailAddress string `json:"emailAddress,omitempty"`
// Kind: Identifies what kind of resource this is. Value: the fixed
// string "drive#user".
Kind string `json:"kind,omitempty"`
// Me: Whether this user is the requesting user.
Me bool `json:"me,omitempty"`
// PermissionId: The user's ID as visible in Permission resources.
PermissionId string `json:"permissionId,omitempty"`
// PhotoLink: A link to the user's profile photo, if available.
PhotoLink string `json:"photoLink,omitempty"`
// ForceSendFields is a list of field names (e.g. "DisplayName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DisplayName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *User) MarshalJSON() ([]byte, error) {
type NoMethod User
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "drive.about.get":
type AboutGetCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets information about the user, the user's Drive, and system
// capabilities.
func (r *AboutService) Get() *AboutGetCall {
c := &AboutGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AboutGetCall) Fields(s ...googleapi.Field) *AboutGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *AboutGetCall) IfNoneMatch(entityTag string) *AboutGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *AboutGetCall) Context(ctx context.Context) *AboutGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *AboutGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *AboutGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "about")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.about.get" call.
// Exactly one of *About or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *About.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *AboutGetCall) Do(opts ...googleapi.CallOption) (*About, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &About{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets information about the user, the user's Drive, and system capabilities.",
// "httpMethod": "GET",
// "id": "drive.about.get",
// "path": "about",
// "response": {
// "$ref": "About"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
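// Example (illustrative only): a minimal about.get call, assuming svc is a
// *Service already constructed elsewhere and that the About response exposes
// a User field as in the v3 schema.
//
//	about, err := svc.About.Get().Fields("user").Do()
//	if err != nil {
//		log.Fatalf("about.get: %v", err)
//	}
//	fmt.Println(about.User.DisplayName)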
// method id "drive.changes.getStartPageToken":
type ChangesGetStartPageTokenCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// GetStartPageToken: Gets the starting pageToken for listing future
// changes.
func (r *ChangesService) GetStartPageToken() *ChangesGetStartPageTokenCall {
c := &ChangesGetStartPageTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// DriveId sets the optional parameter "driveId": The ID of the shared
// drive for which the starting pageToken for listing future changes
// from that shared drive will be returned.
func (c *ChangesGetStartPageTokenCall) DriveId(driveId string) *ChangesGetStartPageTokenCall {
c.urlParams_.Set("driveId", driveId)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *ChangesGetStartPageTokenCall) SupportsAllDrives(supportsAllDrives bool) *ChangesGetStartPageTokenCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated use supportsAllDrives instead.
func (c *ChangesGetStartPageTokenCall) SupportsTeamDrives(supportsTeamDrives bool) *ChangesGetStartPageTokenCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// TeamDriveId sets the optional parameter "teamDriveId": Deprecated use
// driveId instead.
func (c *ChangesGetStartPageTokenCall) TeamDriveId(teamDriveId string) *ChangesGetStartPageTokenCall {
c.urlParams_.Set("teamDriveId", teamDriveId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ChangesGetStartPageTokenCall) Fields(s ...googleapi.Field) *ChangesGetStartPageTokenCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ChangesGetStartPageTokenCall) IfNoneMatch(entityTag string) *ChangesGetStartPageTokenCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ChangesGetStartPageTokenCall) Context(ctx context.Context) *ChangesGetStartPageTokenCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ChangesGetStartPageTokenCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ChangesGetStartPageTokenCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "changes/startPageToken")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.changes.getStartPageToken" call.
// Exactly one of *StartPageToken or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *StartPageToken.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ChangesGetStartPageTokenCall) Do(opts ...googleapi.CallOption) (*StartPageToken, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &StartPageToken{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets the starting pageToken for listing future changes.",
// "httpMethod": "GET",
// "id": "drive.changes.getStartPageToken",
// "parameters": {
// "driveId": {
// "description": "The ID of the shared drive for which the starting pageToken for listing future changes from that shared drive will be returned.",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "teamDriveId": {
// "description": "Deprecated use driveId instead.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "changes/startPageToken",
// "response": {
// "$ref": "StartPageToken"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
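// Example (illustrative only): fetching the starting page token before
// polling for changes, assuming svc is a *Service and that StartPageToken
// exposes a StartPageToken field as in the v3 schema.
//
//	st, err := svc.Changes.GetStartPageToken().Do()
//	if err != nil {
//		log.Fatalf("getStartPageToken: %v", err)
//	}
//	token := st.StartPageToken // pass this to Changes.List later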
// method id "drive.changes.list":
type ChangesListCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the changes for a user or shared drive.
func (r *ChangesService) List(pageToken string) *ChangesListCall {
c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.urlParams_.Set("pageToken", pageToken)
return c
}
// DriveId sets the optional parameter "driveId": The shared drive from
// which changes will be returned. If specified the change IDs will be
// reflective of the shared drive; use the combined drive ID and change
// ID as an identifier.
func (c *ChangesListCall) DriveId(driveId string) *ChangesListCall {
c.urlParams_.Set("driveId", driveId)
return c
}
// IncludeCorpusRemovals sets the optional parameter
// "includeCorpusRemovals": Whether changes should include the file
// resource if the file is still accessible by the user at the time of
// the request, even when a file was removed from the list of changes
// and there will be no further change entries for this file.
func (c *ChangesListCall) IncludeCorpusRemovals(includeCorpusRemovals bool) *ChangesListCall {
c.urlParams_.Set("includeCorpusRemovals", fmt.Sprint(includeCorpusRemovals))
return c
}
// IncludeItemsFromAllDrives sets the optional parameter
// "includeItemsFromAllDrives": Deprecated - Whether both My Drive and
// shared drive items should be included in results. This parameter will
// only be effective until June 1, 2020. Afterwards shared drive items
// will be included in the results.
func (c *ChangesListCall) IncludeItemsFromAllDrives(includeItemsFromAllDrives bool) *ChangesListCall {
c.urlParams_.Set("includeItemsFromAllDrives", fmt.Sprint(includeItemsFromAllDrives))
return c
}
// IncludeRemoved sets the optional parameter "includeRemoved": Whether
// to include changes indicating that items have been removed from the
// list of changes, for example by deletion or loss of access.
func (c *ChangesListCall) IncludeRemoved(includeRemoved bool) *ChangesListCall {
c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved))
return c
}
// IncludeTeamDriveItems sets the optional parameter
// "includeTeamDriveItems": Deprecated use includeItemsFromAllDrives
// instead.
func (c *ChangesListCall) IncludeTeamDriveItems(includeTeamDriveItems bool) *ChangesListCall {
c.urlParams_.Set("includeTeamDriveItems", fmt.Sprint(includeTeamDriveItems))
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of changes to return per page.
func (c *ChangesListCall) PageSize(pageSize int64) *ChangesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// RestrictToMyDrive sets the optional parameter "restrictToMyDrive":
// Whether to restrict the results to changes inside the My Drive
// hierarchy. This omits changes to files such as those in the
// Application Data folder or shared files which have not been added to
// My Drive.
func (c *ChangesListCall) RestrictToMyDrive(restrictToMyDrive bool) *ChangesListCall {
c.urlParams_.Set("restrictToMyDrive", fmt.Sprint(restrictToMyDrive))
return c
}
// Spaces sets the optional parameter "spaces": A comma-separated list
// of spaces to query within the user corpus. Supported values are
// 'drive', 'appDataFolder' and 'photos'.
func (c *ChangesListCall) Spaces(spaces string) *ChangesListCall {
c.urlParams_.Set("spaces", spaces)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *ChangesListCall) SupportsAllDrives(supportsAllDrives bool) *ChangesListCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated use supportsAllDrives instead.
func (c *ChangesListCall) SupportsTeamDrives(supportsTeamDrives bool) *ChangesListCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// TeamDriveId sets the optional parameter "teamDriveId": Deprecated use
// driveId instead.
func (c *ChangesListCall) TeamDriveId(teamDriveId string) *ChangesListCall {
c.urlParams_.Set("teamDriveId", teamDriveId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ChangesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "changes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.changes.list" call.
// Exactly one of *ChangeList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *ChangeList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangeList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ChangeList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the changes for a user or shared drive.",
// "httpMethod": "GET",
// "id": "drive.changes.list",
// "parameterOrder": [
// "pageToken"
// ],
// "parameters": {
// "driveId": {
// "description": "The shared drive from which changes will be returned. If specified the change IDs will be reflective of the shared drive; use the combined drive ID and change ID as an identifier.",
// "location": "query",
// "type": "string"
// },
// "includeCorpusRemovals": {
// "default": "false",
// "description": "Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed from the list of changes and there will be no further change entries for this file.",
// "location": "query",
// "type": "boolean"
// },
// "includeItemsFromAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether both My Drive and shared drive items should be included in results. This parameter will only be effective until June 1, 2020. Afterwards shared drive items will be included in the results.",
// "location": "query",
// "type": "boolean"
// },
// "includeRemoved": {
// "default": "true",
// "description": "Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access.",
// "location": "query",
// "type": "boolean"
// },
// "includeTeamDriveItems": {
// "default": "false",
// "description": "Deprecated use includeItemsFromAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "pageSize": {
// "default": "100",
// "description": "The maximum number of changes to return per page.",
// "format": "int32",
// "location": "query",
// "maximum": "1000",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.",
// "location": "query",
// "required": true,
// "type": "string"
// },
// "restrictToMyDrive": {
// "default": "false",
// "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.",
// "location": "query",
// "type": "boolean"
// },
// "spaces": {
// "default": "drive",
// "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "teamDriveId": {
// "description": "Deprecated use driveId instead.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "changes",
// "response": {
// "$ref": "ChangeList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ],
// "supportsSubscription": true
// }
}
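// Example (illustrative only): draining the change log from a saved token,
// assuming svc is a *Service and that ChangeList exposes Changes,
// NextPageToken and NewStartPageToken fields as in the v3 schema.
//
//	for token != "" {
//		cl, err := svc.Changes.List(token).PageSize(100).Do()
//		if err != nil {
//			log.Fatalf("changes.list: %v", err)
//		}
//		for _, ch := range cl.Changes {
//			fmt.Println(ch.FileId)
//		}
//		if cl.NewStartPageToken != "" {
//			token = "" // caught up; persist cl.NewStartPageToken for the next poll
//		} else {
//			token = cl.NextPageToken
//		}
//	}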
// method id "drive.changes.watch":
type ChangesWatchCall struct {
s *Service
channel *Channel
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Watch: Subscribes to changes for a user.
func (r *ChangesService) Watch(pageToken string, channel *Channel) *ChangesWatchCall {
c := &ChangesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.urlParams_.Set("pageToken", pageToken)
c.channel = channel
return c
}
// DriveId sets the optional parameter "driveId": The shared drive from
// which changes will be returned. If specified the change IDs will be
// reflective of the shared drive; use the combined drive ID and change
// ID as an identifier.
func (c *ChangesWatchCall) DriveId(driveId string) *ChangesWatchCall {
c.urlParams_.Set("driveId", driveId)
return c
}
// IncludeCorpusRemovals sets the optional parameter
// "includeCorpusRemovals": Whether changes should include the file
// resource if the file is still accessible by the user at the time of
// the request, even when a file was removed from the list of changes
// and there will be no further change entries for this file.
func (c *ChangesWatchCall) IncludeCorpusRemovals(includeCorpusRemovals bool) *ChangesWatchCall {
c.urlParams_.Set("includeCorpusRemovals", fmt.Sprint(includeCorpusRemovals))
return c
}
// IncludeItemsFromAllDrives sets the optional parameter
// "includeItemsFromAllDrives": Deprecated - Whether both My Drive and
// shared drive items should be included in results. This parameter will
// only be effective until June 1, 2020. Afterwards shared drive items
// will be included in the results.
func (c *ChangesWatchCall) IncludeItemsFromAllDrives(includeItemsFromAllDrives bool) *ChangesWatchCall {
c.urlParams_.Set("includeItemsFromAllDrives", fmt.Sprint(includeItemsFromAllDrives))
return c
}
// IncludeRemoved sets the optional parameter "includeRemoved": Whether
// to include changes indicating that items have been removed from the
// list of changes, for example by deletion or loss of access.
func (c *ChangesWatchCall) IncludeRemoved(includeRemoved bool) *ChangesWatchCall {
c.urlParams_.Set("includeRemoved", fmt.Sprint(includeRemoved))
return c
}
// IncludeTeamDriveItems sets the optional parameter
// "includeTeamDriveItems": Deprecated use includeItemsFromAllDrives
// instead.
func (c *ChangesWatchCall) IncludeTeamDriveItems(includeTeamDriveItems bool) *ChangesWatchCall {
c.urlParams_.Set("includeTeamDriveItems", fmt.Sprint(includeTeamDriveItems))
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of changes to return per page.
func (c *ChangesWatchCall) PageSize(pageSize int64) *ChangesWatchCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// RestrictToMyDrive sets the optional parameter "restrictToMyDrive":
// Whether to restrict the results to changes inside the My Drive
// hierarchy. This omits changes to files such as those in the
// Application Data folder or shared files which have not been added to
// My Drive.
func (c *ChangesWatchCall) RestrictToMyDrive(restrictToMyDrive bool) *ChangesWatchCall {
c.urlParams_.Set("restrictToMyDrive", fmt.Sprint(restrictToMyDrive))
return c
}
// Spaces sets the optional parameter "spaces": A comma-separated list
// of spaces to query within the user corpus. Supported values are
// 'drive', 'appDataFolder' and 'photos'.
func (c *ChangesWatchCall) Spaces(spaces string) *ChangesWatchCall {
c.urlParams_.Set("spaces", spaces)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *ChangesWatchCall) SupportsAllDrives(supportsAllDrives bool) *ChangesWatchCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated use supportsAllDrives instead.
func (c *ChangesWatchCall) SupportsTeamDrives(supportsTeamDrives bool) *ChangesWatchCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// TeamDriveId sets the optional parameter "teamDriveId": Deprecated use
// driveId instead.
func (c *ChangesWatchCall) TeamDriveId(teamDriveId string) *ChangesWatchCall {
c.urlParams_.Set("teamDriveId", teamDriveId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ChangesWatchCall) Fields(s ...googleapi.Field) *ChangesWatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ChangesWatchCall) Context(ctx context.Context) *ChangesWatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ChangesWatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ChangesWatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "changes/watch")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.changes.watch" call.
// Exactly one of *Channel or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Channel.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ChangesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Channel{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Subscribes to changes for a user.",
// "httpMethod": "POST",
// "id": "drive.changes.watch",
// "parameterOrder": [
// "pageToken"
// ],
// "parameters": {
// "driveId": {
// "description": "The shared drive from which changes will be returned. If specified the change IDs will be reflective of the shared drive; use the combined drive ID and change ID as an identifier.",
// "location": "query",
// "type": "string"
// },
// "includeCorpusRemovals": {
// "default": "false",
// "description": "Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed from the list of changes and there will be no further change entries for this file.",
// "location": "query",
// "type": "boolean"
// },
// "includeItemsFromAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether both My Drive and shared drive items should be included in results. This parameter will only be effective until June 1, 2020. Afterwards shared drive items will be included in the results.",
// "location": "query",
// "type": "boolean"
// },
// "includeRemoved": {
// "default": "true",
// "description": "Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access.",
// "location": "query",
// "type": "boolean"
// },
// "includeTeamDriveItems": {
// "default": "false",
// "description": "Deprecated use includeItemsFromAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "pageSize": {
// "default": "100",
// "description": "The maximum number of changes to return per page.",
// "format": "int32",
// "location": "query",
// "maximum": "1000",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to the response from the getStartPageToken method.",
// "location": "query",
// "required": true,
// "type": "string"
// },
// "restrictToMyDrive": {
// "default": "false",
// "description": "Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or shared files which have not been added to My Drive.",
// "location": "query",
// "type": "boolean"
// },
// "spaces": {
// "default": "drive",
// "description": "A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "teamDriveId": {
// "description": "Deprecated use driveId instead.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "changes/watch",
// "request": {
// "$ref": "Channel",
// "parameterName": "resource"
// },
// "response": {
// "$ref": "Channel"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ],
// "supportsSubscription": true
// }
}
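// Example (illustrative only): subscribing to change notifications over a
// webhook, assuming svc is a *Service and that Channel exposes Id, Type,
// Address and ResourceId fields as in the v3 schema; the id and address
// values are placeholders chosen by the caller.
//
//	ch := &Channel{
//		Id:      "my-channel-id", // hypothetical caller-chosen UUID
//		Type:    "web_hook",
//		Address: "https://example.com/notifications",
//	}
//	got, err := svc.Changes.Watch(token, ch).Do()
//	if err != nil {
//		log.Fatalf("changes.watch: %v", err)
//	}
//	_ = got.ResourceId // keep this to stop the channel later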
// method id "drive.channels.stop":
type ChannelsStopCall struct {
s *Service
channel *Channel
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Stop: Stop watching resources through this channel.
func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall {
c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.channel = channel
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ChannelsStopCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.channels.stop" call.
func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Stop watching resources through this channel",
// "httpMethod": "POST",
// "id": "drive.channels.stop",
// "path": "channels/stop",
// "request": {
// "$ref": "Channel",
// "parameterName": "resource"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
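// Example (illustrative only): stopping a notification channel opened by a
// watch call, assuming svc is a *Service; Id and ResourceId must match the
// values returned when the channel was opened.
//
//	err := svc.Channels.Stop(&Channel{
//		Id:         "my-channel-id",
//		ResourceId: resourceID, // as returned by the earlier Watch call
//	}).Do()
//	if err != nil {
//		log.Fatalf("channels.stop: %v", err)
//	}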
// method id "drive.comments.create":
type CommentsCreateCall struct {
s *Service
fileId string
comment *Comment
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new comment on a file.
func (r *CommentsService) Create(fileId string, comment *Comment) *CommentsCreateCall {
c := &CommentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.comment = comment
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CommentsCreateCall) Fields(s ...googleapi.Field) *CommentsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CommentsCreateCall) Context(ctx context.Context) *CommentsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CommentsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *CommentsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.comments.create" call.
// Exactly one of *Comment or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Comment.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *CommentsCreateCall) Do(opts ...googleapi.CallOption) (*Comment, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Comment{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new comment on a file.",
// "httpMethod": "POST",
// "id": "drive.comments.create",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments",
// "request": {
// "$ref": "Comment"
// },
// "response": {
// "$ref": "Comment"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
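// Example (illustrative only): creating a comment on a file, assuming svc is
// a *Service and that Comment exposes Content and Id fields as in the v3
// schema; fileID is a placeholder.
//
//	cmt, err := svc.Comments.Create(fileID, &Comment{
//		Content: "Please review this section.",
//	}).Fields("id", "content").Do()
//	if err != nil {
//		log.Fatalf("comments.create: %v", err)
//	}
//	fmt.Println(cmt.Id)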
// method id "drive.comments.delete":
type CommentsDeleteCall struct {
s *Service
fileId string
commentId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a comment.
func (r *CommentsService) Delete(fileId string, commentId string) *CommentsDeleteCall {
c := &CommentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CommentsDeleteCall) Fields(s ...googleapi.Field) *CommentsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CommentsDeleteCall) Context(ctx context.Context) *CommentsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CommentsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *CommentsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.comments.delete" call.
func (c *CommentsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes a comment.",
// "httpMethod": "DELETE",
// "id": "drive.comments.delete",
// "parameterOrder": [
// "fileId",
// "commentId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
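// Example (illustrative only): deleting a comment, assuming svc is a
// *Service; fileID and commentID are placeholders.
//
//	if err := svc.Comments.Delete(fileID, commentID).Do(); err != nil {
//		log.Fatalf("comments.delete: %v", err)
//	}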
// method id "drive.comments.get":
type CommentsGetCall struct {
s *Service
fileId string
commentId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a comment by ID.
func (r *CommentsService) Get(fileId string, commentId string) *CommentsGetCall {
c := &CommentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
return c
}
// IncludeDeleted sets the optional parameter "includeDeleted": Whether
// to return deleted comments. Deleted comments will not include their
// original content.
func (c *CommentsGetCall) IncludeDeleted(includeDeleted bool) *CommentsGetCall {
c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CommentsGetCall) Fields(s ...googleapi.Field) *CommentsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CommentsGetCall) IfNoneMatch(entityTag string) *CommentsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CommentsGetCall) Context(ctx context.Context) *CommentsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CommentsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.comments.get" call.
// Exactly one of *Comment or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Comment.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *CommentsGetCall) Do(opts ...googleapi.CallOption) (*Comment, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Comment{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a comment by ID.",
// "httpMethod": "GET",
// "id": "drive.comments.get",
// "parameterOrder": [
// "fileId",
// "commentId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "includeDeleted": {
// "default": "false",
// "description": "Whether to return deleted comments. Deleted comments will not include their original content.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/comments/{commentId}",
// "response": {
// "$ref": "Comment"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
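// Example (illustrative only): fetching a single comment, including deleted
// ones, assuming svc is a *Service and that Comment exposes a Content field
// as in the v3 schema; fileID and commentID are placeholders.
//
//	cmt, err := svc.Comments.Get(fileID, commentID).
//		IncludeDeleted(true).
//		Fields("id", "content", "deleted").Do()
//	if err != nil {
//		log.Fatalf("comments.get: %v", err)
//	}
//	fmt.Println(cmt.Content)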
// method id "drive.comments.list":
type CommentsListCall struct {
s *Service
fileId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists a file's comments.
func (r *CommentsService) List(fileId string) *CommentsListCall {
c := &CommentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
return c
}
// IncludeDeleted sets the optional parameter "includeDeleted": Whether
// to include deleted comments. Deleted comments will not include their
// original content.
func (c *CommentsListCall) IncludeDeleted(includeDeleted bool) *CommentsListCall {
c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted))
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of comments to return per page.
func (c *CommentsListCall) PageSize(pageSize int64) *CommentsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token for
// continuing a previous list request on the next page. This should be
// set to the value of 'nextPageToken' from the previous response.
func (c *CommentsListCall) PageToken(pageToken string) *CommentsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// StartModifiedTime sets the optional parameter "startModifiedTime":
// The minimum value of 'modifiedTime' for the result comments (RFC 3339
// date-time).
func (c *CommentsListCall) StartModifiedTime(startModifiedTime string) *CommentsListCall {
c.urlParams_.Set("startModifiedTime", startModifiedTime)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CommentsListCall) Fields(s ...googleapi.Field) *CommentsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CommentsListCall) IfNoneMatch(entityTag string) *CommentsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CommentsListCall) Context(ctx context.Context) *CommentsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CommentsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.comments.list" call.
// Exactly one of *CommentList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *CommentList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *CommentsListCall) Do(opts ...googleapi.CallOption) (*CommentList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &CommentList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists a file's comments.",
// "httpMethod": "GET",
// "id": "drive.comments.list",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "includeDeleted": {
// "default": "false",
// "description": "Whether to include deleted comments. Deleted comments will not include their original content.",
// "location": "query",
// "type": "boolean"
// },
// "pageSize": {
// "default": "20",
// "description": "The maximum number of comments to return per page.",
// "format": "int32",
// "location": "query",
// "maximum": "100",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.",
// "location": "query",
// "type": "string"
// },
// "startModifiedTime": {
// "description": "The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time).",
// "location": "query",
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments",
// "response": {
// "$ref": "CommentList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CommentsListCall) Pages(ctx context.Context, f func(*CommentList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
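// exampleListComments is an illustrative usage sketch added by the editor, not
// part of the generated surface: it pages through every comment on a file via
// CommentsListCall.Pages. The helper name and the assumption that srv is a
// fully constructed *Service are the author's additions.
func exampleListComments(ctx context.Context, srv *Service, fileID string) error {
	return srv.Comments.List(fileID).
		PageSize(100). // up to 100 comments per page
		Fields("comments(id,content)", "nextPageToken"). // keep nextPageToken so Pages can continue
		Pages(ctx, func(page *CommentList) error {
			for _, c := range page.Comments {
				fmt.Printf("%s: %s\n", c.Id, c.Content)
			}
			return nil
		})
}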
// method id "drive.comments.update":
type CommentsUpdateCall struct {
s *Service
fileId string
commentId string
comment *Comment
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a comment with patch semantics.
func (r *CommentsService) Update(fileId string, commentId string, comment *Comment) *CommentsUpdateCall {
c := &CommentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
c.comment = comment
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CommentsUpdateCall) Fields(s ...googleapi.Field) *CommentsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CommentsUpdateCall) Context(ctx context.Context) *CommentsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CommentsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *CommentsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.comment)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.comments.update" call.
// Exactly one of *Comment or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Comment.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *CommentsUpdateCall) Do(opts ...googleapi.CallOption) (*Comment, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Comment{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a comment with patch semantics.",
// "httpMethod": "PATCH",
// "id": "drive.comments.update",
// "parameterOrder": [
// "fileId",
// "commentId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}",
// "request": {
// "$ref": "Comment"
// },
// "response": {
// "$ref": "Comment"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
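// exampleUpdateCommentText is an illustrative sketch (the helper name is the
// editor's, not generated): it patches only the content of an existing
// comment, leaving every other field untouched.
func exampleUpdateCommentText(ctx context.Context, srv *Service, fileID, commentID, newContent string) (*Comment, error) {
	return srv.Comments.Update(fileID, commentID, &Comment{Content: newContent}).
		Fields("id", "content", "modifiedTime"). // ask only for the fields we need back
		Context(ctx).
		Do()
}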
// method id "drive.drives.create":
type DrivesCreateCall struct {
s *Service
drive *Drive
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new shared drive.
func (r *DrivesService) Create(requestId string, drive *Drive) *DrivesCreateCall {
c := &DrivesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.urlParams_.Set("requestId", requestId)
c.drive = drive
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesCreateCall) Fields(s ...googleapi.Field) *DrivesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesCreateCall) Context(ctx context.Context) *DrivesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.drive)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.create" call.
// Exactly one of *Drive or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Drive.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *DrivesCreateCall) Do(opts ...googleapi.CallOption) (*Drive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Drive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new shared drive.",
// "httpMethod": "POST",
// "id": "drive.drives.create",
// "parameterOrder": [
// "requestId"
// ],
// "parameters": {
// "requestId": {
// "description": "An ID, such as a random UUID, which uniquely identifies this user's request for idempotent creation of a shared drive. A repeated request by the same user and with the same request ID will avoid creating duplicates by attempting to create the same shared drive. If the shared drive already exists a 409 error will be returned.",
// "location": "query",
// "required": true,
// "type": "string"
// }
// },
// "path": "drives",
// "request": {
// "$ref": "Drive"
// },
// "response": {
// "$ref": "Drive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
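// exampleCreateSharedDrive is an illustrative sketch (name and wrapper are the
// editor's): it creates a shared drive, passing a caller-chosen requestID so a
// retried call cannot create a duplicate drive.
func exampleCreateSharedDrive(ctx context.Context, srv *Service, requestID, name string) (*Drive, error) {
	return srv.Drives.Create(requestID, &Drive{Name: name}).Context(ctx).Do()
}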
// method id "drive.drives.delete":
type DrivesDeleteCall struct {
s *Service
driveId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Permanently deletes a shared drive for which the user is an
// organizer. The shared drive cannot contain any untrashed items.
func (r *DrivesService) Delete(driveId string) *DrivesDeleteCall {
c := &DrivesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.driveId = driveId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesDeleteCall) Fields(s ...googleapi.Field) *DrivesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesDeleteCall) Context(ctx context.Context) *DrivesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives/{driveId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"driveId": c.driveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.delete" call.
func (c *DrivesDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Permanently deletes a shared drive for which the user is an organizer. The shared drive cannot contain any untrashed items.",
// "httpMethod": "DELETE",
// "id": "drive.drives.delete",
// "parameterOrder": [
// "driveId"
// ],
// "parameters": {
// "driveId": {
// "description": "The ID of the shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "drives/{driveId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
// method id "drive.drives.get":
type DrivesGetCall struct {
s *Service
driveId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a shared drive's metadata by ID.
func (r *DrivesService) Get(driveId string) *DrivesGetCall {
c := &DrivesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.driveId = driveId
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if they are
// an administrator of the domain to which the shared drive belongs.
func (c *DrivesGetCall) UseDomainAdminAccess(useDomainAdminAccess bool) *DrivesGetCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesGetCall) Fields(s ...googleapi.Field) *DrivesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *DrivesGetCall) IfNoneMatch(entityTag string) *DrivesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesGetCall) Context(ctx context.Context) *DrivesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives/{driveId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"driveId": c.driveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.get" call.
// Exactly one of *Drive or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Drive.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *DrivesGetCall) Do(opts ...googleapi.CallOption) (*Drive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Drive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a shared drive's metadata by ID.",
// "httpMethod": "GET",
// "id": "drive.drives.get",
// "parameterOrder": [
// "driveId"
// ],
// "parameters": {
// "driveId": {
// "description": "The ID of the shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "drives/{driveId}",
// "response": {
// "$ref": "Drive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
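// exampleGetDriveIfChanged is an illustrative sketch: it fetches a shared
// drive's metadata but sends an If-None-Match header, so an unchanged drive
// comes back as a StatusNotModified error rather than a full body. The helper
// name and the etag argument are the editor's assumptions.
func exampleGetDriveIfChanged(ctx context.Context, srv *Service, driveID, etag string) (*Drive, bool, error) {
	d, err := srv.Drives.Get(driveID).
		UseDomainAdminAccess(true). // succeed as a domain admin even without drive membership
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // unchanged since the supplied ETag
	}
	if err != nil {
		return nil, false, err
	}
	return d, true, nil
}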
// method id "drive.drives.hide":
type DrivesHideCall struct {
s *Service
driveId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Hide: Hides a shared drive from the default view.
func (r *DrivesService) Hide(driveId string) *DrivesHideCall {
c := &DrivesHideCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.driveId = driveId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesHideCall) Fields(s ...googleapi.Field) *DrivesHideCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesHideCall) Context(ctx context.Context) *DrivesHideCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesHideCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesHideCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives/{driveId}/hide")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"driveId": c.driveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.hide" call.
// Exactly one of *Drive or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Drive.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *DrivesHideCall) Do(opts ...googleapi.CallOption) (*Drive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Drive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Hides a shared drive from the default view.",
// "httpMethod": "POST",
// "id": "drive.drives.hide",
// "parameterOrder": [
// "driveId"
// ],
// "parameters": {
// "driveId": {
// "description": "The ID of the shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "drives/{driveId}/hide",
// "response": {
// "$ref": "Drive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
// method id "drive.drives.list":
type DrivesListCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the user's shared drives.
func (r *DrivesService) List() *DrivesListCall {
c := &DrivesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// PageSize sets the optional parameter "pageSize": Maximum number of
// shared drives to return.
func (c *DrivesListCall) PageSize(pageSize int64) *DrivesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": Page token for
// shared drives.
func (c *DrivesListCall) PageToken(pageToken string) *DrivesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Q sets the optional parameter "q": Query string for searching shared
// drives.
func (c *DrivesListCall) Q(q string) *DrivesListCall {
c.urlParams_.Set("q", q)
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then all shared drives of the domain in which the
// requester is an administrator are returned.
func (c *DrivesListCall) UseDomainAdminAccess(useDomainAdminAccess bool) *DrivesListCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesListCall) Fields(s ...googleapi.Field) *DrivesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *DrivesListCall) IfNoneMatch(entityTag string) *DrivesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesListCall) Context(ctx context.Context) *DrivesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.list" call.
// Exactly one of *DriveList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *DriveList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *DrivesListCall) Do(opts ...googleapi.CallOption) (*DriveList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &DriveList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the user's shared drives.",
// "httpMethod": "GET",
// "id": "drive.drives.list",
// "parameters": {
// "pageSize": {
// "default": "10",
// "description": "Maximum number of shared drives to return.",
// "format": "int32",
// "location": "query",
// "maximum": "100",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "Page token for shared drives.",
// "location": "query",
// "type": "string"
// },
// "q": {
// "description": "Query string for searching shared drives.",
// "location": "query",
// "type": "string"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then all shared drives of the domain in which the requester is an administrator are returned.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "drives",
// "response": {
// "$ref": "DriveList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *DrivesListCall) Pages(ctx context.Context, f func(*DriveList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
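// exampleListDriveNames is an illustrative sketch: it walks every page of the
// user's shared drives with DrivesListCall.Pages and collects their names;
// query is an optional search string (e.g. "name contains 'Marketing'"). The
// helper name is the editor's.
func exampleListDriveNames(ctx context.Context, srv *Service, query string) ([]string, error) {
	var names []string
	call := srv.Drives.List().PageSize(100)
	if query != "" {
		call = call.Q(query)
	}
	err := call.Pages(ctx, func(page *DriveList) error {
		for _, d := range page.Drives {
			names = append(names, d.Name)
		}
		return nil
	})
	return names, err
}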
// method id "drive.drives.unhide":
type DrivesUnhideCall struct {
s *Service
driveId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Unhide: Restores a shared drive to the default view.
func (r *DrivesService) Unhide(driveId string) *DrivesUnhideCall {
c := &DrivesUnhideCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.driveId = driveId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesUnhideCall) Fields(s ...googleapi.Field) *DrivesUnhideCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesUnhideCall) Context(ctx context.Context) *DrivesUnhideCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesUnhideCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesUnhideCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives/{driveId}/unhide")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"driveId": c.driveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.unhide" call.
// Exactly one of *Drive or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Drive.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *DrivesUnhideCall) Do(opts ...googleapi.CallOption) (*Drive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Drive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Restores a shared drive to the default view.",
// "httpMethod": "POST",
// "id": "drive.drives.unhide",
// "parameterOrder": [
// "driveId"
// ],
// "parameters": {
// "driveId": {
// "description": "The ID of the shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "drives/{driveId}/unhide",
// "response": {
// "$ref": "Drive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
// method id "drive.drives.update":
type DrivesUpdateCall struct {
s *Service
driveId string
drive *Drive
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates the metadata for a shared drive.
func (r *DrivesService) Update(driveId string, drive *Drive) *DrivesUpdateCall {
c := &DrivesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.driveId = driveId
c.drive = drive
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if they are
// an administrator of the domain to which the shared drive belongs.
func (c *DrivesUpdateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *DrivesUpdateCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DrivesUpdateCall) Fields(s ...googleapi.Field) *DrivesUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DrivesUpdateCall) Context(ctx context.Context) *DrivesUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DrivesUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DrivesUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.drive)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "drives/{driveId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"driveId": c.driveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.drives.update" call.
// Exactly one of *Drive or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Drive.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *DrivesUpdateCall) Do(opts ...googleapi.CallOption) (*Drive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Drive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates the metadate for a shared drive.",
// "httpMethod": "PATCH",
// "id": "drive.drives.update",
// "parameterOrder": [
// "driveId"
// ],
// "parameters": {
// "driveId": {
// "description": "The ID of the shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "drives/{driveId}",
// "request": {
// "$ref": "Drive"
// },
// "response": {
// "$ref": "Drive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
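// exampleRenameDrive is an illustrative sketch: it patches only the name of a
// shared drive; zero-valued fields are omitted from the request body (unless
// listed in ForceSendFields), so other metadata is left as-is. The helper name
// is the editor's.
func exampleRenameDrive(ctx context.Context, srv *Service, driveID, newName string) (*Drive, error) {
	return srv.Drives.Update(driveID, &Drive{Name: newName}).Context(ctx).Do()
}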
// method id "drive.files.copy":
type FilesCopyCall struct {
s *Service
fileId string
file *File
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Copy: Creates a copy of a file and applies any requested updates with
// patch semantics.
func (r *FilesService) Copy(fileId string, file *File) *FilesCopyCall {
c := &FilesCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.file = file
return c
}
// EnforceSingleParent sets the optional parameter
// "enforceSingleParent": Set to true to opt in to API behavior that
// aims for all items to have exactly one parent. This parameter will
// only take effect if the item is not in a shared drive. Requests that
// specify more than one parent will fail.
func (c *FilesCopyCall) EnforceSingleParent(enforceSingleParent bool) *FilesCopyCall {
c.urlParams_.Set("enforceSingleParent", fmt.Sprint(enforceSingleParent))
return c
}
// IgnoreDefaultVisibility sets the optional parameter
// "ignoreDefaultVisibility": Whether to ignore the domain's default
// visibility settings for the created file. Domain administrators can
// choose to make all uploaded files visible to the domain by default;
// this parameter bypasses that behavior for the request. Permissions
// are still inherited from parent folders.
func (c *FilesCopyCall) IgnoreDefaultVisibility(ignoreDefaultVisibility bool) *FilesCopyCall {
c.urlParams_.Set("ignoreDefaultVisibility", fmt.Sprint(ignoreDefaultVisibility))
return c
}
// KeepRevisionForever sets the optional parameter
// "keepRevisionForever": Whether to set the 'keepForever' field in the
// new head revision. This is only applicable to files with binary
// content in Google Drive. Only 200 revisions for the file can be kept
// forever. If the limit is reached, try deleting pinned revisions.
func (c *FilesCopyCall) KeepRevisionForever(keepRevisionForever bool) *FilesCopyCall {
c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever))
return c
}
// OcrLanguage sets the optional parameter "ocrLanguage": A language
// hint for OCR processing during image import (ISO 639-1 code).
func (c *FilesCopyCall) OcrLanguage(ocrLanguage string) *FilesCopyCall {
c.urlParams_.Set("ocrLanguage", ocrLanguage)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesCopyCall) SupportsAllDrives(supportsAllDrives bool) *FilesCopyCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesCopyCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesCopyCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesCopyCall) Fields(s ...googleapi.Field) *FilesCopyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesCopyCall) Context(ctx context.Context) *FilesCopyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesCopyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesCopyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.file)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/copy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.copy" call.
// Exactly one of *File or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *File.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *FilesCopyCall) Do(opts ...googleapi.CallOption) (*File, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &File{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a copy of a file and applies any requested updates with patch semantics.",
// "httpMethod": "POST",
// "id": "drive.files.copy",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "enforceSingleParent": {
// "default": "false",
// "description": "Set to true to opt in to API behavior that aims for all items to have exactly one parent. This parameter will only take effect if the item is not in a shared drive. Requests that specify more than one parent will fail.",
// "location": "query",
// "type": "boolean"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "ignoreDefaultVisibility": {
// "default": "false",
// "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.",
// "location": "query",
// "type": "boolean"
// },
// "keepRevisionForever": {
// "default": "false",
// "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Google Drive. Only 200 revisions for the file can be kept forever. If the limit is reached, try deleting pinned revisions.",
// "location": "query",
// "type": "boolean"
// },
// "ocrLanguage": {
// "description": "A language hint for OCR processing during image import (ISO 639-1 code).",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/copy",
// "request": {
// "$ref": "File"
// },
// "response": {
// "$ref": "File"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.photos.readonly"
// ]
// }
}
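// exampleCopyInto is an illustrative sketch: it copies an existing file into a
// destination folder under a new name. The helper name and the single-parent
// layout are the editor's assumptions.
func exampleCopyInto(ctx context.Context, srv *Service, fileID, destFolderID, newName string) (*File, error) {
	return srv.Files.Copy(fileID, &File{
		Name:    newName,
		Parents: []string{destFolderID}, // one parent, in line with enforceSingleParent semantics
	}).
		SupportsAllDrives(true). // also works when source or destination lives in a shared drive
		Context(ctx).
		Do()
}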
// method id "drive.files.create":
type FilesCreateCall struct {
s *Service
file *File
urlParams_ gensupport.URLParams
mediaInfo_ *gensupport.MediaInfo
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new file.
func (r *FilesService) Create(file *File) *FilesCreateCall {
c := &FilesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.file = file
return c
}
// EnforceSingleParent sets the optional parameter
// "enforceSingleParent": Set to true to opt in to API behavior that
// aims for all items to have exactly one parent. This parameter will
// only take effect if the item is not in a shared drive. Requests that
// specify more than one parent will fail.
func (c *FilesCreateCall) EnforceSingleParent(enforceSingleParent bool) *FilesCreateCall {
c.urlParams_.Set("enforceSingleParent", fmt.Sprint(enforceSingleParent))
return c
}
// IgnoreDefaultVisibility sets the optional parameter
// "ignoreDefaultVisibility": Whether to ignore the domain's default
// visibility settings for the created file. Domain administrators can
// choose to make all uploaded files visible to the domain by default;
// this parameter bypasses that behavior for the request. Permissions
// are still inherited from parent folders.
func (c *FilesCreateCall) IgnoreDefaultVisibility(ignoreDefaultVisibility bool) *FilesCreateCall {
c.urlParams_.Set("ignoreDefaultVisibility", fmt.Sprint(ignoreDefaultVisibility))
return c
}
// KeepRevisionForever sets the optional parameter
// "keepRevisionForever": Whether to set the 'keepForever' field in the
// new head revision. This is only applicable to files with binary
// content in Google Drive. Only 200 revisions for the file can be kept
// forever. If the limit is reached, try deleting pinned revisions.
func (c *FilesCreateCall) KeepRevisionForever(keepRevisionForever bool) *FilesCreateCall {
c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever))
return c
}
// OcrLanguage sets the optional parameter "ocrLanguage": A language
// hint for OCR processing during image import (ISO 639-1 code).
func (c *FilesCreateCall) OcrLanguage(ocrLanguage string) *FilesCreateCall {
c.urlParams_.Set("ocrLanguage", ocrLanguage)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesCreateCall) SupportsAllDrives(supportsAllDrives bool) *FilesCreateCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesCreateCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesCreateCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// UseContentAsIndexableText sets the optional parameter
// "useContentAsIndexableText": Whether to use the uploaded content as
// indexable text.
func (c *FilesCreateCall) UseContentAsIndexableText(useContentAsIndexableText bool) *FilesCreateCall {
c.urlParams_.Set("useContentAsIndexableText", fmt.Sprint(useContentAsIndexableText))
return c
}
// Media specifies the media to upload in one or more chunks. The chunk
// size may be controlled by supplying a MediaOption generated by
// googleapi.ChunkSize. The chunk size defaults to
// googleapi.DefaultUploadChunkSize.The Content-Type header used in the
// upload request will be determined by sniffing the contents of r,
// unless a MediaOption generated by googleapi.ContentType is
// supplied.
// At most one of Media and ResumableMedia may be set.
func (c *FilesCreateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesCreateCall {
c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)
return c
}
// ResumableMedia specifies the media to upload in chunks and can be
// canceled with ctx.
//
// Deprecated: use Media instead.
//
// At most one of Media and ResumableMedia may be set. mediaType
// identifies the MIME media type of the upload, such as "image/png". If
// mediaType is "", it will be auto-detected. The provided ctx will
// supersede any context previously provided to the Context method.
func (c *FilesCreateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesCreateCall {
c.ctx_ = ctx
c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)
return c
}
// ProgressUpdater provides a callback function that will be called
// after every chunk. It should be a low-latency function in order to
// not slow down the upload operation. This should only be called when
// using ResumableMedia (as opposed to Media).
func (c *FilesCreateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesCreateCall {
c.mediaInfo_.SetProgressUpdater(pu)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesCreateCall) Fields(s ...googleapi.Field) *FilesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// This context will supersede any context previously provided to the
// ResumableMedia method.
func (c *FilesCreateCall) Context(ctx context.Context) *FilesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.file)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files")
if c.mediaInfo_ != nil {
urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/drive/v3/files")
c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())
}
if body == nil {
body = new(bytes.Buffer)
reqHeaders.Set("Content-Type", "application/json")
}
body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
defer cleanup()
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
req.GetBody = getBody
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.create" call.
// Exactly one of *File or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *File.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *FilesCreateCall) Do(opts ...googleapi.CallOption) (*File, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))
if rx != nil {
rx.Client = c.s.client
rx.UserAgent = c.s.userAgent()
ctx := c.ctx_
if ctx == nil {
ctx = context.TODO()
}
res, err = rx.Upload(ctx)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
}
ret := &File{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new file.",
// "httpMethod": "POST",
// "id": "drive.files.create",
// "mediaUpload": {
// "accept": [
// "*/*"
// ],
// "maxSize": "5120GB",
// "protocols": {
// "resumable": {
// "multipart": true,
// "path": "/resumable/upload/drive/v3/files"
// },
// "simple": {
// "multipart": true,
// "path": "/upload/drive/v3/files"
// }
// }
// },
// "parameters": {
// "enforceSingleParent": {
// "default": "false",
// "description": "Set to true to opt in to API behavior that aims for all items to have exactly one parent. This parameter will only take effect if the item is not in a shared drive. Requests that specify more than one parent will fail.",
// "location": "query",
// "type": "boolean"
// },
// "ignoreDefaultVisibility": {
// "default": "false",
// "description": "Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.",
// "location": "query",
// "type": "boolean"
// },
// "keepRevisionForever": {
// "default": "false",
// "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Google Drive. Only 200 revisions for the file can be kept forever. If the limit is reached, try deleting pinned revisions.",
// "location": "query",
// "type": "boolean"
// },
// "ocrLanguage": {
// "description": "A language hint for OCR processing during image import (ISO 639-1 code).",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "useContentAsIndexableText": {
// "default": "false",
// "description": "Whether to use the uploaded content as indexable text.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files",
// "request": {
// "$ref": "File"
// },
// "response": {
// "$ref": "File"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file"
// ],
// "supportsMediaUpload": true,
// "supportsSubscription": true
// }
}
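// exampleUploadFile is an illustrative sketch: it creates a new file whose
// content is read from r in the same call, using Media for the upload; the
// content type is sniffed from r unless a googleapi.ContentType option is
// supplied. The 8 MiB chunk size is an arbitrary choice by the editor, not a
// library default.
func exampleUploadFile(ctx context.Context, srv *Service, name string, r io.Reader) (*File, error) {
	return srv.Files.Create(&File{Name: name}).
		Media(r, googleapi.ChunkSize(8*1024*1024)).
		Context(ctx).
		Do()
}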
// method id "drive.files.delete":
type FilesDeleteCall struct {
s *Service
fileId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Permanently deletes a file owned by the user without moving
// it to the trash. If the file belongs to a shared drive the user must
// be an organizer on the parent. If the target is a folder, all
// descendants owned by the user are also deleted.
func (r *FilesService) Delete(fileId string) *FilesDeleteCall {
c := &FilesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesDeleteCall) SupportsAllDrives(supportsAllDrives bool) *FilesDeleteCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesDeleteCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesDeleteCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesDeleteCall) Fields(s ...googleapi.Field) *FilesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesDeleteCall) Context(ctx context.Context) *FilesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.delete" call.
func (c *FilesDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Permanently deletes a file owned by the user without moving it to the trash. If the file belongs to a shared drive the user must be an organizer on the parent. If the target is a folder, all descendants owned by the user are also deleted.",
// "httpMethod": "DELETE",
// "id": "drive.files.delete",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
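// exampleDeleteFile is an illustrative sketch: it permanently deletes a file,
// declaring shared-drive support so items living in shared drives can also be
// removed. The helper name is the editor's.
func exampleDeleteFile(ctx context.Context, srv *Service, fileID string) error {
	return srv.Files.Delete(fileID).SupportsAllDrives(true).Context(ctx).Do()
}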
// method id "drive.files.emptyTrash":
type FilesEmptyTrashCall struct {
s *Service
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// EmptyTrash: Permanently deletes all of the user's trashed files.
func (r *FilesService) EmptyTrash() *FilesEmptyTrashCall {
c := &FilesEmptyTrashCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesEmptyTrashCall) Fields(s ...googleapi.Field) *FilesEmptyTrashCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesEmptyTrashCall) Context(ctx context.Context) *FilesEmptyTrashCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesEmptyTrashCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesEmptyTrashCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/trash")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.emptyTrash" call.
func (c *FilesEmptyTrashCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Permanently deletes all of the user's trashed files.",
// "httpMethod": "DELETE",
// "id": "drive.files.emptyTrash",
// "path": "files/trash",
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
// method id "drive.files.export":
type FilesExportCall struct {
s *Service
fileId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Export: Exports a Google Doc to the requested MIME type and returns
// the exported content. Please note that the exported content is
// limited to 10MB.
func (r *FilesService) Export(fileId string, mimeType string) *FilesExportCall {
c := &FilesExportCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.urlParams_.Set("mimeType", mimeType)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesExportCall) Fields(s ...googleapi.Field) *FilesExportCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesExportCall) IfNoneMatch(entityTag string) *FilesExportCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *FilesExportCall) Context(ctx context.Context) *FilesExportCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesExportCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesExportCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/export")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *FilesExportCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("media")
if err != nil {
return nil, err
}
if err := googleapi.CheckMediaResponse(res); err != nil {
res.Body.Close()
return nil, err
}
return res, nil
}
// Do executes the "drive.files.export" call.
func (c *FilesExportCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Exports a Google Doc to the requested MIME type and returns the exported content. Please note that the exported content is limited to 10MB.",
// "httpMethod": "GET",
// "id": "drive.files.export",
// "parameterOrder": [
// "fileId",
// "mimeType"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "mimeType": {
// "description": "The MIME type of the format requested for this export.",
// "location": "query",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/export",
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.readonly"
// ],
// "supportsMediaDownload": true
// }
}
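// Illustrative usage sketch (not part of the generated surface): exports a
// Google Doc as PDF and reads the exported bytes via Download. Assumes a
// *Service constructed elsewhere; docID and the PDF MIME type are
// placeholders, and the 10MB export limit noted above still applies.
func exampleFilesExport(ctx context.Context, srv *Service, docID string) ([]byte, error) {
	res, err := srv.Files.Export(docID, "application/pdf").Context(ctx).Download()
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Copy the media body into memory; callers with large exports may prefer
	// streaming directly to a file instead.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, res.Body); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}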
// method id "drive.files.generateIds":
type FilesGenerateIdsCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// GenerateIds: Generates a set of file IDs which can be provided in
// create or copy requests.
func (r *FilesService) GenerateIds() *FilesGenerateIdsCall {
c := &FilesGenerateIdsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// Count sets the optional parameter "count": The number of IDs to
// return.
func (c *FilesGenerateIdsCall) Count(count int64) *FilesGenerateIdsCall {
c.urlParams_.Set("count", fmt.Sprint(count))
return c
}
// Space sets the optional parameter "space": The space in which the IDs
// can be used to create new files. Supported values are 'drive' and
// 'appDataFolder'.
func (c *FilesGenerateIdsCall) Space(space string) *FilesGenerateIdsCall {
c.urlParams_.Set("space", space)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesGenerateIdsCall) Fields(s ...googleapi.Field) *FilesGenerateIdsCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesGenerateIdsCall) IfNoneMatch(entityTag string) *FilesGenerateIdsCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesGenerateIdsCall) Context(ctx context.Context) *FilesGenerateIdsCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesGenerateIdsCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesGenerateIdsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/generateIds")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.generateIds" call.
// Exactly one of *GeneratedIds or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *GeneratedIds.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *FilesGenerateIdsCall) Do(opts ...googleapi.CallOption) (*GeneratedIds, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GeneratedIds{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Generates a set of file IDs which can be provided in create or copy requests.",
// "httpMethod": "GET",
// "id": "drive.files.generateIds",
// "parameters": {
// "count": {
// "default": "10",
// "description": "The number of IDs to return.",
// "format": "int32",
// "location": "query",
// "maximum": "1000",
// "minimum": "1",
// "type": "integer"
// },
// "space": {
// "default": "drive",
// "description": "The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "files/generateIds",
// "response": {
// "$ref": "GeneratedIds"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
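// Illustrative usage sketch (not part of the generated surface): pre-allocates
// file IDs that can later be supplied in create or copy requests. Assumes a
// *Service constructed elsewhere; the count of 5 is arbitrary, and the Ids
// field is taken from this package's GeneratedIds type.
func exampleFilesGenerateIds(ctx context.Context, srv *Service) ([]string, error) {
	ids, err := srv.Files.GenerateIds().
		Count(5).
		Space("drive").
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return ids.Ids, nil
}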
// method id "drive.files.get":
type FilesGetCall struct {
s *Service
fileId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a file's metadata or content by ID.
func (r *FilesService) Get(fileId string) *FilesGetCall {
c := &FilesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
return c
}
// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse":
// Whether the user is acknowledging the risk of downloading known
// malware or other abusive files. This is only applicable when
// alt=media.
func (c *FilesGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesGetCall {
c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse))
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesGetCall) SupportsAllDrives(supportsAllDrives bool) *FilesGetCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesGetCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesGetCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesGetCall) Fields(s ...googleapi.Field) *FilesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesGetCall) IfNoneMatch(entityTag string) *FilesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *FilesGetCall) Context(ctx context.Context) *FilesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *FilesGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("media")
if err != nil {
return nil, err
}
if err := googleapi.CheckMediaResponse(res); err != nil {
res.Body.Close()
return nil, err
}
return res, nil
}
// Do executes the "drive.files.get" call.
// Exactly one of *File or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *File.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *FilesGetCall) Do(opts ...googleapi.CallOption) (*File, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &File{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a file's metadata or content by ID.",
// "httpMethod": "GET",
// "id": "drive.files.get",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "acknowledgeAbuse": {
// "default": "false",
// "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.",
// "location": "query",
// "type": "boolean"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}",
// "response": {
// "$ref": "File"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ],
// "supportsMediaDownload": true,
// "supportsSubscription": true,
// "useMediaDownloadService": true
// }
}
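// Illustrative usage sketch (not part of the generated surface): fetches a
// file's metadata with a restricted field set, then streams its content with
// a second Get call using Download. Assumes a *Service constructed elsewhere;
// fileID is a placeholder.
func exampleFilesGet(ctx context.Context, srv *Service, fileID string) (*File, []byte, error) {
	meta, err := srv.Files.Get(fileID).
		Fields("id", "name", "mimeType", "size").
		Context(ctx).
		Do()
	if err != nil {
		return nil, nil, err
	}
	res, err := srv.Files.Get(fileID).Context(ctx).Download()
	if err != nil {
		return nil, nil, err
	}
	defer res.Body.Close()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, res.Body); err != nil {
		return nil, nil, err
	}
	return meta, buf.Bytes(), nil
}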
// method id "drive.files.list":
type FilesListCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists or searches files.
func (r *FilesService) List() *FilesListCall {
c := &FilesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// Corpora sets the optional parameter "corpora": Bodies of items
// (files/documents) to which the query applies. Supported bodies are
// 'user', 'domain', 'drive' and 'allDrives'. Prefer 'user' or 'drive'
// to 'allDrives' for efficiency.
func (c *FilesListCall) Corpora(corpora string) *FilesListCall {
c.urlParams_.Set("corpora", corpora)
return c
}
// Corpus sets the optional parameter "corpus": The source of files to
// list. Deprecated: use 'corpora' instead.
//
// Possible values:
// "domain" - Files shared to the user's domain.
// "user" - Files owned by or shared to the user.
func (c *FilesListCall) Corpus(corpus string) *FilesListCall {
c.urlParams_.Set("corpus", corpus)
return c
}
// DriveId sets the optional parameter "driveId": ID of the shared drive
// to search.
func (c *FilesListCall) DriveId(driveId string) *FilesListCall {
c.urlParams_.Set("driveId", driveId)
return c
}
// IncludeItemsFromAllDrives sets the optional parameter
// "includeItemsFromAllDrives": Deprecated - Whether both My Drive and
// shared drive items should be included in results. This parameter will
// only be effective until June 1, 2020. Afterwards shared drive items
// will be included in the results.
func (c *FilesListCall) IncludeItemsFromAllDrives(includeItemsFromAllDrives bool) *FilesListCall {
c.urlParams_.Set("includeItemsFromAllDrives", fmt.Sprint(includeItemsFromAllDrives))
return c
}
// IncludeTeamDriveItems sets the optional parameter
// "includeTeamDriveItems": Deprecated use includeItemsFromAllDrives
// instead.
func (c *FilesListCall) IncludeTeamDriveItems(includeTeamDriveItems bool) *FilesListCall {
c.urlParams_.Set("includeTeamDriveItems", fmt.Sprint(includeTeamDriveItems))
return c
}
// OrderBy sets the optional parameter "orderBy": A comma-separated list
// of sort keys. Valid keys are 'createdTime', 'folder',
// 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural',
// 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and
// 'viewedByMeTime'. Each key sorts ascending by default, but may be
// reversed with the 'desc' modifier. Example usage:
// ?orderBy=folder,modifiedTime desc,name. Please note that there is a
// current limitation for users with approximately one million files in
// which the requested sort order is ignored.
func (c *FilesListCall) OrderBy(orderBy string) *FilesListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of files to return per page. Partial or empty result pages are
// possible even before the end of the files list has been reached.
func (c *FilesListCall) PageSize(pageSize int64) *FilesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token for
// continuing a previous list request on the next page. This should be
// set to the value of 'nextPageToken' from the previous response.
func (c *FilesListCall) PageToken(pageToken string) *FilesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Q sets the optional parameter "q": A query for filtering the file
// results. See the "Search for Files" guide for supported syntax.
func (c *FilesListCall) Q(q string) *FilesListCall {
c.urlParams_.Set("q", q)
return c
}
// Spaces sets the optional parameter "spaces": A comma-separated list
// of spaces to query within the corpus. Supported values are 'drive',
// 'appDataFolder' and 'photos'.
func (c *FilesListCall) Spaces(spaces string) *FilesListCall {
c.urlParams_.Set("spaces", spaces)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesListCall) SupportsAllDrives(supportsAllDrives bool) *FilesListCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesListCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesListCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// TeamDriveId sets the optional parameter "teamDriveId": Deprecated: use
// driveId instead.
func (c *FilesListCall) TeamDriveId(teamDriveId string) *FilesListCall {
c.urlParams_.Set("teamDriveId", teamDriveId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesListCall) Fields(s ...googleapi.Field) *FilesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FilesListCall) IfNoneMatch(entityTag string) *FilesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FilesListCall) Context(ctx context.Context) *FilesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.list" call.
// Exactly one of *FileList or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *FileList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *FilesListCall) Do(opts ...googleapi.CallOption) (*FileList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &FileList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists or searches files.",
// "httpMethod": "GET",
// "id": "drive.files.list",
// "parameters": {
// "corpora": {
// "description": "Bodies of items (files/documents) to which the query applies. Supported bodies are 'user', 'domain', 'drive' and 'allDrives'. Prefer 'user' or 'drive' to 'allDrives' for efficiency.",
// "location": "query",
// "type": "string"
// },
// "corpus": {
// "description": "The source of files to list. Deprecated: use 'corpora' instead.",
// "enum": [
// "domain",
// "user"
// ],
// "enumDescriptions": [
// "Files shared to the user's domain.",
// "Files owned by or shared to the user."
// ],
// "location": "query",
// "type": "string"
// },
// "driveId": {
// "description": "ID of the shared drive to search.",
// "location": "query",
// "type": "string"
// },
// "includeItemsFromAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether both My Drive and shared drive items should be included in results. This parameter will only be effective until June 1, 2020. Afterwards shared drive items will be included in the results.",
// "location": "query",
// "type": "boolean"
// },
// "includeTeamDriveItems": {
// "default": "false",
// "description": "Deprecated use includeItemsFromAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "orderBy": {
// "description": "A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "default": "100",
// "description": "The maximum number of files to return per page. Partial or empty result pages are possible even before the end of the files list has been reached.",
// "format": "int32",
// "location": "query",
// "maximum": "1000",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.",
// "location": "query",
// "type": "string"
// },
// "q": {
// "description": "A query for filtering the file results. See the \"Search for Files\" guide for supported syntax.",
// "location": "query",
// "type": "string"
// },
// "spaces": {
// "default": "drive",
// "description": "A comma-separated list of spaces to query within the corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "teamDriveId": {
// "description": "Deprecated use driveId instead.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "files",
// "response": {
// "$ref": "FileList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *FilesListCall) Pages(ctx context.Context, f func(*FileList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
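// Illustrative usage sketch (not part of the generated surface): walks every
// page of a search via Pages, collecting IDs and names. Assumes a *Service
// constructed elsewhere; the query string and page size are placeholders, and
// the Files, Id and Name fields are taken from this package's FileList and
// File types.
func exampleFilesList(ctx context.Context, srv *Service) (map[string]string, error) {
	found := make(map[string]string)
	err := srv.Files.List().
		Q("mimeType != 'application/vnd.google-apps.folder' and trashed = false").
		PageSize(100).
		Fields("nextPageToken", "files(id, name)").
		Pages(ctx, func(page *FileList) error {
			for _, f := range page.Files {
				found[f.Id] = f.Name
			}
			return nil // returning an error here would halt the iteration
		})
	return found, err
}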
// method id "drive.files.update":
type FilesUpdateCall struct {
s *Service
fileId string
file *File
urlParams_ gensupport.URLParams
mediaInfo_ *gensupport.MediaInfo
ctx_ context.Context
header_ http.Header
}
// Update: Updates a file's metadata and/or content with patch
// semantics.
func (r *FilesService) Update(fileId string, file *File) *FilesUpdateCall {
c := &FilesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.file = file
return c
}
// AddParents sets the optional parameter "addParents": A
// comma-separated list of parent IDs to add.
func (c *FilesUpdateCall) AddParents(addParents string) *FilesUpdateCall {
c.urlParams_.Set("addParents", addParents)
return c
}
// EnforceSingleParent sets the optional parameter
// "enforceSingleParent": Set to true to opt in to API behavior that
// aims for all items to have exactly one parent. This parameter will
// only take effect if the item is not in a shared drive. If the item's
// owner makes a request to add a single parent, the item will be
// removed from all current folders and placed in the requested folder.
// Other requests that increase the number of parents will fail, except
// when the canAddMyDriveParent file capability is true and a single
// parent is being added.
func (c *FilesUpdateCall) EnforceSingleParent(enforceSingleParent bool) *FilesUpdateCall {
c.urlParams_.Set("enforceSingleParent", fmt.Sprint(enforceSingleParent))
return c
}
// KeepRevisionForever sets the optional parameter
// "keepRevisionForever": Whether to set the 'keepForever' field in the
// new head revision. This is only applicable to files with binary
// content in Google Drive. Only 200 revisions for the file can be kept
// forever. If the limit is reached, try deleting pinned revisions.
func (c *FilesUpdateCall) KeepRevisionForever(keepRevisionForever bool) *FilesUpdateCall {
c.urlParams_.Set("keepRevisionForever", fmt.Sprint(keepRevisionForever))
return c
}
// OcrLanguage sets the optional parameter "ocrLanguage": A language
// hint for OCR processing during image import (ISO 639-1 code).
func (c *FilesUpdateCall) OcrLanguage(ocrLanguage string) *FilesUpdateCall {
c.urlParams_.Set("ocrLanguage", ocrLanguage)
return c
}
// RemoveParents sets the optional parameter "removeParents": A
// comma-separated list of parent IDs to remove.
func (c *FilesUpdateCall) RemoveParents(removeParents string) *FilesUpdateCall {
c.urlParams_.Set("removeParents", removeParents)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesUpdateCall) SupportsAllDrives(supportsAllDrives bool) *FilesUpdateCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesUpdateCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesUpdateCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// UseContentAsIndexableText sets the optional parameter
// "useContentAsIndexableText": Whether to use the uploaded content as
// indexable text.
func (c *FilesUpdateCall) UseContentAsIndexableText(useContentAsIndexableText bool) *FilesUpdateCall {
c.urlParams_.Set("useContentAsIndexableText", fmt.Sprint(useContentAsIndexableText))
return c
}
// Media specifies the media to upload in one or more chunks. The chunk
// size may be controlled by supplying a MediaOption generated by
// googleapi.ChunkSize. The chunk size defaults to
// googleapi.DefaultUploadChunkSize. The Content-Type header used in the
// upload request will be determined by sniffing the contents of r,
// unless a MediaOption generated by googleapi.ContentType is
// supplied.
// At most one of Media and ResumableMedia may be set.
func (c *FilesUpdateCall) Media(r io.Reader, options ...googleapi.MediaOption) *FilesUpdateCall {
c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)
return c
}
// ResumableMedia specifies the media to upload in chunks and can be
// canceled with ctx.
//
// Deprecated: use Media instead.
//
// At most one of Media and ResumableMedia may be set. mediaType
// identifies the MIME media type of the upload, such as "image/png". If
// mediaType is "", it will be auto-detected. The provided ctx will
// supersede any context previously provided to the Context method.
func (c *FilesUpdateCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *FilesUpdateCall {
c.ctx_ = ctx
c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)
return c
}
// ProgressUpdater provides a callback function that will be called
// after every chunk. It should be a low-latency function in order to
// not slow down the upload operation. This should only be called when
// using ResumableMedia (as opposed to Media).
func (c *FilesUpdateCall) ProgressUpdater(pu googleapi.ProgressUpdater) *FilesUpdateCall {
c.mediaInfo_.SetProgressUpdater(pu)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesUpdateCall) Fields(s ...googleapi.Field) *FilesUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// This context will supersede any context previously provided to the
// ResumableMedia method.
func (c *FilesUpdateCall) Context(ctx context.Context) *FilesUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.file)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}")
if c.mediaInfo_ != nil {
urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/drive/v3/files/{fileId}")
c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())
}
if body == nil {
body = new(bytes.Buffer)
reqHeaders.Set("Content-Type", "application/json")
}
body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
defer cleanup()
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
req.GetBody = getBody
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.files.update" call.
// Exactly one of *File or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *File.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *FilesUpdateCall) Do(opts ...googleapi.CallOption) (*File, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))
if rx != nil {
rx.Client = c.s.client
rx.UserAgent = c.s.userAgent()
ctx := c.ctx_
if ctx == nil {
ctx = context.TODO()
}
res, err = rx.Upload(ctx)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
}
ret := &File{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a file's metadata and/or content with patch semantics.",
// "httpMethod": "PATCH",
// "id": "drive.files.update",
// "mediaUpload": {
// "accept": [
// "*/*"
// ],
// "maxSize": "5120GB",
// "protocols": {
// "resumable": {
// "multipart": true,
// "path": "/resumable/upload/drive/v3/files/{fileId}"
// },
// "simple": {
// "multipart": true,
// "path": "/upload/drive/v3/files/{fileId}"
// }
// }
// },
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "addParents": {
// "description": "A comma-separated list of parent IDs to add.",
// "location": "query",
// "type": "string"
// },
// "enforceSingleParent": {
// "default": "false",
// "description": "Set to true to opt in to API behavior that aims for all items to have exactly one parent. This parameter will only take effect if the item is not in a shared drive. If the item's owner makes a request to add a single parent, the item will be removed from all current folders and placed in the requested folder. Other requests that increase the number of parents will fail, except when the canAddMyDriveParent file capability is true and a single parent is being added.",
// "location": "query",
// "type": "boolean"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "keepRevisionForever": {
// "default": "false",
// "description": "Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Google Drive. Only 200 revisions for the file can be kept forever. If the limit is reached, try deleting pinned revisions.",
// "location": "query",
// "type": "boolean"
// },
// "ocrLanguage": {
// "description": "A language hint for OCR processing during image import (ISO 639-1 code).",
// "location": "query",
// "type": "string"
// },
// "removeParents": {
// "description": "A comma-separated list of parent IDs to remove.",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "useContentAsIndexableText": {
// "default": "false",
// "description": "Whether to use the uploaded content as indexable text.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}",
// "request": {
// "$ref": "File"
// },
// "response": {
// "$ref": "File"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.scripts"
// ],
// "supportsMediaUpload": true
// }
}
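// Illustrative usage sketch (not part of the generated surface): renames a
// file and replaces its content in the same update call, uploading the new
// bytes through Media. Assumes a *Service constructed elsewhere; fileID,
// newName and content are placeholders supplied by the caller, and the Name
// field is taken from this package's File type.
func exampleFilesUpdate(ctx context.Context, srv *Service, fileID, newName string, content io.Reader) (*File, error) {
	return srv.Files.Update(fileID, &File{Name: newName}).
		Media(content, googleapi.ContentType("text/plain")).
		KeepRevisionForever(false).
		Context(ctx).
		Do()
}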
// method id "drive.files.watch":
type FilesWatchCall struct {
s *Service
fileId string
channel *Channel
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Watch: Subscribes to changes to a file.
func (r *FilesService) Watch(fileId string, channel *Channel) *FilesWatchCall {
c := &FilesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.channel = channel
return c
}
// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse":
// Whether the user is acknowledging the risk of downloading known
// malware or other abusive files. This is only applicable when
// alt=media.
func (c *FilesWatchCall) AcknowledgeAbuse(acknowledgeAbuse bool) *FilesWatchCall {
c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse))
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *FilesWatchCall) SupportsAllDrives(supportsAllDrives bool) *FilesWatchCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *FilesWatchCall) SupportsTeamDrives(supportsTeamDrives bool) *FilesWatchCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FilesWatchCall) Fields(s ...googleapi.Field) *FilesWatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *FilesWatchCall) Context(ctx context.Context) *FilesWatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FilesWatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FilesWatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/watch")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *FilesWatchCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("media")
if err != nil {
return nil, err
}
if err := googleapi.CheckMediaResponse(res); err != nil {
res.Body.Close()
return nil, err
}
return res, nil
}
// Do executes the "drive.files.watch" call.
// Exactly one of *Channel or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Channel.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *FilesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Channel{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Subscribes to changes to a file",
// "httpMethod": "POST",
// "id": "drive.files.watch",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "acknowledgeAbuse": {
// "default": "false",
// "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.",
// "location": "query",
// "type": "boolean"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/watch",
// "request": {
// "$ref": "Channel",
// "parameterName": "resource"
// },
// "response": {
// "$ref": "Channel"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ],
// "supportsMediaDownload": true,
// "supportsSubscription": true,
// "useMediaDownloadService": true
// }
}
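// Illustrative usage sketch (not part of the generated surface): registers a
// webhook channel for change notifications on a single file. Assumes a
// *Service constructed elsewhere; the channel ID and HTTPS address are
// placeholders, and the Id, Type and Address fields are taken from this
// package's Channel type.
func exampleFilesWatch(ctx context.Context, srv *Service, fileID string) (*Channel, error) {
	req := &Channel{
		Id:      "example-channel-id",                          // caller-chosen unique ID (placeholder)
		Type:    "web_hook",                                    // push notifications are delivered over HTTPS
		Address: "https://example.com/drive/notifications",     // placeholder receiving endpoint
	}
	return srv.Files.Watch(fileID, req).Context(ctx).Do()
}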
// method id "drive.permissions.create":
type PermissionsCreateCall struct {
s *Service
fileId string
permission *Permission
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a permission for a file or shared drive.
func (r *PermissionsService) Create(fileId string, permission *Permission) *PermissionsCreateCall {
c := &PermissionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.permission = permission
return c
}
// EmailMessage sets the optional parameter "emailMessage": A plain text
// custom message to include in the notification email.
func (c *PermissionsCreateCall) EmailMessage(emailMessage string) *PermissionsCreateCall {
c.urlParams_.Set("emailMessage", emailMessage)
return c
}
// EnforceSingleParent sets the optional parameter
// "enforceSingleParent": Set to true to opt in to API behavior that
// aims for all items to have exactly one parent. This parameter will
// only take effect if the item is not in a shared drive. See
// moveToNewOwnersRoot for details.
func (c *PermissionsCreateCall) EnforceSingleParent(enforceSingleParent bool) *PermissionsCreateCall {
c.urlParams_.Set("enforceSingleParent", fmt.Sprint(enforceSingleParent))
return c
}
// MoveToNewOwnersRoot sets the optional parameter
// "moveToNewOwnersRoot": This parameter will only take effect if the
// item is not in a shared drive and the request is attempting to
// transfer the ownership of the item. When set to true, the item will
// be moved to the new owner's My Drive root folder and all prior
// parents removed. If set to false, when enforceSingleParent=true,
// parents are not changed. If set to false, when
// enforceSingleParent=false, existing parents are not changed; however,
// the file will be added to the new owner's My Drive root folder,
// unless it is already in the new owner's My Drive.
func (c *PermissionsCreateCall) MoveToNewOwnersRoot(moveToNewOwnersRoot bool) *PermissionsCreateCall {
c.urlParams_.Set("moveToNewOwnersRoot", fmt.Sprint(moveToNewOwnersRoot))
return c
}
// SendNotificationEmail sets the optional parameter
// "sendNotificationEmail": Whether to send a notification email when
// sharing to users or groups. This defaults to true for users and
// groups, and is not allowed for other requests. It must not be
// disabled for ownership transfers.
func (c *PermissionsCreateCall) SendNotificationEmail(sendNotificationEmail bool) *PermissionsCreateCall {
c.urlParams_.Set("sendNotificationEmail", fmt.Sprint(sendNotificationEmail))
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *PermissionsCreateCall) SupportsAllDrives(supportsAllDrives bool) *PermissionsCreateCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated: use supportsAllDrives instead.
func (c *PermissionsCreateCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsCreateCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// TransferOwnership sets the optional parameter "transferOwnership":
// Whether to transfer ownership to the specified user and downgrade the
// current owner to a writer. This parameter is required as an
// acknowledgement of the side effect.
func (c *PermissionsCreateCall) TransferOwnership(transferOwnership bool) *PermissionsCreateCall {
c.urlParams_.Set("transferOwnership", fmt.Sprint(transferOwnership))
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if the file
// ID parameter refers to a shared drive and the requester is an
// administrator of the domain to which the shared drive belongs.
func (c *PermissionsCreateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsCreateCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PermissionsCreateCall) Fields(s ...googleapi.Field) *PermissionsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PermissionsCreateCall) Context(ctx context.Context) *PermissionsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PermissionsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *PermissionsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.permissions.create" call.
// Exactly one of *Permission or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Permission.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *PermissionsCreateCall) Do(opts ...googleapi.CallOption) (*Permission, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Permission{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a permission for a file or shared drive.",
// "httpMethod": "POST",
// "id": "drive.permissions.create",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "emailMessage": {
// "description": "A plain text custom message to include in the notification email.",
// "location": "query",
// "type": "string"
// },
// "enforceSingleParent": {
// "default": "false",
// "description": "Set to true to opt in to API behavior that aims for all items to have exactly one parent. This parameter will only take effect if the item is not in a shared drive. See moveToNewOwnersRoot for details.",
// "location": "query",
// "type": "boolean"
// },
// "fileId": {
// "description": "The ID of the file or shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "moveToNewOwnersRoot": {
// "default": "false",
// "description": "This parameter will only take effect if the item is not in a shared drive and the request is attempting to transfer the ownership of the item. When set to true, the item will be moved to the new owner's My Drive root folder and all prior parents removed. If set to false, when enforceSingleParent=true, parents are not changed. If set to false, when enforceSingleParent=false, existing parents are not changed; however, the file will be added to the new owner's My Drive root folder, unless it is already in the new owner's My Drive.",
// "location": "query",
// "type": "boolean"
// },
// "sendNotificationEmail": {
// "description": "Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other requests. It must not be disabled for ownership transfers.",
// "location": "query",
// "type": "boolean"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "transferOwnership": {
// "default": "false",
// "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.",
// "location": "query",
// "type": "boolean"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if the file ID parameter refers to a shared drive and the requester is an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/permissions",
// "request": {
// "$ref": "Permission"
// },
// "response": {
// "$ref": "Permission"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
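// examplePermissionsCreate is an illustrative sketch, not part of the
// generated API surface: it shows how a caller might grant a user read access
// to a file with PermissionsCreateCall. The file ID and e-mail address are
// hypothetical placeholders.
func examplePermissionsCreate(ctx context.Context, srv *Service, fileID string) (*Permission, error) {
    // A "user" grantee identified by e-mail address, granted the "reader" role.
    perm := &Permission{
        Type:         "user",
        Role:         "reader",
        EmailAddress: "[email protected]",
    }
    return srv.Permissions.Create(fileID, perm).Context(ctx).Do()
}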
// method id "drive.permissions.delete":
type PermissionsDeleteCall struct {
s *Service
fileId string
permissionId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a permission.
func (r *PermissionsService) Delete(fileId string, permissionId string) *PermissionsDeleteCall {
c := &PermissionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.permissionId = permissionId
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *PermissionsDeleteCall) SupportsAllDrives(supportsAllDrives bool) *PermissionsDeleteCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated - use supportsAllDrives instead.
func (c *PermissionsDeleteCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsDeleteCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if the file
// ID parameter refers to a shared drive and the requester is an
// administrator of the domain to which the shared drive belongs.
func (c *PermissionsDeleteCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsDeleteCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PermissionsDeleteCall) Fields(s ...googleapi.Field) *PermissionsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PermissionsDeleteCall) Context(ctx context.Context) *PermissionsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PermissionsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *PermissionsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"permissionId": c.permissionId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.permissions.delete" call.
func (c *PermissionsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes a permission.",
// "httpMethod": "DELETE",
// "id": "drive.permissions.delete",
// "parameterOrder": [
// "fileId",
// "permissionId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file or shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "permissionId": {
// "description": "The ID of the permission.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if the file ID parameter refers to a shared drive and the requester is an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/permissions/{permissionId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
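// examplePermissionsDelete is an illustrative sketch (not generated code)
// showing how PermissionsDeleteCall is driven: optional query parameters are
// set on the call and Do, which returns only an error, finishes it.
// UseDomainAdminAccess(true) is how a domain administrator would act on a
// shared drive they are not otherwise granted access to.
func examplePermissionsDelete(ctx context.Context, srv *Service, fileID, permissionID string) error {
    return srv.Permissions.Delete(fileID, permissionID).
        SupportsAllDrives(true).
        UseDomainAdminAccess(true).
        Context(ctx).
        Do()
}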
// method id "drive.permissions.get":
type PermissionsGetCall struct {
s *Service
fileId string
permissionId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a permission by ID.
func (r *PermissionsService) Get(fileId string, permissionId string) *PermissionsGetCall {
c := &PermissionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.permissionId = permissionId
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *PermissionsGetCall) SupportsAllDrives(supportsAllDrives bool) *PermissionsGetCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated - use supportsAllDrives instead.
func (c *PermissionsGetCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsGetCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if the file
// ID parameter refers to a shared drive and the requester is an
// administrator of the domain to which the shared drive belongs.
func (c *PermissionsGetCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsGetCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PermissionsGetCall) Fields(s ...googleapi.Field) *PermissionsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *PermissionsGetCall) IfNoneMatch(entityTag string) *PermissionsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PermissionsGetCall) Context(ctx context.Context) *PermissionsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PermissionsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *PermissionsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"permissionId": c.permissionId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.permissions.get" call.
// Exactly one of *Permission or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Permission.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *PermissionsGetCall) Do(opts ...googleapi.CallOption) (*Permission, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Permission{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a permission by ID.",
// "httpMethod": "GET",
// "id": "drive.permissions.get",
// "parameterOrder": [
// "fileId",
// "permissionId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "permissionId": {
// "description": "The ID of the permission.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if the file ID parameter refers to a shared drive and the requester is an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/permissions/{permissionId}",
// "response": {
// "$ref": "Permission"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
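// examplePermissionsGet is an illustrative sketch (not generated code) of the
// conditional-fetch pattern on PermissionsGetCall: an ETag from a previous
// response is supplied via IfNoneMatch, and googleapi.IsNotModified separates
// "unchanged" from a real failure. The ETag value is a hypothetical placeholder.
func examplePermissionsGet(ctx context.Context, srv *Service, fileID, permissionID, etag string) (*Permission, error) {
    perm, err := srv.Permissions.Get(fileID, permissionID).
        IfNoneMatch(etag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        // The permission has not changed since the cached ETag was obtained.
        return nil, nil
    }
    return perm, err
}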
// method id "drive.permissions.list":
type PermissionsListCall struct {
s *Service
fileId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists a file's or shared drive's permissions.
func (r *PermissionsService) List(fileId string) *PermissionsListCall {
c := &PermissionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of permissions to return per page. When not set for files in a shared
// drive, at most 100 results will be returned. When not set for files
// that are not in a shared drive, the entire list will be returned.
func (c *PermissionsListCall) PageSize(pageSize int64) *PermissionsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token for
// continuing a previous list request on the next page. This should be
// set to the value of 'nextPageToken' from the previous response.
func (c *PermissionsListCall) PageToken(pageToken string) *PermissionsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *PermissionsListCall) SupportsAllDrives(supportsAllDrives bool) *PermissionsListCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated - use supportsAllDrives instead.
func (c *PermissionsListCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsListCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if the file
// ID parameter refers to a shared drive and the requester is an
// administrator of the domain to which the shared drive belongs.
func (c *PermissionsListCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsListCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PermissionsListCall) Fields(s ...googleapi.Field) *PermissionsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *PermissionsListCall) IfNoneMatch(entityTag string) *PermissionsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PermissionsListCall) Context(ctx context.Context) *PermissionsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PermissionsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *PermissionsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.permissions.list" call.
// Exactly one of *PermissionList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *PermissionList.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *PermissionsListCall) Do(opts ...googleapi.CallOption) (*PermissionList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &PermissionList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists a file's or shared drive's permissions.",
// "httpMethod": "GET",
// "id": "drive.permissions.list",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file or shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "The maximum number of permissions to return per page. When not set for files in a shared drive, at most 100 results will be returned. When not set for files that are not in a shared drive, the entire list will be returned.",
// "format": "int32",
// "location": "query",
// "maximum": "100",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.",
// "location": "query",
// "type": "string"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if the file ID parameter refers to a shared drive and the requester is an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/permissions",
// "response": {
// "$ref": "PermissionList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *PermissionsListCall) Pages(ctx context.Context, f func(*PermissionList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
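// examplePermissionsListAll is an illustrative sketch (not generated code)
// showing how Pages drives PermissionsListCall across every page of results,
// so the caller never touches nextPageToken directly. Accumulating into a
// slice is just one possible use of the callback.
func examplePermissionsListAll(ctx context.Context, srv *Service, fileID string) ([]*Permission, error) {
    var all []*Permission
    err := srv.Permissions.List(fileID).
        PageSize(100).
        Pages(ctx, func(page *PermissionList) error {
            all = append(all, page.Permissions...)
            return nil
        })
    return all, err
}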
// method id "drive.permissions.update":
type PermissionsUpdateCall struct {
s *Service
fileId string
permissionId string
permission *Permission
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a permission with patch semantics.
func (r *PermissionsService) Update(fileId string, permissionId string, permission *Permission) *PermissionsUpdateCall {
c := &PermissionsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.permissionId = permissionId
c.permission = permission
return c
}
// RemoveExpiration sets the optional parameter "removeExpiration":
// Whether to remove the expiration date.
func (c *PermissionsUpdateCall) RemoveExpiration(removeExpiration bool) *PermissionsUpdateCall {
c.urlParams_.Set("removeExpiration", fmt.Sprint(removeExpiration))
return c
}
// SupportsAllDrives sets the optional parameter "supportsAllDrives":
// Deprecated - Whether the requesting application supports both My
// Drives and shared drives. This parameter will only be effective until
// June 1, 2020. Afterwards all applications are assumed to support
// shared drives.
func (c *PermissionsUpdateCall) SupportsAllDrives(supportsAllDrives bool) *PermissionsUpdateCall {
c.urlParams_.Set("supportsAllDrives", fmt.Sprint(supportsAllDrives))
return c
}
// SupportsTeamDrives sets the optional parameter "supportsTeamDrives":
// Deprecated - use supportsAllDrives instead.
func (c *PermissionsUpdateCall) SupportsTeamDrives(supportsTeamDrives bool) *PermissionsUpdateCall {
c.urlParams_.Set("supportsTeamDrives", fmt.Sprint(supportsTeamDrives))
return c
}
// TransferOwnership sets the optional parameter "transferOwnership":
// Whether to transfer ownership to the specified user and downgrade the
// current owner to a writer. This parameter is required as an
// acknowledgement of the side effect.
func (c *PermissionsUpdateCall) TransferOwnership(transferOwnership bool) *PermissionsUpdateCall {
c.urlParams_.Set("transferOwnership", fmt.Sprint(transferOwnership))
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if the file
// ID parameter refers to a shared drive and the requester is an
// administrator of the domain to which the shared drive belongs.
func (c *PermissionsUpdateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *PermissionsUpdateCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PermissionsUpdateCall) Fields(s ...googleapi.Field) *PermissionsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PermissionsUpdateCall) Context(ctx context.Context) *PermissionsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PermissionsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *PermissionsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.permission)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/permissions/{permissionId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"permissionId": c.permissionId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.permissions.update" call.
// Exactly one of *Permission or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Permission.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *PermissionsUpdateCall) Do(opts ...googleapi.CallOption) (*Permission, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Permission{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a permission with patch semantics.",
// "httpMethod": "PATCH",
// "id": "drive.permissions.update",
// "parameterOrder": [
// "fileId",
// "permissionId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file or shared drive.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "permissionId": {
// "description": "The ID of the permission.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "removeExpiration": {
// "default": "false",
// "description": "Whether to remove the expiration date.",
// "location": "query",
// "type": "boolean"
// },
// "supportsAllDrives": {
// "default": "false",
// "description": "Deprecated - Whether the requesting application supports both My Drives and shared drives. This parameter will only be effective until June 1, 2020. Afterwards all applications are assumed to support shared drives.",
// "location": "query",
// "type": "boolean"
// },
// "supportsTeamDrives": {
// "default": "false",
// "description": "Deprecated use supportsAllDrives instead.",
// "location": "query",
// "type": "boolean"
// },
// "transferOwnership": {
// "default": "false",
// "description": "Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of the side effect.",
// "location": "query",
// "type": "boolean"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if the file ID parameter refers to a shared drive and the requester is an administrator of the domain to which the shared drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "files/{fileId}/permissions/{permissionId}",
// "request": {
// "$ref": "Permission"
// },
// "response": {
// "$ref": "Permission"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
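// examplePermissionsTransferOwner is an illustrative sketch (not generated
// code) of the patch-semantics update: only the Role field is sent in the
// request body, and TransferOwnership(true) acknowledges the documented side
// effect of downgrading the current owner to a writer.
func examplePermissionsTransferOwner(ctx context.Context, srv *Service, fileID, permissionID string) (*Permission, error) {
    patch := &Permission{Role: "owner"}
    return srv.Permissions.Update(fileID, permissionID, patch).
        TransferOwnership(true).
        Context(ctx).
        Do()
}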
// method id "drive.replies.create":
type RepliesCreateCall struct {
s *Service
fileId string
commentId string
reply *Reply
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new reply to a comment.
func (r *RepliesService) Create(fileId string, commentId string, reply *Reply) *RepliesCreateCall {
c := &RepliesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
c.reply = reply
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RepliesCreateCall) Fields(s ...googleapi.Field) *RepliesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RepliesCreateCall) Context(ctx context.Context) *RepliesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RepliesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RepliesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.replies.create" call.
// Exactly one of *Reply or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Reply.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *RepliesCreateCall) Do(opts ...googleapi.CallOption) (*Reply, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Reply{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new reply to a comment.",
// "httpMethod": "POST",
// "id": "drive.replies.create",
// "parameterOrder": [
// "fileId",
// "commentId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}/replies",
// "request": {
// "$ref": "Reply"
// },
// "response": {
// "$ref": "Reply"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
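// exampleRepliesCreate is an illustrative sketch (not generated code) showing
// how RepliesCreateCall posts a new reply under an existing comment. The reply
// text is a hypothetical placeholder; only Content is needed for a plain reply.
func exampleRepliesCreate(ctx context.Context, srv *Service, fileID, commentID string) (*Reply, error) {
    reply := &Reply{Content: "Thanks, fixed in the latest draft."}
    return srv.Replies.Create(fileID, commentID, reply).Context(ctx).Do()
}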
// method id "drive.replies.delete":
type RepliesDeleteCall struct {
s *Service
fileId string
commentId string
replyId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a reply.
func (r *RepliesService) Delete(fileId string, commentId string, replyId string) *RepliesDeleteCall {
c := &RepliesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
c.replyId = replyId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RepliesDeleteCall) Fields(s ...googleapi.Field) *RepliesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RepliesDeleteCall) Context(ctx context.Context) *RepliesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RepliesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RepliesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
"replyId": c.replyId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.replies.delete" call.
func (c *RepliesDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes a reply.",
// "httpMethod": "DELETE",
// "id": "drive.replies.delete",
// "parameterOrder": [
// "fileId",
// "commentId",
// "replyId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "replyId": {
// "description": "The ID of the reply.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}/replies/{replyId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
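// exampleRepliesDelete is an illustrative sketch (not generated code): deleting
// a reply takes the file, comment and reply IDs, and Do returns only an error.
func exampleRepliesDelete(ctx context.Context, srv *Service, fileID, commentID, replyID string) error {
    return srv.Replies.Delete(fileID, commentID, replyID).Context(ctx).Do()
}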
// method id "drive.replies.get":
type RepliesGetCall struct {
s *Service
fileId string
commentId string
replyId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a reply by ID.
func (r *RepliesService) Get(fileId string, commentId string, replyId string) *RepliesGetCall {
c := &RepliesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
c.replyId = replyId
return c
}
// IncludeDeleted sets the optional parameter "includeDeleted": Whether
// to return deleted replies. Deleted replies will not include their
// original content.
func (c *RepliesGetCall) IncludeDeleted(includeDeleted bool) *RepliesGetCall {
c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RepliesGetCall) Fields(s ...googleapi.Field) *RepliesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *RepliesGetCall) IfNoneMatch(entityTag string) *RepliesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RepliesGetCall) Context(ctx context.Context) *RepliesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RepliesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RepliesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
"replyId": c.replyId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.replies.get" call.
// Exactly one of *Reply or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Reply.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *RepliesGetCall) Do(opts ...googleapi.CallOption) (*Reply, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Reply{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a reply by ID.",
// "httpMethod": "GET",
// "id": "drive.replies.get",
// "parameterOrder": [
// "fileId",
// "commentId",
// "replyId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "includeDeleted": {
// "default": "false",
// "description": "Whether to return deleted replies. Deleted replies will not include their original content.",
// "location": "query",
// "type": "boolean"
// },
// "replyId": {
// "description": "The ID of the reply.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}/replies/{replyId}",
// "response": {
// "$ref": "Reply"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
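// exampleRepliesGet is an illustrative sketch (not generated code) showing how
// IncludeDeleted(true) lets RepliesGetCall return a deleted reply as well; per
// the API description, such replies come back without their original content.
func exampleRepliesGet(ctx context.Context, srv *Service, fileID, commentID, replyID string) (*Reply, error) {
    return srv.Replies.Get(fileID, commentID, replyID).
        IncludeDeleted(true).
        Context(ctx).
        Do()
}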
// method id "drive.replies.list":
type RepliesListCall struct {
s *Service
fileId string
commentId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists a comment's replies.
func (r *RepliesService) List(fileId string, commentId string) *RepliesListCall {
c := &RepliesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
return c
}
// IncludeDeleted sets the optional parameter "includeDeleted": Whether
// to include deleted replies. Deleted replies will not include their
// original content.
func (c *RepliesListCall) IncludeDeleted(includeDeleted bool) *RepliesListCall {
c.urlParams_.Set("includeDeleted", fmt.Sprint(includeDeleted))
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of replies to return per page.
func (c *RepliesListCall) PageSize(pageSize int64) *RepliesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token for
// continuing a previous list request on the next page. This should be
// set to the value of 'nextPageToken' from the previous response.
func (c *RepliesListCall) PageToken(pageToken string) *RepliesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RepliesListCall) Fields(s ...googleapi.Field) *RepliesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *RepliesListCall) IfNoneMatch(entityTag string) *RepliesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RepliesListCall) Context(ctx context.Context) *RepliesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RepliesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RepliesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.replies.list" call.
// Exactly one of *ReplyList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *ReplyList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *RepliesListCall) Do(opts ...googleapi.CallOption) (*ReplyList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ReplyList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists a comment's replies.",
// "httpMethod": "GET",
// "id": "drive.replies.list",
// "parameterOrder": [
// "fileId",
// "commentId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "includeDeleted": {
// "default": "false",
// "description": "Whether to include deleted replies. Deleted replies will not include their original content.",
// "location": "query",
// "type": "boolean"
// },
// "pageSize": {
// "default": "20",
// "description": "The maximum number of replies to return per page.",
// "format": "int32",
// "location": "query",
// "maximum": "100",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}/replies",
// "response": {
// "$ref": "ReplyList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *RepliesListCall) Pages(ctx context.Context, f func(*ReplyList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
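// exampleRepliesListAll is an illustrative sketch (not generated code) that
// collects every reply on a comment via Pages, including deleted replies.
func exampleRepliesListAll(ctx context.Context, srv *Service, fileID, commentID string) ([]*Reply, error) {
    var all []*Reply
    err := srv.Replies.List(fileID, commentID).
        IncludeDeleted(true).
        PageSize(100).
        Pages(ctx, func(page *ReplyList) error {
            all = append(all, page.Replies...)
            return nil
        })
    return all, err
}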
// method id "drive.replies.update":
type RepliesUpdateCall struct {
s *Service
fileId string
commentId string
replyId string
reply *Reply
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a reply with patch semantics.
func (r *RepliesService) Update(fileId string, commentId string, replyId string, reply *Reply) *RepliesUpdateCall {
c := &RepliesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.commentId = commentId
c.replyId = replyId
c.reply = reply
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RepliesUpdateCall) Fields(s ...googleapi.Field) *RepliesUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RepliesUpdateCall) Context(ctx context.Context) *RepliesUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RepliesUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RepliesUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.reply)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/comments/{commentId}/replies/{replyId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"commentId": c.commentId,
"replyId": c.replyId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.replies.update" call.
// Exactly one of *Reply or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Reply.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *RepliesUpdateCall) Do(opts ...googleapi.CallOption) (*Reply, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Reply{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a reply with patch semantics.",
// "httpMethod": "PATCH",
// "id": "drive.replies.update",
// "parameterOrder": [
// "fileId",
// "commentId",
// "replyId"
// ],
// "parameters": {
// "commentId": {
// "description": "The ID of the comment.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "replyId": {
// "description": "The ID of the reply.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/comments/{commentId}/replies/{replyId}",
// "request": {
// "$ref": "Reply"
// },
// "response": {
// "$ref": "Reply"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
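// exampleRepliesUpdate is an illustrative sketch (not generated code) of the
// patch-semantics update on a reply: only the fields present in the request
// body are changed, here just Content.
func exampleRepliesUpdate(ctx context.Context, srv *Service, fileID, commentID, replyID, newContent string) (*Reply, error) {
    return srv.Replies.Update(fileID, commentID, replyID, &Reply{Content: newContent}).
        Context(ctx).
        Do()
}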
// method id "drive.revisions.delete":
type RevisionsDeleteCall struct {
s *Service
fileId string
revisionId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Permanently deletes a file version. You can only delete
// revisions for files with binary content in Google Drive, like images
// or videos. Revisions for other files, like Google Docs or Sheets, and
// the last remaining file version can't be deleted.
func (r *RevisionsService) Delete(fileId string, revisionId string) *RevisionsDeleteCall {
c := &RevisionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.revisionId = revisionId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RevisionsDeleteCall) Fields(s ...googleapi.Field) *RevisionsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RevisionsDeleteCall) Context(ctx context.Context) *RevisionsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RevisionsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RevisionsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"revisionId": c.revisionId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.revisions.delete" call.
func (c *RevisionsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Permanently deletes a file version. You can only delete revisions for files with binary content in Google Drive, like images or videos. Revisions for other files, like Google Docs or Sheets, and the last remaining file version can't be deleted.",
// "httpMethod": "DELETE",
// "id": "drive.revisions.delete",
// "parameterOrder": [
// "fileId",
// "revisionId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "revisionId": {
// "description": "The ID of the revision.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/revisions/{revisionId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
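// exampleRevisionsDelete is an illustrative sketch (not generated code). It
// shows one possible way to surface the documented restriction that only
// revisions of binary content can be removed, by inspecting the
// *googleapi.Error returned for a rejected request; the status-code handling
// here is an assumption about caller policy, not part of the generated client.
func exampleRevisionsDelete(ctx context.Context, srv *Service, fileID, revisionID string) error {
    err := srv.Revisions.Delete(fileID, revisionID).Context(ctx).Do()
    if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusForbidden {
        return fmt.Errorf("drive.revisions.delete rejected for revision %s of file %s: %v", revisionID, fileID, gerr)
    }
    return err
}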
// method id "drive.revisions.get":
type RevisionsGetCall struct {
s *Service
fileId string
revisionId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a revision's metadata or content by ID.
func (r *RevisionsService) Get(fileId string, revisionId string) *RevisionsGetCall {
c := &RevisionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.revisionId = revisionId
return c
}
// AcknowledgeAbuse sets the optional parameter "acknowledgeAbuse":
// Whether the user is acknowledging the risk of downloading known
// malware or other abusive files. This is only applicable when
// alt=media.
func (c *RevisionsGetCall) AcknowledgeAbuse(acknowledgeAbuse bool) *RevisionsGetCall {
c.urlParams_.Set("acknowledgeAbuse", fmt.Sprint(acknowledgeAbuse))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RevisionsGetCall) Fields(s ...googleapi.Field) *RevisionsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *RevisionsGetCall) IfNoneMatch(entityTag string) *RevisionsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *RevisionsGetCall) Context(ctx context.Context) *RevisionsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RevisionsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RevisionsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"revisionId": c.revisionId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *RevisionsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("media")
if err != nil {
return nil, err
}
if err := googleapi.CheckMediaResponse(res); err != nil {
res.Body.Close()
return nil, err
}
return res, nil
}
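// downloadRevisionExample is an illustrative sketch added for documentation; it
// is not part of the generated API surface. It shows the intended use of
// Download above: on success the alt=media response is returned and the caller
// owns res.Body and must close it. The function and parameter names here are
// assumptions, not part of the Drive API.
func downloadRevisionExample(ctx context.Context, svc *Service, fileID, revisionID string) (*http.Response, error) {
	res, err := svc.Revisions.Get(fileID, revisionID).Context(ctx).Download()
	if err != nil {
		return nil, err
	}
	// Callers typically stream res.Body to a file or buffer, then close it.
	return res, nil
}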
// Do executes the "drive.revisions.get" call.
// Exactly one of *Revision or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Revision.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *RevisionsGetCall) Do(opts ...googleapi.CallOption) (*Revision, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Revision{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a revision's metadata or content by ID.",
// "httpMethod": "GET",
// "id": "drive.revisions.get",
// "parameterOrder": [
// "fileId",
// "revisionId"
// ],
// "parameters": {
// "acknowledgeAbuse": {
// "default": "false",
// "description": "Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.",
// "location": "query",
// "type": "boolean"
// },
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "revisionId": {
// "description": "The ID of the revision.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/revisions/{revisionId}",
// "response": {
// "$ref": "Revision"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ],
// "supportsMediaDownload": true,
// "useMediaDownloadService": true
// }
}
// method id "drive.revisions.list":
type RevisionsListCall struct {
s *Service
fileId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists a file's revisions.
func (r *RevisionsService) List(fileId string) *RevisionsListCall {
c := &RevisionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of revisions to return per page.
func (c *RevisionsListCall) PageSize(pageSize int64) *RevisionsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token for
// continuing a previous list request on the next page. This should be
// set to the value of 'nextPageToken' from the previous response.
func (c *RevisionsListCall) PageToken(pageToken string) *RevisionsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RevisionsListCall) Fields(s ...googleapi.Field) *RevisionsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *RevisionsListCall) IfNoneMatch(entityTag string) *RevisionsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RevisionsListCall) Context(ctx context.Context) *RevisionsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RevisionsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RevisionsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.revisions.list" call.
// Exactly one of *RevisionList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *RevisionList.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *RevisionsListCall) Do(opts ...googleapi.CallOption) (*RevisionList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &RevisionList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists a file's revisions.",
// "httpMethod": "GET",
// "id": "drive.revisions.list",
// "parameterOrder": [
// "fileId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "default": "200",
// "description": "The maximum number of revisions to return per page.",
// "format": "int32",
// "location": "query",
// "maximum": "1000",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "files/{fileId}/revisions",
// "response": {
// "$ref": "RevisionList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file",
// "https://www.googleapis.com/auth/drive.metadata",
// "https://www.googleapis.com/auth/drive.metadata.readonly",
// "https://www.googleapis.com/auth/drive.photos.readonly",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *RevisionsListCall) Pages(ctx context.Context, f func(*RevisionList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
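// listAllRevisionIDs is an illustrative sketch added for documentation; it is
// not part of the generated API surface. It shows how Pages above is meant to
// be used: the callback receives each RevisionList page in turn, and returning
// a non-nil error halts the iteration. Names here are assumptions.
func listAllRevisionIDs(ctx context.Context, svc *Service, fileID string) ([]string, error) {
	var ids []string
	err := svc.Revisions.List(fileID).PageSize(200).Pages(ctx, func(page *RevisionList) error {
		// Collect the revision IDs from this page; Pages fetches the next
		// page automatically until NextPageToken is empty.
		for _, rev := range page.Revisions {
			ids = append(ids, rev.Id)
		}
		return nil
	})
	return ids, err
}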
// method id "drive.revisions.update":
type RevisionsUpdateCall struct {
s *Service
fileId string
revisionId string
revision *Revision
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a revision with patch semantics.
func (r *RevisionsService) Update(fileId string, revisionId string, revision *Revision) *RevisionsUpdateCall {
c := &RevisionsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.fileId = fileId
c.revisionId = revisionId
c.revision = revision
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *RevisionsUpdateCall) Fields(s ...googleapi.Field) *RevisionsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *RevisionsUpdateCall) Context(ctx context.Context) *RevisionsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *RevisionsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *RevisionsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.revision)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "files/{fileId}/revisions/{revisionId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"fileId": c.fileId,
"revisionId": c.revisionId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.revisions.update" call.
// Exactly one of *Revision or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Revision.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *RevisionsUpdateCall) Do(opts ...googleapi.CallOption) (*Revision, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Revision{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a revision with patch semantics.",
// "httpMethod": "PATCH",
// "id": "drive.revisions.update",
// "parameterOrder": [
// "fileId",
// "revisionId"
// ],
// "parameters": {
// "fileId": {
// "description": "The ID of the file.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "revisionId": {
// "description": "The ID of the revision.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "files/{fileId}/revisions/{revisionId}",
// "request": {
// "$ref": "Revision"
// },
// "response": {
// "$ref": "Revision"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.appdata",
// "https://www.googleapis.com/auth/drive.file"
// ]
// }
}
// method id "drive.teamdrives.create":
type TeamdrivesCreateCall struct {
s *Service
teamdrive *TeamDrive
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Deprecated use drives.create instead.
func (r *TeamdrivesService) Create(requestId string, teamdrive *TeamDrive) *TeamdrivesCreateCall {
c := &TeamdrivesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.urlParams_.Set("requestId", requestId)
c.teamdrive = teamdrive
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *TeamdrivesCreateCall) Fields(s ...googleapi.Field) *TeamdrivesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *TeamdrivesCreateCall) Context(ctx context.Context) *TeamdrivesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *TeamdrivesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *TeamdrivesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.teamdrive)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.teamdrives.create" call.
// Exactly one of *TeamDrive or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *TeamDrive.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *TeamdrivesCreateCall) Do(opts ...googleapi.CallOption) (*TeamDrive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TeamDrive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deprecated use drives.create instead.",
// "httpMethod": "POST",
// "id": "drive.teamdrives.create",
// "parameterOrder": [
// "requestId"
// ],
// "parameters": {
// "requestId": {
// "description": "An ID, such as a random UUID, which uniquely identifies this user's request for idempotent creation of a Team Drive. A repeated request by the same user and with the same request ID will avoid creating duplicates by attempting to create the same Team Drive. If the Team Drive already exists a 409 error will be returned.",
// "location": "query",
// "required": true,
// "type": "string"
// }
// },
// "path": "teamdrives",
// "request": {
// "$ref": "TeamDrive"
// },
// "response": {
// "$ref": "TeamDrive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
// method id "drive.teamdrives.delete":
type TeamdrivesDeleteCall struct {
s *Service
teamDriveId string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deprecated use drives.delete instead.
func (r *TeamdrivesService) Delete(teamDriveId string) *TeamdrivesDeleteCall {
c := &TeamdrivesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.teamDriveId = teamDriveId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *TeamdrivesDeleteCall) Fields(s ...googleapi.Field) *TeamdrivesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *TeamdrivesDeleteCall) Context(ctx context.Context) *TeamdrivesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *TeamdrivesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *TeamdrivesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives/{teamDriveId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"teamDriveId": c.teamDriveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.teamdrives.delete" call.
func (c *TeamdrivesDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deprecated use drives.delete instead.",
// "httpMethod": "DELETE",
// "id": "drive.teamdrives.delete",
// "parameterOrder": [
// "teamDriveId"
// ],
// "parameters": {
// "teamDriveId": {
// "description": "The ID of the Team Drive",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "teamdrives/{teamDriveId}",
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
// method id "drive.teamdrives.get":
type TeamdrivesGetCall struct {
s *Service
teamDriveId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Deprecated use drives.get instead.
func (r *TeamdrivesService) Get(teamDriveId string) *TeamdrivesGetCall {
c := &TeamdrivesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.teamDriveId = teamDriveId
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if they are
// an administrator of the domain to which the Team Drive belongs.
func (c *TeamdrivesGetCall) UseDomainAdminAccess(useDomainAdminAccess bool) *TeamdrivesGetCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *TeamdrivesGetCall) Fields(s ...googleapi.Field) *TeamdrivesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *TeamdrivesGetCall) IfNoneMatch(entityTag string) *TeamdrivesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *TeamdrivesGetCall) Context(ctx context.Context) *TeamdrivesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *TeamdrivesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *TeamdrivesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives/{teamDriveId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"teamDriveId": c.teamDriveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.teamdrives.get" call.
// Exactly one of *TeamDrive or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *TeamDrive.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *TeamdrivesGetCall) Do(opts ...googleapi.CallOption) (*TeamDrive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TeamDrive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deprecated use drives.get instead.",
// "httpMethod": "GET",
// "id": "drive.teamdrives.get",
// "parameterOrder": [
// "teamDriveId"
// ],
// "parameters": {
// "teamDriveId": {
// "description": "The ID of the Team Drive",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the Team Drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "teamdrives/{teamDriveId}",
// "response": {
// "$ref": "TeamDrive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// method id "drive.teamdrives.list":
type TeamdrivesListCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Deprecated use drives.list instead.
func (r *TeamdrivesService) List() *TeamdrivesListCall {
c := &TeamdrivesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// PageSize sets the optional parameter "pageSize": Maximum number of
// Team Drives to return.
func (c *TeamdrivesListCall) PageSize(pageSize int64) *TeamdrivesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": Page token for
// Team Drives.
func (c *TeamdrivesListCall) PageToken(pageToken string) *TeamdrivesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Q sets the optional parameter "q": Query string for searching Team
// Drives.
func (c *TeamdrivesListCall) Q(q string) *TeamdrivesListCall {
c.urlParams_.Set("q", q)
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then all Team Drives of the domain in which the
// requester is an administrator are returned.
func (c *TeamdrivesListCall) UseDomainAdminAccess(useDomainAdminAccess bool) *TeamdrivesListCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *TeamdrivesListCall) Fields(s ...googleapi.Field) *TeamdrivesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *TeamdrivesListCall) IfNoneMatch(entityTag string) *TeamdrivesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *TeamdrivesListCall) Context(ctx context.Context) *TeamdrivesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *TeamdrivesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *TeamdrivesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.teamdrives.list" call.
// Exactly one of *TeamDriveList or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *TeamDriveList.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *TeamdrivesListCall) Do(opts ...googleapi.CallOption) (*TeamDriveList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TeamDriveList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deprecated use drives.list instead.",
// "httpMethod": "GET",
// "id": "drive.teamdrives.list",
// "parameters": {
// "pageSize": {
// "default": "10",
// "description": "Maximum number of Team Drives to return.",
// "format": "int32",
// "location": "query",
// "maximum": "100",
// "minimum": "1",
// "type": "integer"
// },
// "pageToken": {
// "description": "Page token for Team Drives.",
// "location": "query",
// "type": "string"
// },
// "q": {
// "description": "Query string for searching Team Drives.",
// "location": "query",
// "type": "string"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then all Team Drives of the domain in which the requester is an administrator are returned.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "teamdrives",
// "response": {
// "$ref": "TeamDriveList"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive",
// "https://www.googleapis.com/auth/drive.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *TeamdrivesListCall) Pages(ctx context.Context, f func(*TeamDriveList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "drive.teamdrives.update":
type TeamdrivesUpdateCall struct {
s *Service
teamDriveId string
teamdrive *TeamDrive
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Deprecated use drives.update instead
func (r *TeamdrivesService) Update(teamDriveId string, teamdrive *TeamDrive) *TeamdrivesUpdateCall {
c := &TeamdrivesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.teamDriveId = teamDriveId
c.teamdrive = teamdrive
return c
}
// UseDomainAdminAccess sets the optional parameter
// "useDomainAdminAccess": Issue the request as a domain administrator;
// if set to true, then the requester will be granted access if they are
// an administrator of the domain to which the Team Drive belongs.
func (c *TeamdrivesUpdateCall) UseDomainAdminAccess(useDomainAdminAccess bool) *TeamdrivesUpdateCall {
c.urlParams_.Set("useDomainAdminAccess", fmt.Sprint(useDomainAdminAccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *TeamdrivesUpdateCall) Fields(s ...googleapi.Field) *TeamdrivesUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *TeamdrivesUpdateCall) Context(ctx context.Context) *TeamdrivesUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *TeamdrivesUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *TeamdrivesUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200311")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.teamdrive)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "teamdrives/{teamDriveId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"teamDriveId": c.teamDriveId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "drive.teamdrives.update" call.
// Exactly one of *TeamDrive or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *TeamDrive.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *TeamdrivesUpdateCall) Do(opts ...googleapi.CallOption) (*TeamDrive, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TeamDrive{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deprecated use drives.update instead",
// "httpMethod": "PATCH",
// "id": "drive.teamdrives.update",
// "parameterOrder": [
// "teamDriveId"
// ],
// "parameters": {
// "teamDriveId": {
// "description": "The ID of the Team Drive",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "useDomainAdminAccess": {
// "default": "false",
// "description": "Issue the request as a domain administrator; if set to true, then the requester will be granted access if they are an administrator of the domain to which the Team Drive belongs.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "teamdrives/{teamDriveId}",
// "request": {
// "$ref": "TeamDrive"
// },
// "response": {
// "$ref": "TeamDrive"
// },
// "scopes": [
// "https://www.googleapis.com/auth/drive"
// ]
// }
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
vendor/golang.org/x/sys/windows/svc/svc_test.go
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
// +build windows
package svc_test
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/mgr"
)
func getState(t *testing.T, s *mgr.Service) svc.State {
status, err := s.Query()
if err != nil {
t.Fatalf("Query(%s) failed: %s", s.Name, err)
}
return status.State
}
func testState(t *testing.T, s *mgr.Service, want svc.State) {
have := getState(t, s)
if have != want {
t.Fatalf("%s state is=%d want=%d", s.Name, have, want)
}
}
func waitState(t *testing.T, s *mgr.Service, want svc.State) {
for i := 0; ; i++ {
have := getState(t, s)
if have == want {
return
}
if i > 10 {
t.Fatalf("%s state is=%d, waiting timeout", s.Name, have)
}
time.Sleep(300 * time.Millisecond)
}
}
// stopAndDeleteIfInstalled stops and deletes service name,
// if the service is running and / or installed.
func stopAndDeleteIfInstalled(t *testing.T, m *mgr.Mgr, name string) {
s, err := m.OpenService(name)
if err != nil {
// Service is not installed.
return
}
defer s.Close()
// Make sure the service is not running, otherwise we won't be able to delete it.
if getState(t, s) == svc.Running {
_, err = s.Control(svc.Stop)
if err != nil {
t.Fatalf("Control(%s) failed: %s", s.Name, err)
}
waitState(t, s, svc.Stopped)
}
err = s.Delete()
if err != nil {
t.Fatalf("Delete failed: %s", err)
}
}
func TestExample(t *testing.T) {
if testing.Short() && os.Getenv("GO_BUILDER_NAME") != "" {
t.Skip("skipping test in short mode - it modifies system services")
}
const name = "myservice"
m, err := mgr.Connect()
if err != nil {
t.Fatalf("SCM connection failed: %s", err)
}
defer m.Disconnect()
dir, err := ioutil.TempDir("", "svc")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
}
defer os.RemoveAll(dir)
exepath := filepath.Join(dir, "a.exe")
o, err := exec.Command("go", "build", "-o", exepath, "golang.org/x/sys/windows/svc/example").CombinedOutput()
if err != nil {
t.Fatalf("failed to build service program: %v\n%v", err, string(o))
}
stopAndDeleteIfInstalled(t, m, name)
s, err := m.CreateService(name, exepath, mgr.Config{DisplayName: "my service"}, "is", "auto-started")
if err != nil {
t.Fatalf("CreateService(%s) failed: %v", name, err)
}
defer s.Close()
args := []string{"is", "manual-started", fmt.Sprintf("%d", rand.Int())}
testState(t, s, svc.Stopped)
err = s.Start(args...)
if err != nil {
t.Fatalf("Start(%s) failed: %s", s.Name, err)
}
waitState(t, s, svc.Running)
time.Sleep(1 * time.Second)
// Testing deadlock from issue 4.
_, err = s.Control(svc.Interrogate)
if err != nil {
t.Fatalf("Control(%s) failed: %s", s.Name, err)
}
_, err = s.Control(svc.Interrogate)
if err != nil {
t.Fatalf("Control(%s) failed: %s", s.Name, err)
}
time.Sleep(1 * time.Second)
_, err = s.Control(svc.Stop)
if err != nil {
t.Fatalf("Control(%s) failed: %s", s.Name, err)
}
waitState(t, s, svc.Stopped)
err = s.Delete()
if err != nil {
t.Fatalf("Delete failed: %s", err)
}
out, err := exec.Command("wevtutil.exe", "qe", "Application", "/q:*[System[Provider[@Name='myservice']]]", "/rd:true", "/c:10").CombinedOutput()
if err != nil {
t.Fatalf("wevtutil failed: %v\n%v", err, string(out))
}
want := strings.Join(append([]string{name}, args...), "-")
// Test context passing (see servicemain in sys_386.s and sys_amd64.s).
want += "-123456"
if !strings.Contains(string(out), want) {
t.Errorf("%q string does not contain %q", string(out), want)
}
}
func TestIsAnInteractiveSession(t *testing.T) {
isInteractive, err := svc.IsAnInteractiveSession()
if err != nil {
t.Fatal(err)
}
if !isInteractive {
t.Error("IsAnInteractiveSession returns false when running interactively.")
}
}
func TestIsWindowsService(t *testing.T) {
isSvc, err := svc.IsWindowsService()
if err != nil {
t.Fatal(err)
}
if isSvc {
t.Error("IsWindowsService returns true when not running in a service.")
}
}
func TestIsWindowsServiceWhenParentExits(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "parent" {
// in parent process
// Start the child and exit quickly.
child := exec.Command(os.Args[0], "-test.run=TestIsWindowsServiceWhenParentExits")
child.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=child")
err := child.Start()
if err != nil {
fmt.Fprintf(os.Stderr, "child start failed: %v\n", err)
os.Exit(1)
}
os.Exit(0)
}
if os.Getenv("GO_WANT_HELPER_PROCESS") == "child" {
// in child process
dumpPath := os.Getenv("GO_WANT_HELPER_PROCESS_FILE")
if dumpPath == "" {
// We cannot report this error. But main test will notice
// that we did not create dump file.
os.Exit(1)
}
var msg string
isSvc, err := svc.IsWindowsService()
if err != nil {
msg = err.Error()
}
if isSvc {
msg = "IsWindowsService returns true when not running in a service."
}
err = ioutil.WriteFile(dumpPath, []byte(msg), 0644)
if err != nil {
// We cannot report this error. But main test will notice
// that we did not create dump file.
os.Exit(2)
}
os.Exit(0)
}
// Run in a loop until it fails.
for i := 0; i < 10; i++ {
childDumpPath := filepath.Join(t.TempDir(), "issvc.txt")
parent := exec.Command(os.Args[0], "-test.run=TestIsWindowsServiceWhenParentExits")
parent.Env = append(os.Environ(),
"GO_WANT_HELPER_PROCESS=parent",
"GO_WANT_HELPER_PROCESS_FILE="+childDumpPath)
parentOutput, err := parent.CombinedOutput()
if err != nil {
t.Errorf("parent failed: %v: %v", err, string(parentOutput))
}
for i := 0; ; i++ {
if _, err := os.Stat(childDumpPath); err == nil {
break
}
time.Sleep(100 * time.Millisecond)
if i > 10 {
t.Fatal("timed out waiting for child output file to be created.")
}
}
childOutput, err := ioutil.ReadFile(childDumpPath)
if err != nil {
t.Fatalf("reading child output failed: %v", err)
}
if got, want := string(childOutput), ""; got != want {
t.Fatalf("child output: want %q, got %q", want, got)
}
}
}
|
[
"\"GO_BUILDER_NAME\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS_FILE\""
] |
[] |
[
"GO_BUILDER_NAME",
"GO_WANT_HELPER_PROCESS",
"GO_WANT_HELPER_PROCESS_FILE"
] |
[]
|
["GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "GO_WANT_HELPER_PROCESS_FILE"]
|
go
| 3 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CapFront.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config/config.go
|
/*Package config holds the common server/app protocol settings and middleware; do not put secrets here*/
package config
import (
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql" // for "database/sql"
_ "github.com/joho/godotenv/autoload" // env-var helper: autoloads .env so os.Getenv works
"github.com/wtg42/go-api-sooon/app"
)
const configCodePrefix = "CNF00"
// JWTClaims is the user information carried in the JWT payload.
// The server defines here which common variables are embedded.
type JWTClaims struct {
Email string `json:"Email"`
Role string `json:"Role"`
MemberID int64 `json:"MemberID"`
Lang string `json:"Lang"`
jwt.StandardClaims
}
// JwtSecret SecretKey
var JwtSecret = []byte(os.Getenv("JWT_SECRET"))
// CreateJWTClaims issues and signs a JWT token.
func CreateJWTClaims(memberID int64, email string, role string, issuer string) (JWTClaims, string, error) {
now := time.Now()
jwtID := email + strconv.FormatInt(now.Unix(), 10)
// set claims and sign
claims := JWTClaims{
Email: email,
Role: role,
MemberID: memberID,
StandardClaims: jwt.StandardClaims{
Audience: email,
ExpiresAt: now.Add(3600 * time.Second).Unix(), // expiry time
Id: jwtID,
IssuedAt: now.Unix(), // issued-at time
Issuer: issuer,
NotBefore: now.Add(1 * time.Second).Unix(), // becomes usable this many seconds later
Subject: email,
},
}
tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
token, err := tokenClaims.SignedString(JwtSecret)
return claims, token, err
}
// JWTAuth is the JWT auth middleware; it stores the payload fields into the request context.
func JWTAuth(c *gin.Context) {
auth := c.GetHeader("Authorization")
bearerString := strings.Split(auth, "Bearer ")
if len(bearerString) < 2 {
c.JSON(http.StatusOK, gin.H{
"s": -9,
"errMsg": "no Bearer",
"errCode": app.SFunc.DumpErrorCode(configCodePrefix),
})
return
}
// fmt.Println(len(bearerString))
// fmt.Println(bearerString)
// fmt.Println(os.Getenv("JWT_SECRET"))
token := strings.Split(auth, "Bearer ")[1]
// parse and validate token for six things:
// validationErrorMalformed => token is malformed
// validationErrorUnverifiable => token could not be verified because of signing problems
// validationErrorSignatureInvalid => signature validation failed
// validationErrorExpired => exp validation failed
// validationErrorNotValidYet => nbf validation failed
// validationErrorIssuedAt => iat validation failed
tokenClaims, err := jwt.ParseWithClaims(token, &JWTClaims{}, func(token *jwt.Token) (i interface{}, err error) {
// verify the token signature with JwtSecret
// app.SFunc.DumpAnything(token.Claims)
// sample token is expired. override time so it parses as valid?
return JwtSecret, nil
})
if err != nil {
var message string
if ve, ok := err.(*jwt.ValidationError); ok {
if ve.Errors&jwt.ValidationErrorMalformed != 0 {
message = "token is malformed"
} else if ve.Errors&jwt.ValidationErrorUnverifiable != 0 {
message = "token could not be verified because of signing problems"
} else if ve.Errors&jwt.ValidationErrorSignatureInvalid != 0 {
message = "signature validation failed"
} else if ve.Errors&jwt.ValidationErrorExpired != 0 {
message = "token is expired"
} else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 {
message = "token is not yet valid before sometime"
} else {
message = "can not handle this token"
}
}
c.JSON(http.StatusUnauthorized, gin.H{
"s": -1,
"error": message,
})
c.Abort()
return
}
// store the claims into the request context
if claims, ok := tokenClaims.Claims.(*JWTClaims); ok && tokenClaims.Valid {
c.Set("email", claims.Email)
c.Set("role", claims.Role)
c.Set("memberID", claims.MemberID)
c.Set("lang", claims.Lang)
c.Next()
} else {
c.Abort()
return
}
}
// MemberSessions is the member structure stored in redis sessions.
type MemberSessions struct {
LoginTs int64
Lang string
Email string
}
// CORSMiddleware allows CORS requests.
func CORSMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
if c.Request.Method == "OPTIONS" {
c.AbortWithStatus(204)
return
}
c.Next()
}
}
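// exampleRouter is an illustrative sketch, not part of the original package: it
// shows how CreateJWTClaims, JWTAuth and CORSMiddleware above are expected to
// be wired together in a gin engine. The route paths, issuer string, and the
// login handler body are assumptions for demonstration only.
func exampleRouter() *gin.Engine {
	r := gin.Default()
	r.Use(CORSMiddleware())

	// Hypothetical login endpoint: sign a token and hand it to the client.
	r.POST("/login", func(c *gin.Context) {
		_, token, err := CreateJWTClaims(1, "user@example.com", "member", "example-issuer")
		if err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"s": -1})
			return
		}
		c.JSON(http.StatusOK, gin.H{"s": 0, "token": token})
	})

	// Protected group: JWTAuth validates the Bearer token and stores
	// email/role/memberID/lang into the request context for handlers.
	api := r.Group("/api", JWTAuth)
	api.GET("/me", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"email": c.GetString("email")})
	})
	return r
}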
|
[
"\"JWT_SECRET\"",
"\"JWT_SECRET\""
] |
[] |
[
"JWT_SECRET"
] |
[]
|
["JWT_SECRET"]
|
go
| 1 | 0 | |
cronmailer/main.go
|
package main
import (
"fmt"
"gopkg.in/gomail.v2"
"github.com/go-redis/redis/v7"
"os"
log "github.com/sirupsen/logrus"
"encoding/json"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
"strconv"
"github.com/mergermarket/go-pkcs7"
"io/ioutil"
"time"
)
func main() {
log.SetLevel(log.DebugLevel)
log.Info("Starting cronmailer")
client := redis.NewClient(&redis.Options{
Addr: "redis:6379",
Password: os.Getenv("REDIS_PASSWORD"), // password is taken from the environment
DB: 0, // use default DB
})
log.Debug("Getting mails");
val, err := client.LRange("mailer",0,-1).Result()
if err != nil {
log.Error("Error getting data: ", err)
os.Exit(1)
}
log.Debug("Length of mailer: ",len(val))
for i := 0; i < len(val); i++ {
log.Debug("Checking item: ",val[i])
decrypted, err := Decrypt(val[i])
if err != nil {
log.Error("Error when decrypting: ",err)
}
var js map[string]interface{}
in := []byte(decrypted)
if err := json.Unmarshal(in, &js); err != nil {
log.Error("Error unmarshaling data: ",err)
}
now := int32(time.Now().Unix())
i64, err := strconv.ParseInt(js["mailDate"].(string), 10, 32)
if err != nil {
log.Error("Error getting mailDate",err)
}
mailDate := int32(i64)
if (mailDate < now) {
log.Info("Sending mail now");
err = send(js["from"].(string),js["msg"].(string))
if err != nil {
log.Error("Not able to send mail: ",err)
}
// TODO: Set list item and remove
client.LSet("mailer",int64(i),"SENT")
// Delete now sent item
client.LRem("mailer",-1,"SENT")
} else {
log.Debug(js)
}
}
time.Sleep(60 * 60 * time.Second)
//send()
}
func Decrypt(encrypted string) (string, error) {
key := []byte(os.Getenv("ENCRYPTION_KEY"))
cipherText, _ := hex.DecodeString(encrypted)
block, err := aes.NewCipher(key)
if err != nil {
panic(err)
}
if len(cipherText) < aes.BlockSize {
panic("cipherText too short")
}
iv := cipherText[:aes.BlockSize]
cipherText = cipherText[aes.BlockSize:]
if len(cipherText)%aes.BlockSize != 0 {
panic("cipherText is not a multiple of the block size")
}
mode := cipher.NewCBCDecrypter(block, iv)
mode.CryptBlocks(cipherText, cipherText)
cipherText, _ = pkcs7.Unpad(cipherText, aes.BlockSize)
return fmt.Sprintf("%s", cipherText), nil
}
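// Encrypt is an illustrative sketch of the counterpart to Decrypt above; it is
// an assumption about how the queued payloads are produced: AES-CBC over a
// PKCS#7-padded plaintext, with the IV prepended and the result hex-encoded.
// The caller supplies the key and a random iv of exactly aes.BlockSize bytes.
func Encrypt(plaintext string, key, iv []byte) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	// Pad the plaintext to a whole number of AES blocks.
	padded, err := pkcs7.Pad([]byte(plaintext), aes.BlockSize)
	if err != nil {
		return "", err
	}
	// Prepend the IV, then encrypt the padded plaintext in place after it.
	cipherText := make([]byte, aes.BlockSize+len(padded))
	copy(cipherText, iv)
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(cipherText[aes.BlockSize:], padded)
	return hex.EncodeToString(cipherText), nil
}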
func send(receiver string, message string) error{
m := gomail.NewMessage()
m.SetHeader("From", "[email protected]")
m.SetHeader("To", receiver)
m.SetHeader("Subject", "Deine Nachricht aus der Vergangenheit")
content, err := ioutil.ReadFile("./mail_start.html");
if err != nil {
log.Error(err)
}
mailStart := string(content)
content, err = ioutil.ReadFile("./mail_end.html");
if err != nil {
log.Error(err)
}
mailEnd := string(content)
mailContent := mailStart + message + mailEnd
m.SetBody("text/html",mailContent)
d := gomail.NewDialer("mail.codebrew.de", 587, "[email protected]", os.Getenv("MAILER_PASSWORD"))
if err := d.DialAndSend(m); err != nil {
fmt.Println(err)
return err
}
return nil
}
|
[
"\"REDIS_PASSWORD\"",
"\"ENCRYPTION_KEY\"",
"\"MAILER_PASSWORD\""
] |
[] |
[
"REDIS_PASSWORD",
"ENCRYPTION_KEY",
"MAILER_PASSWORD"
] |
[]
|
["REDIS_PASSWORD", "ENCRYPTION_KEY", "MAILER_PASSWORD"]
|
go
| 3 | 0 | |
source/PrepareRevisionMapInputFunction/app.py
|
import boto3
import os
import logging
from datetime import datetime
from pyrearcadx.s3_helper import s3_select
def lambda_handler(event, context):
"""
This function prepares input for the revision map state
"""
try:
global log_level
log_level = str(os.getenv('LOG_LEVEL')).upper()
valid_log_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
if log_level not in valid_log_levels:
log_level = 'ERROR'
logging.getLogger().setLevel(log_level)
logging.debug(f'{event=}')
bucket = event['Bucket']
key = event['Key']
product_id = s3_select(bucket, key, """SELECT * FROM s3object[*].product_id r;""") #event['ProductId']
dataset_id = s3_select(bucket, key, """SELECT * FROM s3object[*].dataset_id r;""") #event['DatasetId']
logging.debug(f"{bucket=}\n{key=}\n{product_id=}\n{dataset_id=}")
select_expression = """SELECT COUNT(*) FROM s3object[*].asset_list_nested[*] r;"""
num_revisions = s3_select(bucket, key, select_expression)
num_jobs = 0
num_revision_assets = 0
if num_revisions:
logging.info(f"Creating the input list to create {num_revisions} revisions")
revision_map_input_list = list(range(num_revisions))
for revisions_index in range(num_revisions):
select_expression = """SELECT COUNT(*) FROM s3object[*].asset_list_nested[{}][*] r;""".format(revisions_index)
num_revision_assets = s3_select(bucket, key, select_expression)
num_jobs += num_revision_assets
metrics = {
"Version": os.getenv('Version'),
"TimeStamp": datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f'),
"ProductId": product_id,
"DatasetId": dataset_id,
"RevisionAssetCount": num_revision_assets,
"TotalJobCount": num_jobs,
"RevisionMapInput": revision_map_input_list
}
logging.info(f'Metrics:{metrics}')
except Exception as e:
logging.error(e)
raise e
return {
"StatusCode": 200,
"Message": "Input generated for {} revisions and {} jobs".format(num_revisions, num_jobs),
"Bucket": bucket,
"Key": key,
"ProductId": product_id,
"DatasetId": dataset_id,
"RevisionCount": num_revisions,
"TotalJobCount": num_jobs,
"RevisionMapInput": revision_map_input_list
}
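# Illustrative local-invocation sketch, not part of the deployed Lambda: it
# shows the event shape lambda_handler expects -- an S3 Bucket/Key pointing at
# the dataset manifest. The bucket and key values below are assumptions.
if __name__ == '__main__':
    sample_event = {
        "Bucket": "example-manifest-bucket",
        "Key": "manifests/example-manifest.json",
    }
    print(lambda_handler(sample_event, None))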
|
[] |
[] |
[
"Version",
"LOG_LEVEL"
] |
[]
|
["Version", "LOG_LEVEL"]
|
python
| 2 | 0 | |
server/framework/settings.py
|
"""
Django settings for framework project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hdr#^b4)7o6^y(=i6k6n-^n2ho!m&%r$noacr$kvix_f6nikl!'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = bool( os.environ.get('DJANGO_DEBUG', True) )
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_swagger',
'gunicorn'
# 'app'
]
REST_FRAMEWORK = {
# dev mode?
# 'DEFAULT_RENDERER_CLASSES': (
# 'rest_framework.renderers.JSONRenderer',
# ),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 1
}
# swagger configuration
SWAGGER_SETTINGS = {
# basic auth security scheme
'SECURITY_DEFINITIONS': {
"basic":{
'type': 'basic'
}
},
'LOGIN_URL': 'rest_framework:login',
'LOGOUT_URL': 'rest_framework:logout',
# 'DOC_EXPANSION': None,
# 'SHOW_REQUEST_HEADERS':True,
# 'USE_SESSION_AUTH': True,
# 'DOC_EXPANSION': 'list',
'APIS_SORTER': 'alpha',
'JSON_EDITOR': False,
'OPERATIONS_SORTER': 'alpha',
'VALIDATOR_URL': None,
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'framework.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'framework.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[] |
[] |
[
"DJANGO_DEBUG"
] |
[]
|
["DJANGO_DEBUG"]
|
python
| 1 | 0 | |
pytest-server-fixtures/pytest_server_fixtures/base2.py
|
import os
import hashlib
import logging
import time
from datetime import datetime
from pytest_server_fixtures import CONFIG
from pytest_shutil.workspace import Workspace
from .base import get_ephemeral_port
from .serverclass import create_server
log = logging.getLogger(__name__)
class TeardownTestServerException(Exception):
"""Thrown when attempting to start an already teardown test server."""
pass
class TestServerV2(Workspace):
"""Base class of a v2 test server."""
random_port = True
random_hostname = True
port_seed = 65535
def __init__(self, cwd=None, workspace=None, delete=None, server_class=CONFIG.server_class):
"""
Initialise a test server.
@param cwd: the current working directory
@param workspace: where all files will be stored
@param delete: whether to delete the workspace after teardown or not
@param server_class: specify server class name (default from CONFIG.server_class)
"""
super(TestServerV2, self).__init__(workspace=workspace, delete=delete)
self._cwd = cwd or os.getcwd()
self._server_class = server_class
self._server = None
self._teardown = False
def start(self):
"""
Start the test server.
"""
if self._teardown:
raise TeardownTestServerException()
try:
self._server = create_server(
server_class=CONFIG.server_class,
server_type=self.__class__.__name__,
cmd=self.cmd,
cmd_local=self.cmd_local,
get_args=self.get_args,
env=self.env,
image=self.image,
labels=self.labels,
workspace=self.workspace,
cwd=self._cwd,
random_hostname=self.random_hostname,
)
if self._server_class == 'thread':
self.pre_setup()
self._server.launch()
self._wait_for_go()
log.debug("Server now awake")
self.post_setup()
except OSError as err:
log.warning("Error when starting the test server.")
log.debug(err)
raise
def teardown(self):
"""
Stop the server and clean up all resources.
"""
if self._teardown:
log.debug("Server is already teardown, skipping")
return
if not self._server:
log.debug("Server not started yet, skipping")
return
self._server.teardown()
self._server = None
super(TestServerV2, self).teardown()
self._teardown = True
def check_server_up(self):
"""
Check if the server is up.
"""
raise NotImplementedError("Concret class should implement this")
@property
def hostname(self):
"""
Get the IP address of the server.
"""
return self._server.hostname
@property
def port(self):
"""
Get the port number of the server.
"""
raise NotImplementedError("Concret class should implement this")
@property
def cwd(self):
"""
Get the current working directory of the server.
"""
return self._cwd
@property
def image(self):
"""
Get the Docker image of the server.
Only used when SERVER_FIXTURE_SERVER_CLASS is 'docker' or 'kubernetes'.
"""
raise NotImplementedError("Concret class should implement this")
@property
def labels(self):
"""
Extra labels to be added to the server fixture container.
Only used when SERVER_FIXTURE_SERVER_CLASS is 'docker' or 'kubernetes'.
"""
return dict()
@property
def env(self):
"""
Get the environment variables for running the server fixture.
"""
return dict()
@property
def cmd(self):
"""
Get the command to run the server fixture.
"""
raise NotImplementedError("Concrete class should implement this")
@property
def cmd_local(self):
"""
Get the local command to run the server fixture.
Only used when SERVER_FIXTURES_SERVER_CLASS is 'thread'.
"""
return self.cmd
def get_args(self, hostname=None, workspace=None):
"""
Get the arguments to run the server fixtures.
@param hostname: hostname of the server
@param workspace: workspace of the server
"""
raise NotImplementedError("Concrete class should implement this")
def pre_setup(self):
"""
DEPRECATED
Only used when SERVER_FIXTURE_SERVER_CLASS is 'thread'
"""
pass
def post_setup(self):
"""
Set up step to be run after server is up.
"""
pass
def _wait_for_go(self, start_interval=0.1, retries_per_interval=3, retry_limit=28, base=2.0):
"""
This is called to wait until the server has started running.
Uses a binary exponential backoff algorithm to set wait interval
between retries. This finds the happy medium between quick starting
servers (e.g. in-memory DBs) while remaining useful for the slower
starting servers (e.g. web servers).
Parameters
----------
start_interval: ``float``
initial wait interval in seconds
retries_per_interval: ``int``
number of retries before increasing waiting time
retry_limit: ``int``
total number of retries to attempt before giving up
base: ``float``
backoff multiplier
"""
if start_interval <= 0.0:
raise ValueError('start interval must be positive!')
interval = start_interval
retry_count = retry_limit
start_time = datetime.now()
while retry_count > 0:
for _ in range(retries_per_interval):
log.debug('sleeping for %s before retrying (%d of %d)'
% (interval, ((retry_limit + 1) - retry_count), retry_limit))
if self.check_server_up():
log.debug('waited %s for server to start successfully'
% str(datetime.now() - start_time))
return
time.sleep(interval)
retry_count -= 1
interval *= base
raise ValueError("Server failed to start up after waiting %s. Giving up!"
% str(datetime.now() - start_time))
def _get_port(self, default_port):
"""
Get a random or pseudo-random port based on config.
"""
if self._server_class != 'thread':
return default_port
return (
get_ephemeral_port() if self.random_port
else self._get_pseudo_random_port())
def _get_pseudo_random_port(self):
"""
Get a pseudo random port based on port_seed,
classname and current username.
"""
sig = (os.environ['USER'] + self.__class__.__name__).encode('utf-8')
return self.port_seed - int(hashlib.sha1(sig).hexdigest()[:3], 16)
|
[] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
python
| 1 | 0 | |
cmd/kube-apiserver/app/testing/testserver.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"fmt"
"net"
"os"
"path"
"path/filepath"
"runtime"
"time"
"github.com/spf13/pflag"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storageversion"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/cert"
"k8s.io/kube-aggregator/pkg/apiserver"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
testutil "k8s.io/kubernetes/test/utils"
)
// This key is for testing purposes only and is not considered secure.
const ecdsaPrivateKey = `-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIEZmTmUhuanLjPA2CLquXivuwBDHTt5XYwgIr/kA1LtRoAoGCCqGSM49
AwEHoUQDQgAEH6cuzP8XuD5wal6wf9M6xDljTOPLX2i8uIp/C/ASqiIGUeeKQtX0
/IR3qCXyThP/dbCiHrF3v1cuhBOHY8CLVg==
-----END EC PRIVATE KEY-----`
// TearDownFunc is to be called to tear down a test server.
type TearDownFunc func()
// TestServerInstanceOptions holds instance options for the TestServer
type TestServerInstanceOptions struct {
// DisableStorageCleanup Disable the automatic storage cleanup
DisableStorageCleanup bool
// Enable cert-auth for the kube-apiserver
EnableCertAuth bool
// Wrap the storage version interface of the created server's generic server.
StorageVersionWrapFunc func(storageversion.Manager) storageversion.Manager
}
// TestServer return values supplied by kube-test-ApiServer
type TestServer struct {
ClientConfig *restclient.Config // Rest client config
ServerOpts *options.ServerRunOptions // ServerOpts
TearDownFn TearDownFunc // TearDown function
TmpDir string // Temp Dir used, by the apiserver
EtcdClient *clientv3.Client // used by tests that need to check data migrated from APIs that are no longer served
EtcdStoragePrefix string // storage prefix in etcd
}
// Logger allows t.Testing and b.Testing to be passed to StartTestServer and StartTestServerOrDie
type Logger interface {
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Logf(format string, args ...interface{})
}
// NewDefaultTestServerOptions Default options for TestServer instances
func NewDefaultTestServerOptions() *TestServerInstanceOptions {
return &TestServerInstanceOptions{
DisableStorageCleanup: false,
EnableCertAuth: true,
}
}
// StartTestServer starts an etcd server and kube-apiserver. A rest client config and a tear-down func,
// and location of the tmpdir are returned.
//
// Note: we return a tear-down func instead of a stop channel because the latter will leak temporary
// files, because Golang testing's call to os.Exit will not give a stop-channel goroutine
// enough time to remove temporary files.
func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) {
if instanceOptions == nil {
instanceOptions = NewDefaultTestServerOptions()
}
// TODO : Remove TrackStorageCleanup below when PR
// https://github.com/kubernetes/kubernetes/pull/50690
// merges as that shuts down storage properly
if !instanceOptions.DisableStorageCleanup {
registry.TrackStorageCleanup()
}
stopCh := make(chan struct{})
tearDown := func() {
if !instanceOptions.DisableStorageCleanup {
registry.CleanupStorage()
}
close(stopCh)
if len(result.TmpDir) != 0 {
os.RemoveAll(result.TmpDir)
}
}
defer func() {
if result.TearDownFn == nil {
tearDown()
}
}()
result.TmpDir, err = os.MkdirTemp("", "kubernetes-kube-apiserver")
if err != nil {
return result, fmt.Errorf("failed to create temp dir: %v", err)
}
fs := pflag.NewFlagSet("test", pflag.PanicOnError)
s := options.NewServerRunOptions()
for _, f := range s.Flags().FlagSets {
fs.AddFlagSet(f)
}
s.SecureServing.Listener, s.SecureServing.BindPort, err = createLocalhostListenerOnFreePort()
if err != nil {
return result, fmt.Errorf("failed to create listener: %v", err)
}
s.SecureServing.ServerCert.CertDirectory = result.TmpDir
if instanceOptions.EnableCertAuth {
// create certificates for aggregation and client-cert auth
proxySigningKey, err := testutil.NewPrivateKey()
if err != nil {
return result, err
}
proxySigningCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "front-proxy-ca"}, proxySigningKey)
if err != nil {
return result, err
}
proxyCACertFile := path.Join(s.SecureServing.ServerCert.CertDirectory, "proxy-ca.crt")
if err := os.WriteFile(proxyCACertFile, testutil.EncodeCertPEM(proxySigningCert), 0644); err != nil {
return result, err
}
s.Authentication.RequestHeader.ClientCAFile = proxyCACertFile
clientSigningKey, err := testutil.NewPrivateKey()
if err != nil {
return result, err
}
clientSigningCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "client-ca"}, clientSigningKey)
if err != nil {
return result, err
}
clientCACertFile := path.Join(s.SecureServing.ServerCert.CertDirectory, "client-ca.crt")
if err := os.WriteFile(clientCACertFile, testutil.EncodeCertPEM(clientSigningCert), 0644); err != nil {
return result, err
}
s.Authentication.ClientCert.ClientCA = clientCACertFile
}
s.SecureServing.ExternalAddress = s.SecureServing.Listener.Addr().(*net.TCPAddr).IP // use listener addr although it is a loopback device
pkgPath, err := pkgPath(t)
if err != nil {
return result, err
}
s.SecureServing.ServerCert.FixtureDirectory = filepath.Join(pkgPath, "testdata")
s.ServiceClusterIPRanges = "10.0.0.0/16"
s.Etcd.StorageConfig = *storageConfig
s.APIEnablement.RuntimeConfig.Set("api/all=true")
if err := fs.Parse(customFlags); err != nil {
return result, err
}
saSigningKeyFile, err := os.CreateTemp("/tmp", "insecure_test_key")
if err != nil {
t.Fatalf("create temp file failed: %v", err)
}
defer os.RemoveAll(saSigningKeyFile.Name())
if err = os.WriteFile(saSigningKeyFile.Name(), []byte(ecdsaPrivateKey), 0666); err != nil {
t.Fatalf("write file %s failed: %v", saSigningKeyFile.Name(), err)
}
s.ServiceAccountSigningKeyFile = saSigningKeyFile.Name()
s.Authentication.ServiceAccounts.Issuers = []string{"https://foo.bar.example.com"}
s.Authentication.ServiceAccounts.KeyFiles = []string{saSigningKeyFile.Name()}
completedOptions, err := app.Complete(s)
if err != nil {
return result, fmt.Errorf("failed to set default ServerRunOptions: %v", err)
}
if errs := completedOptions.Validate(); len(errs) != 0 {
return result, fmt.Errorf("failed to validate ServerRunOptions: %v", utilerrors.NewAggregate(errs))
}
t.Logf("runtime-config=%v", completedOptions.APIEnablement.RuntimeConfig)
t.Logf("Starting kube-apiserver on port %d...", s.SecureServing.BindPort)
server, err := app.CreateServerChain(completedOptions, stopCh)
if err != nil {
return result, fmt.Errorf("failed to create server chain: %v", err)
}
if instanceOptions.StorageVersionWrapFunc != nil {
server.GenericAPIServer.StorageVersionManager = instanceOptions.StorageVersionWrapFunc(server.GenericAPIServer.StorageVersionManager)
}
errCh := make(chan error)
go func(stopCh <-chan struct{}) {
prepared, err := server.PrepareRun()
if err != nil {
errCh <- err
} else if err := prepared.Run(stopCh); err != nil {
errCh <- err
}
}(stopCh)
t.Logf("Waiting for /healthz to be ok...")
client, err := kubernetes.NewForConfig(server.GenericAPIServer.LoopbackClientConfig)
if err != nil {
return result, fmt.Errorf("failed to create a client: %v", err)
}
// wait until healthz endpoint returns ok
err = wait.Poll(100*time.Millisecond, time.Minute, func() (bool, error) {
select {
case err := <-errCh:
return false, err
default:
}
req := client.CoreV1().RESTClient().Get().AbsPath("/healthz")
// The storage version bootstrap test wraps the storage version post-start
// hook, so the hook won't become healthy when the server bootstraps
if instanceOptions.StorageVersionWrapFunc != nil {
// We hardcode the param instead of having a new instanceOptions field
// to avoid confusing users with more options.
storageVersionCheck := fmt.Sprintf("poststarthook/%s", apiserver.StorageVersionPostStartHookName)
req.Param("exclude", storageVersionCheck)
}
result := req.Do(context.TODO())
status := 0
result.StatusCode(&status)
if status == 200 {
return true, nil
}
return false, nil
})
if err != nil {
return result, fmt.Errorf("failed to wait for /healthz to return ok: %v", err)
}
// wait until default namespace is created
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
select {
case err := <-errCh:
return false, err
default:
}
if _, err := client.CoreV1().Namespaces().Get(context.TODO(), "default", metav1.GetOptions{}); err != nil {
if !errors.IsNotFound(err) {
t.Logf("Unable to get default namespace: %v", err)
}
return false, nil
}
return true, nil
})
if err != nil {
return result, fmt.Errorf("failed to wait for default namespace to be created: %v", err)
}
tlsInfo := transport.TLSInfo{
CertFile: storageConfig.Transport.CertFile,
KeyFile: storageConfig.Transport.KeyFile,
TrustedCAFile: storageConfig.Transport.TrustedCAFile,
}
tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
return result, err
}
etcdConfig := clientv3.Config{
Endpoints: storageConfig.Transport.ServerList,
DialTimeout: 20 * time.Second,
DialOptions: []grpc.DialOption{
grpc.WithBlock(), // block until the underlying connection is up
},
TLS: tlsConfig,
}
etcdClient, err := clientv3.New(etcdConfig)
if err != nil {
return result, err
}
// from here the caller must call tearDown
result.ClientConfig = restclient.CopyConfig(server.GenericAPIServer.LoopbackClientConfig)
result.ClientConfig.QPS = 1000
result.ClientConfig.Burst = 10000
result.ServerOpts = s
result.TearDownFn = tearDown
result.EtcdClient = etcdClient
result.EtcdStoragePrefix = storageConfig.Prefix
return result, nil
}
// StartTestServerOrDie calls StartTestServer, then t.Fatal if it does not succeed.
func StartTestServerOrDie(t Logger, instanceOptions *TestServerInstanceOptions, flags []string, storageConfig *storagebackend.Config) *TestServer {
result, err := StartTestServer(t, instanceOptions, flags, storageConfig)
if err == nil {
return &result
}
t.Fatalf("failed to launch server: %v", err)
return nil
}
func createLocalhostListenerOnFreePort() (net.Listener, int, error) {
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, 0, err
}
// get port
tcpAddr, ok := ln.Addr().(*net.TCPAddr)
if !ok {
ln.Close()
return nil, 0, fmt.Errorf("invalid listen address: %q", ln.Addr().String())
}
return ln, tcpAddr.Port, nil
}
// pkgPath returns the absolute file path to this package's directory. With go
// test, we can just look at the runtime call stack. However, bazel compiles go
// binaries with the -trimpath option, so the simple approach fails; however, we
// can consult environment variables to derive the path.
//
// The approach taken here works for both go test and bazel on the assumption
// that if and only if trimpath is passed, we are running under bazel.
func pkgPath(t Logger) (string, error) {
_, thisFile, _, ok := runtime.Caller(0)
if !ok {
return "", fmt.Errorf("failed to get current file")
}
pkgPath := filepath.Dir(thisFile)
// If we find bazel env variables, then -trimpath was passed so we need to
// construct the path from the environment.
if testSrcdir, testWorkspace := os.Getenv("TEST_SRCDIR"), os.Getenv("TEST_WORKSPACE"); testSrcdir != "" && testWorkspace != "" {
t.Logf("Detected bazel env varaiables: TEST_SRCDIR=%q TEST_WORKSPACE=%q", testSrcdir, testWorkspace)
pkgPath = filepath.Join(testSrcdir, testWorkspace, pkgPath)
}
// If the path is still not absolute, something other than bazel compiled
// with -trimpath.
if !filepath.IsAbs(pkgPath) {
return "", fmt.Errorf("can't construct an absolute path from %q", pkgPath)
}
t.Logf("Resolved testserver package path to: %q", pkgPath)
return pkgPath, nil
}
|
[
"\"TEST_SRCDIR\"",
"\"TEST_WORKSPACE\""
] |
[] |
[
"TEST_WORKSPACE",
"TEST_SRCDIR"
] |
[]
|
["TEST_WORKSPACE", "TEST_SRCDIR"]
|
go
| 2 | 0 | |
graphs/typegraph/diff/diff_test.go
|
// Copyright 2017 The Serulian Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package diff
import (
"encoding/json"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/serulian/compiler/compilergraph"
"github.com/serulian/compiler/graphs/srg"
"github.com/serulian/compiler/graphs/srg/typeconstructor"
"github.com/serulian/compiler/graphs/typegraph"
"github.com/serulian/compiler/packageloader"
"github.com/serulian/compiler/webidl"
"github.com/stretchr/testify/assert"
)
type diffTest struct {
name string
originalModules []typegraph.TestModule
updatedModules []typegraph.TestModule
expectedKind map[string]DiffKind
}
var diffTests = []diffTest{
diffTest{
"no changes test",
[]typegraph.TestModule{
typegraph.TestModule{
"somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
[]typegraph.TestModule{
typegraph.TestModule{
"somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
map[string]DiffKind{
".": Same,
},
},
diffTest{
"filtered no changes test",
[]typegraph.TestModule{
typegraph.TestModule{
"foopackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
typegraph.TestModule{
"barpackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
[]typegraph.TestModule{
typegraph.TestModule{
"foopackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
typegraph.TestModule{
"barpackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "string",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
map[string]DiffKind{
"foopackage": Same,
},
},
diffTest{
"package removed test",
[]typegraph.TestModule{
typegraph.TestModule{
"somepackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
[]typegraph.TestModule{},
map[string]DiffKind{
"somepackage": Removed,
},
},
diffTest{
"package added test",
[]typegraph.TestModule{},
[]typegraph.TestModule{
typegraph.TestModule{
"somepackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
map[string]DiffKind{
"somepackage": Added,
},
},
diffTest{
"package changed test",
[]typegraph.TestModule{
typegraph.TestModule{
"somepackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "string",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
[]typegraph.TestModule{
typegraph.TestModule{
"somepackage/somemodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
map[string]DiffKind{
"somepackage": Changed,
},
},
diffTest{
"file moved in package no change test",
[]typegraph.TestModule{
typegraph.TestModule{
"somepackage/firstmodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
[]typegraph.TestModule{
typegraph.TestModule{
"somepackage/secondmodule",
[]typegraph.TestType{
typegraph.TestType{"nominal", "SomeAgent", "int",
[]typegraph.TestGeneric{},
[]typegraph.TestMember{},
},
},
[]typegraph.TestMember{},
},
},
map[string]DiffKind{
"somepackage": Same,
},
},
}
func TestDiff(t *testing.T) {
for _, test := range diffTests {
originalGraph := typegraph.ConstructTypeGraphWithBasicTypes(test.originalModules...)
updatedGraph := typegraph.ConstructTypeGraphWithBasicTypes(test.updatedModules...)
diff := ComputeDiff(
TypeGraphInformation{originalGraph, ""},
TypeGraphInformation{updatedGraph, ""})
for path, kind := range test.expectedKind {
packageDiff, found := diff.Packages[path]
if !assert.True(t, found, "Missing expected package diff %s for test %s", path, test.name) {
continue
}
assert.Equal(t, kind, packageDiff.Kind, "Mismatch in expected kind for package %s under test %s", path, test.name)
}
}
}
const TESTLIB_PATH = "../../../testlib"
func getTypeGraphFromSource(t *testing.T, path string) (*typegraph.TypeGraph, bool) {
graph, err := compilergraph.NewGraph("test/" + path)
if err != nil {
t.Errorf("Got error: %v", err)
return nil, false
}
testSRG := srg.NewSRG(graph)
testIDL := webidl.WebIDLProvider(graph)
loader := packageloader.NewPackageLoader(
packageloader.NewBasicConfig(graph.RootSourceFilePath(), testIDL.SourceHandler(), testSRG.SourceHandler()))
srgResult := loader.Load(packageloader.Library{TESTLIB_PATH, false, "", "testcore"})
// Make sure we had no errors during construction.
if !assert.True(t, srgResult.Status, "Got error for SRG construction: %s", srgResult.Errors) {
return nil, false
}
// Construct the type graph.
result, _ := typegraph.BuildTypeGraph(testSRG.Graph, testIDL.TypeConstructor(), typeconstructor.GetConstructor(testSRG))
return result.Graph, true
}
func loadJson(path string) string {
b, err := ioutil.ReadFile(path)
if err != nil {
panic(err)
}
return string(b)
}
func writeJson(value string, path string) {
err := ioutil.WriteFile(path, []byte(value), 0644)
if err != nil {
panic(err)
}
}
var sourceDiffTests = []string{
"nochanges",
"classadded",
"classremoved",
"classchanged",
"unexportedtypechanged",
"memberchanged",
"unexportedmemberchanged",
"nullableparameteradded",
"generics",
"withwebidl",
"withwebidlchanges",
"withcorelibref",
"withwebidlsubpackage",
"operatorchanged",
"operatorsame",
"fieldadded",
"interfacefunctionadded",
"nonrequiredfieldadded",
}
func TestSourcedDiff(t *testing.T) {
for _, test := range sourceDiffTests {
originalGraph, ok := getTypeGraphFromSource(t, test+"/original.seru")
if !ok {
return
}
updatedGraph, ok := getTypeGraphFromSource(t, test+"/updated.seru")
if !ok {
return
}
filter := func(module typegraph.TGModule) bool {
return strings.HasPrefix(module.PackagePath(), "test")
}
diff := ComputeDiff(
TypeGraphInformation{originalGraph, "test"},
TypeGraphInformation{updatedGraph, "test"},
filter)
b, _ := json.MarshalIndent(diff, "", " ")
diffJson := string(b)
if os.Getenv("REGEN") == "true" {
writeJson(diffJson, "test/"+test+"/diff.json")
} else {
expectedDiff := loadJson("test/" + test + "/diff.json")
assert.Equal(t, expectedDiff, diffJson, "Diff mismatch on test %s\nExpected: %v\nActual: %v\n\n", test, expectedDiff, diffJson)
}
}
}
|
[
"\"REGEN\""
] |
[] |
[
"REGEN"
] |
[]
|
["REGEN"]
|
go
| 1 | 0 | |
cogs/notifications.py
|
import discord
from discord.ext import commands, tasks
import asyncio
import os
from .utils.notification import latestYtVid
import asyncpraw
from googleapiclient.discovery import build
class Notifications(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.reddit = asyncpraw.Reddit(client_id=os.getenv('REDDIT_CLIENT_ID'),
client_secret=os.getenv('REDDIT_CLIENT_SECRET'), user_agent="Martin Garrix Bot")
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "auth.json"
self.youtube = build('youtube', 'v3', developerKey=os.getenv('YOUTUBE_API_TOKEN'))
self.getRedditPosts.start()
self.getYtVids.start()
@tasks.loop(minutes=3)
async def getRedditPosts(self):
subreddit = await self.reddit.subreddit('Martingarrix')
new_post = subreddit.new(limit=5)
async for post in new_post:
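# The primary-key insert below doubles as de-duplication: posts already stored raise an error and are skipped.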
try:
await self.bot.db.execute("INSERT INTO reddit_posts(post_id) VALUES ($1)", post.id)
except:
continue
embed = discord.Embed(title=post.title,
url=f"https://reddit.com{post.permalink}",
color=discord.Color.orange())
if post.selftext:
embed.add_field(name="Content", value=post.selftext, inline=False)
try:
if post.preview['images'][0]['source']['url']:
embed.set_image(url=post.preview['images'][0]['source']['url'])
except:
pass
embed.set_footer(
text=f"Author: u/{post.author} on Subreddit {post.subreddit_name_prefixed}")
query = "SELECT reddit_notifications_channel FROM guild_configs WHERE reddit_notifications_channel IS NOT NULL"
channels = await self.bot.db.fetch(query)
for channel in channels:
reddit_channel = self.bot.get_channel(channel['reddit_notifications_channel'])
try:
await reddit_channel.send(embed=embed)
except Exception as e:
print(channel)
await asyncio.sleep(2)
@tasks.loop(minutes=3)
async def getYtVids(self):
playlist_ids = ['UU5H_KXkPbEsGs0tFt8R35mA', 'PLwPIORXMGwchuy4DTiIAasWRezahNrbUJ']
for playlist_id in playlist_ids:
video = self.youtube.playlistItems().list(playlistId=playlist_id, part="snippet", maxResults=1)
loop = asyncio.get_event_loop()
video = await loop.run_in_executor(None, video.execute)
video_id = video['items'][0]['snippet']['resourceId']['videoId']
try:
await self.bot.db.execute("INSERT INTO youtube_videos(video_id) VALUES ($1)", video_id)
except:
continue
query = "SELECT youtube_notifications_channel FROM guild_configs WHERE youtube_notifications_channel IS NOT NULL"
channels = await self.bot.db.fetch(query)
for channel in channels:
youtube_notification_channel = self.bot.get_channel(channel['youtube_notifications_channel'])
try:
await youtube_notification_channel.send('https://www.youtube.com/watch?v=' + video_id)
except:
pass
await asyncio.sleep(2)
def setup(bot):
bot.add_cog(Notifications(bot))
|
[] |
[] |
[
"REDDIT_CLIENT_SECRET",
"REDDIT_CLIENT_ID",
"YOUTUBE_API_TOKEN",
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["REDDIT_CLIENT_SECRET", "REDDIT_CLIENT_ID", "YOUTUBE_API_TOKEN", "GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 4 | 0 | |
test/span.go
|
package test
import (
"context"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
jaeger "github.com/uber/jaeger-client-go"
)
type MockSpan struct {
traceID jaeger.TraceID
spanID jaeger.SpanID
}
func NewMockSpan(traceID, spanID uint64) *MockSpan {
return &MockSpan{
traceID: jaeger.TraceID{Low: traceID},
spanID: jaeger.SpanID(spanID),
}
}
func NewContextWithMockSpan(ctx context.Context, traceID, spanID uint64) context.Context {
return opentracing.ContextWithSpan(ctx, NewMockSpan(traceID, spanID))
}
func (s *MockSpan) Context() opentracing.SpanContext { return &MockSpanContext{span: *s} }
func (s *MockSpan) SetBaggageItem(_, _ string) opentracing.Span { return s }
func (s *MockSpan) BaggageItem(_ string) string { return "" }
func (s *MockSpan) SetTag(_ string, _ interface{}) opentracing.Span { return s }
func (s *MockSpan) LogFields(_ ...log.Field) {}
func (s *MockSpan) LogKV(_ ...interface{}) {}
func (s *MockSpan) Finish() {}
func (s *MockSpan) FinishWithOptions(_ opentracing.FinishOptions) {}
func (s *MockSpan) SetOperationName(_ string) opentracing.Span { return s }
func (s *MockSpan) Tracer() opentracing.Tracer { return nil }
func (s *MockSpan) LogEvent(_ string) {}
func (s *MockSpan) LogEventWithPayload(_ string, _ interface{}) {}
func (s *MockSpan) Log(_ opentracing.LogData) {}
type MockSpanContext struct {
span MockSpan
}
func (n *MockSpanContext) ForeachBaggageItem(_ func(k, v string) bool) {}
func (n *MockSpanContext) TraceID() jaeger.TraceID {
return n.span.traceID
}
func (n *MockSpanContext) SpanID() jaeger.SpanID {
return n.span.spanID
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
pkg/operator/starter.go
|
package operator
import (
"context"
"encoding/json"
"math/rand"
"os"
"time"
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned"
configv1informers "github.com/openshift/client-go/config/informers/externalversions"
operatorcontrolplaneclient "github.com/openshift/client-go/operatorcontrolplane/clientset/versioned"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/boundsatokensignercontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/certrotationcontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/certrotationtimeupgradeablecontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/configmetrics"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/configobservation/configobservercontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/connectivitycheckcontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/featureupgradablecontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/nodekubeconfigcontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/operatorclient"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/resourcesynccontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/targetconfigcontroller"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/terminationobserver"
"github.com/openshift/cluster-kube-apiserver-operator/pkg/operator/v410_00_assets"
"github.com/openshift/library-go/pkg/controller/controllercmd"
libgoaudit "github.com/openshift/library-go/pkg/operator/apiserver/audit"
"github.com/openshift/library-go/pkg/operator/certrotation"
"github.com/openshift/library-go/pkg/operator/encryption"
"github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators"
encryptiondeployer "github.com/openshift/library-go/pkg/operator/encryption/deployer"
"github.com/openshift/library-go/pkg/operator/eventwatch"
"github.com/openshift/library-go/pkg/operator/genericoperatorclient"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/staleconditions"
"github.com/openshift/library-go/pkg/operator/staticpod"
"github.com/openshift/library-go/pkg/operator/staticpod/controller/installer"
"github.com/openshift/library-go/pkg/operator/staticpod/controller/revision"
"github.com/openshift/library-go/pkg/operator/staticresourcecontroller"
"github.com/openshift/library-go/pkg/operator/status"
"github.com/openshift/library-go/pkg/operator/v1helpers"
corev1 "k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
kubemigratorclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset"
migrationv1alpha1informer "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/informer"
)
func RunOperator(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
// This kube client use protobuf, do not use it for CR
kubeClient, err := kubernetes.NewForConfig(controllerContext.ProtoKubeConfig)
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(controllerContext.ProtoKubeConfig)
if err != nil {
return err
}
configClient, err := configv1client.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
operatorcontrolplaneClient, err := operatorcontrolplaneclient.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
apiextensionsClient, err := apiextensionsclient.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
migrationClient, err := kubemigratorclient.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
kubeInformersForNamespaces := v1helpers.NewKubeInformersForNamespaces(
kubeClient,
"",
operatorclient.GlobalUserSpecifiedConfigNamespace,
operatorclient.GlobalMachineSpecifiedConfigNamespace,
operatorclient.TargetNamespace,
operatorclient.OperatorNamespace,
"kube-system", // system:openshift:controller:kube-apiserver-check-endpoints role binding
"openshift-etcd",
"openshift-apiserver",
)
configInformers := configv1informers.NewSharedInformerFactory(configClient, 10*time.Minute)
operatorClient, dynamicInformers, err := genericoperatorclient.NewStaticPodOperatorClient(controllerContext.KubeConfig, operatorv1.GroupVersion.WithResource("kubeapiservers"))
if err != nil {
return err
}
resourceSyncController, err := resourcesynccontroller.NewResourceSyncController(
operatorClient,
kubeInformersForNamespaces,
kubeClient,
controllerContext.EventRecorder,
)
if err != nil {
return err
}
auditPolicyPathGetter, err := libgoaudit.NewAuditPolicyPathGetter("/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-audit-policies")
if err != nil {
return err
}
configObserver := configobservercontroller.NewConfigObserver(
operatorClient,
kubeInformersForNamespaces,
configInformers,
resourceSyncController,
auditPolicyPathGetter,
controllerContext.EventRecorder,
)
eventWatcher := eventwatch.New().
WithEventHandler(operatorclient.TargetNamespace, "LateConnections", terminationobserver.ProcessLateConnectionEvents).
ToController(kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace), kubeClient.CoreV1(), controllerContext.EventRecorder)
staticResourceController := staticresourcecontroller.NewStaticResourceController(
"KubeAPIServerStaticResources",
libgoaudit.WithAuditPolicies("kube-apiserver-audit-policies", "openshift-kube-apiserver", v410_00_assets.Asset),
[]string{
"v4.1.0/kube-apiserver/ns.yaml",
"v4.1.0/kube-apiserver/svc.yaml",
"v4.1.0/kube-apiserver/kubeconfig-cm.yaml",
"v4.1.0/kube-apiserver/check-endpoints-clusterrole.yaml",
"v4.1.0/kube-apiserver/check-endpoints-clusterrole-node-reader.yaml",
"v4.1.0/kube-apiserver/check-endpoints-clusterrole-crd-reader.yaml",
"v4.1.0/kube-apiserver/check-endpoints-clusterrolebinding-auth-delegator.yaml",
"v4.1.0/kube-apiserver/check-endpoints-clusterrolebinding-node-reader.yaml",
"v4.1.0/kube-apiserver/check-endpoints-clusterrolebinding-crd-reader.yaml",
"v4.1.0/kube-apiserver/check-endpoints-kubeconfig-cm.yaml",
"v4.1.0/kube-apiserver/check-endpoints-rolebinding-kube-system.yaml",
"v4.1.0/kube-apiserver/check-endpoints-rolebinding.yaml",
"v4.1.0/kube-apiserver/control-plane-node-kubeconfig-cm.yaml",
"v4.1.0/kube-apiserver/delegated-incluster-authentication-rolebinding.yaml",
"v4.1.0/kube-apiserver/localhost-recovery-client-crb.yaml",
"v4.1.0/kube-apiserver/localhost-recovery-sa.yaml",
"v4.1.0/kube-apiserver/localhost-recovery-token.yaml",
"v4.1.0/kube-apiserver/apiserver.openshift.io_apirequestcount.yaml",
"v4.1.0/kube-apiserver/storage-version-migration-flowschema.yaml",
"v4.1.0/kube-apiserver/storage-version-migration-prioritylevelconfiguration.yaml",
"v4.1.0/alerts/api-usage.yaml",
"v4.1.0/alerts/cpu-utilization.yaml",
"v4.1.0/alerts/kube-apiserver-requests.yaml",
libgoaudit.AuditPoliciesConfigMapFileName,
"v4.1.0/alerts/kube-apiserver-slos.yaml",
},
(&resourceapply.ClientHolder{}).
WithKubernetes(kubeClient).
WithAPIExtensionsClient(apiextensionsClient).
WithDynamicClient(dynamicClient).
WithMigrationClient(migrationClient),
operatorClient,
controllerContext.EventRecorder,
).AddKubeInformers(kubeInformersForNamespaces)
targetConfigReconciler := targetconfigcontroller.NewTargetConfigController(
os.Getenv("IMAGE"),
os.Getenv("OPERATOR_IMAGE"),
operatorClient,
kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace),
kubeInformersForNamespaces,
kubeClient,
controllerContext.EventRecorder,
)
nodeKubeconfigController := nodekubeconfigcontroller.NewNodeKubeconfigController(
operatorClient,
kubeInformersForNamespaces,
kubeClient,
configInformers.Config().V1().Infrastructures(),
controllerContext.EventRecorder,
)
apiextensionsInformers := apiextensionsinformers.NewSharedInformerFactory(apiextensionsClient, 10*time.Minute)
connectivityCheckController := connectivitycheckcontroller.NewKubeAPIServerConnectivityCheckController(
kubeClient,
operatorClient,
apiextensionsClient,
kubeInformersForNamespaces,
operatorcontrolplaneClient,
configInformers,
apiextensionsInformers,
controllerContext.EventRecorder,
)
// don't change any versions until we sync
versionRecorder := status.NewVersionGetter()
clusterOperator, err := configClient.ConfigV1().ClusterOperators().Get(ctx, "kube-apiserver", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
for _, version := range clusterOperator.Status.Versions {
versionRecorder.SetVersion(version.Name, version.Version)
}
versionRecorder.SetVersion("raw-internal", status.VersionForOperatorFromEnv())
staticPodControllers, err := staticpod.NewBuilder(operatorClient, kubeClient, kubeInformersForNamespaces).
WithEvents(controllerContext.EventRecorder).
WithCustomInstaller([]string{"cluster-kube-apiserver-operator", "installer"}, installerErrorInjector(operatorClient)).
WithPruning([]string{"cluster-kube-apiserver-operator", "prune"}, "kube-apiserver-pod").
WithRevisionedResources(operatorclient.TargetNamespace, "kube-apiserver", RevisionConfigMaps, RevisionSecrets).
WithUnrevisionedCerts("kube-apiserver-certs", CertConfigMaps, CertSecrets).
WithVersioning("kube-apiserver", versionRecorder).
WithMinReadyDuration(30 * time.Second).
ToControllers()
if err != nil {
return err
}
clusterOperatorStatus := status.NewClusterOperatorStatusController(
"kube-apiserver",
[]configv1.ObjectReference{
{Group: "operator.openshift.io", Resource: "kubeapiservers", Name: "cluster"},
{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"},
{Group: "security.openshift.io", Resource: "securitycontextconstraints"},
{Resource: "namespaces", Name: operatorclient.GlobalUserSpecifiedConfigNamespace},
{Resource: "namespaces", Name: operatorclient.GlobalMachineSpecifiedConfigNamespace},
{Resource: "namespaces", Name: operatorclient.OperatorNamespace},
{Resource: "namespaces", Name: operatorclient.TargetNamespace},
{Group: "admissionregistration.k8s.io", Resource: "mutatingwebhookconfigurations"},
{Group: "admissionregistration.k8s.io", Resource: "validatingwebhookconfigurations"},
{Group: "controlplane.operator.openshift.io", Resource: "podnetworkconnectivitychecks", Namespace: "openshift-kube-apiserver"},
{Group: "apiserver.openshift.io", Resource: "apirequestcounts"},
},
configClient.ConfigV1(),
configInformers.Config().V1().ClusterOperators(),
operatorClient,
versionRecorder,
controllerContext.EventRecorder,
)
certRotationScale, err := certrotation.GetCertRotationScale(kubeClient, operatorclient.GlobalUserSpecifiedConfigNamespace)
if err != nil {
return err
}
certRotationController, err := certrotationcontroller.NewCertRotationController(
kubeClient,
operatorClient,
configInformers,
kubeInformersForNamespaces,
controllerContext.EventRecorder.WithComponentSuffix("cert-rotation-controller"),
certRotationScale,
)
if err != nil {
return err
}
staticPodNodeProvider := encryptiondeployer.StaticPodNodeProvider{OperatorClient: operatorClient}
deployer, err := encryptiondeployer.NewRevisionLabelPodDeployer("revision", operatorclient.TargetNamespace, kubeInformersForNamespaces, resourceSyncController, kubeClient.CoreV1(), kubeClient.CoreV1(), staticPodNodeProvider)
if err != nil {
return err
}
migrationInformer := migrationv1alpha1informer.NewSharedInformerFactory(migrationClient, time.Minute*30)
migrator := migrators.NewKubeStorageVersionMigrator(migrationClient, migrationInformer.Migration().V1alpha1(), kubeClient.Discovery())
encryptionControllers, err := encryption.NewControllers(
operatorclient.TargetNamespace,
nil,
encryption.StaticEncryptionProvider{
schema.GroupResource{Group: "", Resource: "secrets"},
schema.GroupResource{Group: "", Resource: "configmaps"},
},
deployer,
migrator,
operatorClient,
configClient.ConfigV1().APIServers(),
configInformers.Config().V1().APIServers(),
kubeInformersForNamespaces,
kubeClient.CoreV1(),
controllerContext.EventRecorder,
)
if err != nil {
return err
}
featureUpgradeableController := featureupgradablecontroller.NewFeatureUpgradeableController(
operatorClient,
configInformers,
controllerContext.EventRecorder,
)
certRotationTimeUpgradeableController := certrotationtimeupgradeablecontroller.NewCertRotationTimeUpgradeableController(
operatorClient,
kubeInformersForNamespaces.InformersFor(operatorclient.GlobalUserSpecifiedConfigNamespace).Core().V1().ConfigMaps(),
controllerContext.EventRecorder.WithComponentSuffix("cert-rotation-controller"),
)
terminationObserver := terminationobserver.NewTerminationObserver(
operatorclient.TargetNamespace,
kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace),
kubeClient.CoreV1(),
controllerContext.EventRecorder,
)
boundSATokenSignerController := boundsatokensignercontroller.NewBoundSATokenSignerController(
operatorClient,
kubeInformersForNamespaces,
kubeClient,
controllerContext.EventRecorder,
)
staleConditionsController := staleconditions.NewRemoveStaleConditionsController(
[]string{
// the static pod operator used to directly set these. this removes those conditions since the static pod operator was updated.
// these can be removed in 4.5
"Available", "Progressing",
},
operatorClient,
controllerContext.EventRecorder,
)
// register termination metrics
terminationobserver.RegisterMetrics()
// register config metrics
configmetrics.Register(configInformers)
kubeInformersForNamespaces.Start(ctx.Done())
configInformers.Start(ctx.Done())
dynamicInformers.Start(ctx.Done())
migrationInformer.Start(ctx.Done())
apiextensionsInformers.Start(ctx.Done())
go staticPodControllers.Start(ctx)
go resourceSyncController.Run(ctx, 1)
go staticResourceController.Run(ctx, 1)
go targetConfigReconciler.Run(ctx, 1)
go nodeKubeconfigController.Run(ctx, 1)
go configObserver.Run(ctx, 1)
go clusterOperatorStatus.Run(ctx, 1)
go certRotationController.Run(ctx, 1)
go encryptionControllers.Run(ctx, 1)
go featureUpgradeableController.Run(ctx, 1)
go certRotationTimeUpgradeableController.Run(ctx, 1)
go terminationObserver.Run(ctx, 1)
go eventWatcher.Run(ctx, 1)
go boundSATokenSignerController.Run(ctx, 1)
go staleConditionsController.Run(ctx, 1)
go connectivityCheckController.Run(ctx, 1)
<-ctx.Done()
return nil
}
// installerErrorInjector mutates the given installer pod to fail or OOM with a configured probability:
// - 0 <= unsupportedConfigOverrides.installerErrorInjection.failPropability <= 1.0: fail the pod (crash loop)
// - 0 <= unsupportedConfigOverrides.installerErrorInjection.oomPropability <= 1.0: cause OOM due to 2 MB memory limits
func installerErrorInjector(operatorClient v1helpers.StaticPodOperatorClient) func(pod *corev1.Pod, nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error {
return func(pod *corev1.Pod, nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error {
// get UnsupportedConfigOverrides
spec, _, _, err := operatorClient.GetOperatorState()
if err != nil {
klog.Warningf("failed to get operator/v1 spec for error injection: %v", err)
return nil // ignore error
}
if len(spec.UnsupportedConfigOverrides.Raw) == 0 {
return nil
}
var obj map[string]interface{}
if err := json.Unmarshal(spec.UnsupportedConfigOverrides.Raw, &obj); err != nil {
klog.Warningf("failed to unmarshal operator/v1 spec.unsupportedConfigOverrides for error injection: %v", err)
return nil
}
if failPropability, found, err := nestedFloat64OrInt(obj, "installerErrorInjection", "failPropability"); err == nil && found {
if rand.Float64() < failPropability {
pod.Spec.Containers[0].Command = []string{"false"}
}
}
if oomPropability, found, err := nestedFloat64OrInt(obj, "installerErrorInjection", "oomPropability"); err == nil && found {
if rand.Float64() < oomPropability {
twoMB := resource.NewQuantity(int64(2000000), resource.DecimalSI) // instead of 200M
for n := range pod.Spec.Containers[0].Resources.Limits {
if n == corev1.ResourceMemory {
pod.Spec.Containers[0].Resources.Limits[n] = *twoMB
}
}
for n := range pod.Spec.Containers[0].Resources.Requests {
if n == corev1.ResourceMemory {
pod.Spec.Containers[0].Resources.Requests[n] = *twoMB
}
}
}
}
return nil
}
}
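// nestedFloat64OrInt reads a nested numeric value, accepting either a JSON float or integer encoding.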
func nestedFloat64OrInt(obj map[string]interface{}, fields ...string) (float64, bool, error) {
if x, found, err := unstructured.NestedFloat64(obj, fields...); err == nil && !found {
return 0.0, false, nil
} else if err == nil && found {
return x, found, err
}
x, found, err := unstructured.NestedInt64(obj, fields...)
return float64(x), found, err
}
// RevisionConfigMaps is a list of configmaps that are directly copied for the current values. A different actor/controller modifies these.
// the first element should be the configmap that contains the static pod manifest
var RevisionConfigMaps = []revision.RevisionResource{
{Name: "kube-apiserver-pod"},
{Name: "config"},
{Name: "kube-apiserver-cert-syncer-kubeconfig"},
{Name: "oauth-metadata", Optional: true},
{Name: "cloud-config", Optional: true},
// This configmap is managed by the operator, but ensuring a revision history
// supports signing key promotion. Promotion requires knowing whether the current
// public key is present in the configmap(s) associated with the current
// revision(s) of the master nodes.
{Name: "bound-sa-token-signing-certs"},
// these need to be removed, but if we remove them now, the cluster will die because we don't reload them yet
{Name: "etcd-serving-ca"},
{Name: "kube-apiserver-server-ca", Optional: true},
{Name: "kubelet-serving-ca"},
{Name: "sa-token-signing-certs"},
{Name: "kube-apiserver-audit-policies"},
}
// RevisionSecrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these.
var RevisionSecrets = []revision.RevisionResource{
// these need to be removed, but if we remove them now, the cluster will die because we don't reload them yet
{Name: "etcd-client"},
{Name: "kubelet-client"},
// etcd encryption
{Name: "encryption-config", Optional: true},
// this needs to be revisioned as certsyncer's kubeconfig isn't wired to be live reloaded, nor will autorecovery be
{Name: "localhost-recovery-serving-certkey"},
{Name: "localhost-recovery-client-token"},
{Name: "webhook-authenticator", Optional: true},
}
var CertConfigMaps = []installer.UnrevisionedResource{
{Name: "aggregator-client-ca"},
{Name: "client-ca"},
// this is a copy of trusted-ca-bundle CM without the injection annotations
{Name: "trusted-ca-bundle", Optional: true},
// kubeconfig that is a system:master. this ensures a stable location
{Name: "control-plane-node-kubeconfig"},
// kubeconfig for check-endpoints
{Name: "check-endpoints-kubeconfig"},
}
var CertSecrets = []installer.UnrevisionedResource{
{Name: "aggregator-client"},
{Name: "localhost-serving-cert-certkey"},
{Name: "service-network-serving-certkey"},
{Name: "external-loadbalancer-serving-certkey"},
{Name: "internal-loadbalancer-serving-certkey"},
{Name: "bound-service-account-signing-key"},
{Name: "control-plane-node-admin-client-cert-key"},
{Name: "check-endpoints-client-cert-key"},
{Name: "node-kubeconfigs"},
{Name: "user-serving-cert", Optional: true},
{Name: "user-serving-cert-000", Optional: true},
{Name: "user-serving-cert-001", Optional: true},
{Name: "user-serving-cert-002", Optional: true},
{Name: "user-serving-cert-003", Optional: true},
{Name: "user-serving-cert-004", Optional: true},
{Name: "user-serving-cert-005", Optional: true},
{Name: "user-serving-cert-006", Optional: true},
{Name: "user-serving-cert-007", Optional: true},
{Name: "user-serving-cert-008", Optional: true},
{Name: "user-serving-cert-009", Optional: true},
}
|
[
"\"IMAGE\"",
"\"OPERATOR_IMAGE\""
] |
[] |
[
"OPERATOR_IMAGE",
"IMAGE"
] |
[]
|
["OPERATOR_IMAGE", "IMAGE"]
|
go
| 2 | 0 | |
python_examples/dictionary.py
|
# Creating dictionary
student = {
"name": "Adarsh",
"id": 12,
"feedback": None
}
# Updating dictionary
student["name"] = "Mohit"
# Accessing the dictionary by key safely
print(student.get("last_name", "Unknown") == "Unknown")
# Get the list of keys (keys() returns a view object, so convert to a list before comparing)
list(student.keys()) == ["name", "id", "feedback"]
# Get the list of values
list(student.values()) == ["Mohit", 12, None]
# Removing the key/value
del student["name"]
# KeyError
# student["notfound"]
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
gocialite.go
|
package gocialite
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/Decem-Technology/gocialite/drivers"
"github.com/Decem-Technology/gocialite/structs"
"github.com/dgrijalva/jwt-go"
"github.com/s12v/go-jwks"
"golang.org/x/oauth2"
"gopkg.in/oleiade/reflections.v1"
)
var jwksAppleClient jwks.JWKSClient
func init() {
jwksSource := jwks.NewWebSource("https://appleid.apple.com/auth/keys")
jwksAppleClient = jwks.NewDefaultClient(
jwksSource,
time.Minute*10,
12*time.Hour,
)
}
// Dispatcher allows to safely issue concurrent Gocials
type Dispatcher struct {
mu sync.RWMutex
g map[string]*Gocial
gt *Gocial
}
// NewDispatcher creates new Dispatcher
func NewDispatcher() *Dispatcher {
return &Dispatcher{g: make(map[string]*Gocial), gt: &Gocial{}}
}
// New Gocial instance
func (d *Dispatcher) New() *Gocial {
d.mu.Lock()
defer d.mu.Unlock()
state := randToken()
g := &Gocial{state: state}
d.g[state] = g
return g
}
// Handle callback. Can be called only once for given state.
func (d *Dispatcher) Handle(state, code string) (*structs.User, *oauth2.Token, error) {
d.mu.RLock()
g, ok := d.g[state]
d.mu.RUnlock()
if !ok {
return nil, nil, fmt.Errorf("invalid CSRF token: %s", state)
}
err := g.Handle(state, code)
d.mu.Lock()
delete(d.g, state)
d.mu.Unlock()
return &g.User, g.Token, err
}
// HandleToken gets the user profile
func (d *Dispatcher) HandleToken(provider string, token string) (*structs.User, error) {
if provider == "apple" {
user, err := d.gt.HandleAppleToken(token)
return user, err
}
user, err := d.gt.HandleToken(provider, token)
return user, err
}
// Gocial is the main struct of the package
type Gocial struct {
driver, state string
scopes []string
conf *oauth2.Config
User structs.User
Token *oauth2.Token
}
func init() {
drivers.InitializeDrivers(RegisterNewDriver)
}
var (
// Set the basic information such as the endpoint and the scopes URIs
apiMap = map[string]map[string]string{}
// Mapping to create a valid "user" struct from providers
userMap = map[string]map[string]string{}
// Map correct endpoints
endpointMap = map[string]oauth2.Endpoint{}
// Map custom callbacks
callbackMap = map[string]func(client *http.Client, u *structs.User){}
// Default scopes for each driver
defaultScopesMap = map[string][]string{}
)
// RegisterNewDriver adds a new driver to the existing set
func RegisterNewDriver(driver string, defaultscopes []string, callback func(client *http.Client, u *structs.User), endpoint oauth2.Endpoint, apimap, usermap map[string]string) {
apiMap[driver] = apimap
userMap[driver] = usermap
endpointMap[driver] = endpoint
callbackMap[driver] = callback
defaultScopesMap[driver] = defaultscopes
}
// Driver is needed to choose the correct social
func (g *Gocial) Driver(driver string) *Gocial {
g.driver = driver
g.scopes = defaultScopesMap[driver]
// BUG: sequential usage of a single Gocial instance will have the same CSRF token. This is a serious security issue.
// NOTE: Dispatcher eliminates this bug.
if g.state == "" {
g.state = randToken()
}
return g
}
// Scopes is used to set the oAuth scopes, for example "user", "calendar"
func (g *Gocial) Scopes(scopes []string) *Gocial {
g.scopes = append(g.scopes, scopes...)
return g
}
// Redirect returns an URL for the selected social oAuth login
func (g *Gocial) Redirect(clientID, clientSecret, redirectURL string) (string, error) {
// Check if driver is valid
if !inSlice(g.driver, complexKeys(apiMap)) {
return "", fmt.Errorf("Driver not valid: %s", g.driver)
}
// Check if valid redirectURL
_, err := url.ParseRequestURI(redirectURL)
if err != nil {
return "", fmt.Errorf("Redirect URL <%s> not valid: %s", redirectURL, err.Error())
}
if !strings.HasPrefix(redirectURL, "http://") && !strings.HasPrefix(redirectURL, "https://") {
return "", fmt.Errorf("Redirect URL <%s> not valid: protocol not valid", redirectURL)
}
g.conf = &oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
Scopes: g.scopes,
Endpoint: endpointMap[g.driver],
}
return g.conf.AuthCodeURL(g.state), nil
}
// Handle callback from provider
func (g *Gocial) Handle(state, code string) error {
// Handle the exchange code to initiate a transport.
if g.state != state {
return fmt.Errorf("Invalid state: %s", state)
}
// Check if driver is valid
if !inSlice(g.driver, complexKeys(apiMap)) {
return fmt.Errorf("Driver not valid: %s", g.driver)
}
token, err := g.conf.Exchange(oauth2.NoContext, code)
if err != nil {
return fmt.Errorf("oAuth exchanged failed: %s", err.Error())
}
client := g.conf.Client(oauth2.NoContext, token)
// Set gocial token
g.Token = token
// Retrieve all from scopes
driverAPIMap := apiMap[g.driver]
driverUserMap := userMap[g.driver]
userEndpoint := strings.Replace(driverAPIMap["userEndpoint"], "%ACCESS_TOKEN", token.AccessToken, -1)
// Get user info
req, err := client.Get(driverAPIMap["endpoint"] + userEndpoint)
if err != nil {
return err
}
defer req.Body.Close()
res, _ := ioutil.ReadAll(req.Body)
data, err := jsonDecode(res)
if err != nil {
return fmt.Errorf("Error decoding JSON: %s", err.Error())
}
// Scan all fields and dispatch through the mapping
mapKeys := keys(driverUserMap)
gUser := structs.User{}
for k, f := range data {
if !inSlice(k, mapKeys) { // Skip if not in the mapping
continue
}
// Assign the value
// Dirty way, but we need to convert also int/float to string
_ = reflections.SetField(&gUser, driverUserMap[k], fmt.Sprint(f))
}
// Set the "raw" user interface
gUser.Raw = data
// Custom callback
callbackMap[g.driver](client, &gUser)
// Update the struct
g.User = gUser
return nil
}
// HandleToken gets the user profile from a token
func (g *Gocial) HandleToken(provider string, token string) (*structs.User, error) {
// Retrieve all from scopes
if _, ok := apiMap[provider]; !ok {
return nil, errors.New("Provider not found")
}
driverAPIMap := apiMap[provider]
driverUserMap := userMap[provider]
userEndpoint := strings.Replace(driverAPIMap["userEndpoint"], "%ACCESS_TOKEN", token, -1)
// Get user info
g.User = structs.User{}
req, err := http.NewRequest("GET", driverAPIMap["endpoint"]+userEndpoint, nil) // , bytes.NewBuffer(jsonStr)
q := req.URL.Query() // Get a copy of the query values.
if provider == "google" {
q.Add("id_token", token)
} else if provider == "line" {
payload := strings.NewReader(fmt.Sprintf("client_id=%s&id_token=%s", os.Getenv("LINE_CLIENT_ID"), token))
req, err = http.NewRequest("POST", driverAPIMap["endpoint"]+userEndpoint, payload)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
} else {
q.Add("access_token", token)
}
req.URL.RawQuery = q.Encode()
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
data, err := jsonDecode(body)
if err, ok := data["error"]; ok {
errorDetail := err.(map[string]interface{})
if errorMessage, ok := errorDetail["message"]; ok {
return nil, errors.New(errorMessage.(string))
}
return nil, errors.New("Token is invalid")
}
if err != nil {
return nil, fmt.Errorf("Error decoding JSON: %s", err.Error())
}
// Scan all fields and dispatch through the mapping
mapKeys := keys(driverUserMap)
gUser := structs.User{}
for k, f := range data {
if !inSlice(k, mapKeys) { // Skip if not in the mapping
continue
}
// Assign the value
// Dirty way, but we need to convert also int/float to string
_ = reflections.SetField(&gUser, driverUserMap[k], fmt.Sprint(f))
}
// Set the "raw" user interface
gUser.Raw = data
// Update the struct
return &gUser, nil
} else {
return nil, errors.New("Token is invalid")
}
}
func (g *Gocial) HandleAppleToken(idToken string) (*structs.User, error) {
token, err := jwt.ParseWithClaims(idToken, &structs.CustomClaims{}, func(token *jwt.Token) (interface{}, error) {
jwk, err := jwksAppleClient.GetEncryptionKey(fmt.Sprintf("%v", token.Header["kid"]))
if err != nil {
return nil, err
}
return jwk.Key, nil
})
if err != nil {
return nil, err
}
if !token.Valid {
return nil, err
}
user := token.Claims.(*structs.CustomClaims)
if "https://appleid.apple.com" != user.Issuer {
return nil, errors.New("token is invalid")
}
if os.Getenv("APPLE_CLIENT_ID") != user.Audience {
err := errors.New("token is invalid")
return nil, err
}
u := structs.User{
ID: user.Subject,
Username: "",
FirstName: user.Name,
LastName: user.Lastname,
FullName: user.Name + " " + user.Lastname,
Email: user.Email,
// Avatar
// Raw
}
return &u, nil
}
// Generate a random token
func randToken() string {
b := make([]byte, 32)
rand.Read(b)
return base64.StdEncoding.EncodeToString(b)
}
// Check if a value is in a string slice
func inSlice(v string, s []string) bool {
for _, scope := range s {
if scope == v {
return true
}
}
return false
}
// Decode a JSON payload or return an error
func jsonDecode(js []byte) (map[string]interface{}, error) {
var decoded map[string]interface{}
decoder := json.NewDecoder(strings.NewReader(string(js)))
decoder.UseNumber()
if err := decoder.Decode(&decoded); err != nil {
return nil, err
}
return decoded, nil
}
// Return the keys of a map
func keys(m map[string]string) []string {
var keys []string
for k := range m {
keys = append(keys, k)
}
return keys
}
func complexKeys(m map[string]map[string]string) []string {
var keys []string
for k := range m {
keys = append(keys, k)
}
return keys
}
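// The sketch below is illustrative only and is not part of the original
// library: it shows the intended Driver -> Scopes -> Redirect -> Handle
// sequence. The provider name, client credentials, and redirect URL are
// placeholders.
func exampleGocialFlow() {
	g := &Gocial{}
	loginURL, err := g.Driver("github").
		Scopes([]string{"user"}).
		Redirect("my-client-id", "my-client-secret", "https://example.com/auth/callback")
	if err != nil {
		fmt.Println("redirect error:", err)
		return
	}
	fmt.Println("send the user to:", loginURL)
	// After the provider redirects back with ?state=...&code=..., the
	// callback handler would run:
	//   if err := g.Handle(state, code); err == nil {
	//       fmt.Println(g.User.Email, g.Token.AccessToken)
	//   }
}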
|
[
"\"LINE_CLIENT_ID\"",
"\"APPLE_CLIENT_ID\""
] |
[] |
[
"APPLE_CLIENT_ID",
"LINE_CLIENT_ID"
] |
[]
|
["APPLE_CLIENT_ID", "LINE_CLIENT_ID"]
|
go
| 2 | 0 | |
build-tools/src/linuxkit/vendor/github.com/scaleway/go-scaleway/cache/cache.go
|
// Copyright (C) 2018 Scaleway. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE.md file.
package cache
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/moul/anonuuid"
"github.com/renstrom/fuzzysearch/fuzzy"
"github.com/scaleway/go-scaleway/types"
)
const (
// CacheRegion permits to access at the region field
CacheRegion = iota
// CacheArch permits to access at the arch field
CacheArch
// CacheOwner permits to access at the owner field
CacheOwner
// CacheTitle permits to access at the title field
CacheTitle
// CacheMarketPlaceUUID is used to determine the UUID of local images
CacheMarketPlaceUUID
// CacheMaxfield is used to determine the size of array
CacheMaxfield
)
// ScalewayCache is used to resolve full identifiers without having to query the API
type ScalewayCache struct {
// Images contains names of Scaleway images indexed by identifier
Images map[string][CacheMaxfield]string `json:"images"`
// Snapshots contains names of Scaleway snapshots indexed by identifier
Snapshots map[string][CacheMaxfield]string `json:"snapshots"`
// Volumes contains names of Scaleway volumes indexed by identifier
Volumes map[string][CacheMaxfield]string `json:"volumes"`
// Bootscripts contains names of Scaleway bootscripts indexed by identifier
Bootscripts map[string][CacheMaxfield]string `json:"bootscripts"`
// Servers contains names of Scaleway servers indexed by identifier
Servers map[string][CacheMaxfield]string `json:"servers"`
// Path is the path to the cache file
Path string `json:"-"`
// Modified tells if the cache needs to be overwritten or not
Modified bool `json:"-"`
// Lock allows ScalewayCache to be used concurrently
Lock sync.Mutex `json:"-"`
hookSave func()
}
// NewScalewayCache loads a per-user cache
func NewScalewayCache(hookSave func()) (*ScalewayCache, error) {
var cache ScalewayCache
cache.hookSave = hookSave
homeDir := os.Getenv("HOME") // *nix
if homeDir == "" { // Windows
homeDir = os.Getenv("USERPROFILE")
}
if homeDir == "" {
homeDir = "/tmp"
}
cachePath := filepath.Join(homeDir, ".scw-cache.db")
cache.Path = cachePath
_, err := os.Stat(cachePath)
if os.IsNotExist(err) {
cache.Clear()
return &cache, nil
} else if err != nil {
return nil, err
}
file, err := ioutil.ReadFile(cachePath)
if err != nil {
return nil, err
}
err = json.Unmarshal(file, &cache)
if err != nil {
// fix compatibility with older version
if err = os.Remove(cachePath); err != nil {
return nil, err
}
cache.Clear()
return &cache, nil
}
if cache.Images == nil {
cache.Images = make(map[string][CacheMaxfield]string)
}
if cache.Snapshots == nil {
cache.Snapshots = make(map[string][CacheMaxfield]string)
}
if cache.Volumes == nil {
cache.Volumes = make(map[string][CacheMaxfield]string)
}
if cache.Servers == nil {
cache.Servers = make(map[string][CacheMaxfield]string)
}
if cache.Bootscripts == nil {
cache.Bootscripts = make(map[string][CacheMaxfield]string)
}
return &cache, nil
}
// Clear removes all information from the cache
func (c *ScalewayCache) Clear() {
c.Images = make(map[string][CacheMaxfield]string)
c.Snapshots = make(map[string][CacheMaxfield]string)
c.Volumes = make(map[string][CacheMaxfield]string)
c.Bootscripts = make(map[string][CacheMaxfield]string)
c.Servers = make(map[string][CacheMaxfield]string)
c.Modified = true
}
// Flush flushes the cache database
func (c *ScalewayCache) Flush() error {
return os.Remove(c.Path)
}
// Save atomically overwrites the current cache database
func (c *ScalewayCache) Save() error {
c.Lock.Lock()
defer c.Lock.Unlock()
c.hookSave()
if c.Modified {
file, err := ioutil.TempFile(filepath.Dir(c.Path), filepath.Base(c.Path))
if err != nil {
return err
}
if err := json.NewEncoder(file).Encode(c); err != nil {
file.Close()
os.Remove(file.Name())
return err
}
file.Close()
if err := os.Rename(file.Name(), c.Path); err != nil {
os.Remove(file.Name())
return err
}
}
return nil
}
// ComputeRankMatch fills `ScalewayResolverResult.RankMatch` with its `fuzzy` score
func ComputeRankMatch(s *types.ScalewayResolverResult, needle string) {
s.Needle = needle
s.RankMatch = fuzzy.RankMatch(needle, s.Name)
}
// LookUpImages attempts to return identifiers matching a pattern
func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) (types.ScalewayResolverResults, error) {
c.Lock.Lock()
defer c.Lock.Unlock()
var res types.ScalewayResolverResults
var exactMatches types.ScalewayResolverResults
if acceptUUID && anonuuid.IsUUID(needle) == nil {
if fields, ok := c.Images[needle]; ok {
entry, err := types.NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierImage)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "")
// FIXME: if 'user/' is in needle, only watch for a user image
nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*"))
for identifier, fields := range c.Images {
if fields[CacheTitle] == needle {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierImage)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
exactMatches = append(exactMatches, entry)
}
if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierImage)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
} else if strings.HasPrefix(fields[CacheMarketPlaceUUID], needle) || nameRegex.MatchString(fields[CacheMarketPlaceUUID]) {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierImage)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
if len(exactMatches) == 1 {
return exactMatches, nil
}
return removeDuplicatesResults(res), nil
}
// LookUpSnapshots attempts to return identifiers matching a pattern
func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) (types.ScalewayResolverResults, error) {
c.Lock.Lock()
defer c.Lock.Unlock()
var res types.ScalewayResolverResults
var exactMatches types.ScalewayResolverResults
if acceptUUID && anonuuid.IsUUID(needle) == nil {
if fields, ok := c.Snapshots[needle]; ok {
entry, err := types.NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierSnapshot)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "")
nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*"))
for identifier, fields := range c.Snapshots {
if fields[CacheTitle] == needle {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierSnapshot)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
exactMatches = append(exactMatches, entry)
}
if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierSnapshot)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
if len(exactMatches) == 1 {
return exactMatches, nil
}
return removeDuplicatesResults(res), nil
}
// LookUpVolumes attempts to return identifiers matching a pattern
func (c *ScalewayCache) LookUpVolumes(needle string, acceptUUID bool) (types.ScalewayResolverResults, error) {
c.Lock.Lock()
defer c.Lock.Unlock()
var res types.ScalewayResolverResults
var exactMatches types.ScalewayResolverResults
if acceptUUID && anonuuid.IsUUID(needle) == nil {
if fields, ok := c.Volumes[needle]; ok {
entry, err := types.NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierVolume)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*"))
for identifier, fields := range c.Volumes {
if fields[CacheTitle] == needle {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierVolume)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
exactMatches = append(exactMatches, entry)
}
if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierVolume)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
if len(exactMatches) == 1 {
return exactMatches, nil
}
return removeDuplicatesResults(res), nil
}
// LookUpBootscripts attempts to return identifiers matching a pattern
func (c *ScalewayCache) LookUpBootscripts(needle string, acceptUUID bool) (types.ScalewayResolverResults, error) {
c.Lock.Lock()
defer c.Lock.Unlock()
var res types.ScalewayResolverResults
var exactMatches types.ScalewayResolverResults
if acceptUUID && anonuuid.IsUUID(needle) == nil {
if fields, ok := c.Bootscripts[needle]; ok {
entry, err := types.NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierBootscript)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*"))
for identifier, fields := range c.Bootscripts {
if fields[CacheTitle] == needle {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierBootscript)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
exactMatches = append(exactMatches, entry)
}
if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierBootscript)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
if len(exactMatches) == 1 {
return exactMatches, nil
}
return removeDuplicatesResults(res), nil
}
// LookUpServers attempts to return identifiers matching a pattern
func (c *ScalewayCache) LookUpServers(needle string, acceptUUID bool) (types.ScalewayResolverResults, error) {
c.Lock.Lock()
defer c.Lock.Unlock()
var res types.ScalewayResolverResults
var exactMatches types.ScalewayResolverResults
if acceptUUID && anonuuid.IsUUID(needle) == nil {
if fields, ok := c.Servers[needle]; ok {
entry, err := types.NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierServer)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*"))
for identifier, fields := range c.Servers {
if fields[CacheTitle] == needle {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierServer)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
exactMatches = append(exactMatches, entry)
}
if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) {
entry, err := types.NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], fields[CacheRegion], types.IdentifierServer)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
res = append(res, entry)
}
}
if len(exactMatches) == 1 {
return exactMatches, nil
}
return removeDuplicatesResults(res), nil
}
// removeDuplicatesResults transforms an array into a unique array
func removeDuplicatesResults(elements types.ScalewayResolverResults) types.ScalewayResolverResults {
encountered := map[string]types.ScalewayResolverResult{}
// Create a map of all unique elements.
for v := range elements {
encountered[elements[v].Identifier] = elements[v]
}
// Place all keys from the map into a slice.
results := types.ScalewayResolverResults{}
for _, result := range encountered {
results = append(results, result)
}
return results
}
// LookUpIdentifiers attempts to return identifiers matching a pattern
func (c *ScalewayCache) LookUpIdentifiers(needle string) (types.ScalewayResolverResults, error) {
results := types.ScalewayResolverResults{}
identifierType, needle := types.ParseNeedle(needle)
if identifierType&(types.IdentifierUnknown|types.IdentifierServer) > 0 {
servers, err := c.LookUpServers(needle, false)
if err != nil {
return types.ScalewayResolverResults{}, err
}
for _, result := range servers {
entry, err := types.NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, types.IdentifierServer)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
results = append(results, entry)
}
}
if identifierType&(types.IdentifierUnknown|types.IdentifierImage) > 0 {
images, err := c.LookUpImages(needle, false)
if err != nil {
return types.ScalewayResolverResults{}, err
}
for _, result := range images {
entry, err := types.NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, types.IdentifierImage)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
results = append(results, entry)
}
}
if identifierType&(types.IdentifierUnknown|types.IdentifierSnapshot) > 0 {
snapshots, err := c.LookUpSnapshots(needle, false)
if err != nil {
return types.ScalewayResolverResults{}, err
}
for _, result := range snapshots {
entry, err := types.NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, types.IdentifierSnapshot)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
results = append(results, entry)
}
}
if identifierType&(types.IdentifierUnknown|types.IdentifierVolume) > 0 {
volumes, err := c.LookUpVolumes(needle, false)
if err != nil {
return types.ScalewayResolverResults{}, err
}
for _, result := range volumes {
entry, err := types.NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, types.IdentifierVolume)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
results = append(results, entry)
}
}
if identifierType&(types.IdentifierUnknown|types.IdentifierBootscript) > 0 {
bootscripts, err := c.LookUpBootscripts(needle, false)
if err != nil {
return types.ScalewayResolverResults{}, err
}
for _, result := range bootscripts {
entry, err := types.NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, result.Region, types.IdentifierBootscript)
if err != nil {
return types.ScalewayResolverResults{}, err
}
ComputeRankMatch(&entry, needle)
results = append(results, entry)
}
}
return results, nil
}
// InsertServer registers a server in the cache
func (c *ScalewayCache) InsertServer(identifier, region, arch, owner, name string) {
c.Lock.Lock()
defer c.Lock.Unlock()
fields, exists := c.Servers[identifier]
if !exists || fields[CacheTitle] != name {
c.Servers[identifier] = [CacheMaxfield]string{region, arch, owner, name}
c.Modified = true
}
}
// RemoveServer removes a server from the cache
func (c *ScalewayCache) RemoveServer(identifier string) {
c.Lock.Lock()
defer c.Lock.Unlock()
delete(c.Servers, identifier)
c.Modified = true
}
// ClearServers removes all servers from the cache
func (c *ScalewayCache) ClearServers() {
c.Lock.Lock()
defer c.Lock.Unlock()
c.Servers = make(map[string][CacheMaxfield]string)
c.Modified = true
}
// InsertImage registers an image in the cache
func (c *ScalewayCache) InsertImage(identifier, region, arch, owner, name, marketPlaceUUID string) {
c.Lock.Lock()
defer c.Lock.Unlock()
fields, exists := c.Images[identifier]
if !exists || fields[CacheTitle] != name {
c.Images[identifier] = [CacheMaxfield]string{region, arch, owner, name, marketPlaceUUID}
c.Modified = true
}
}
// RemoveImage removes an image from the cache
func (c *ScalewayCache) RemoveImage(identifier string) {
c.Lock.Lock()
defer c.Lock.Unlock()
delete(c.Images, identifier)
c.Modified = true
}
// ClearImages removes all images from the cache
func (c *ScalewayCache) ClearImages() {
c.Lock.Lock()
defer c.Lock.Unlock()
c.Images = make(map[string][CacheMaxfield]string)
c.Modified = true
}
// InsertSnapshot registers a snapshot in the cache
func (c *ScalewayCache) InsertSnapshot(identifier, region, arch, owner, name string) {
c.Lock.Lock()
defer c.Lock.Unlock()
fields, exists := c.Snapshots[identifier]
if !exists || fields[CacheTitle] != name {
c.Snapshots[identifier] = [CacheMaxfield]string{region, arch, owner, name}
c.Modified = true
}
}
// RemoveSnapshot removes a snapshot from the cache
func (c *ScalewayCache) RemoveSnapshot(identifier string) {
c.Lock.Lock()
defer c.Lock.Unlock()
delete(c.Snapshots, identifier)
c.Modified = true
}
// ClearSnapshots removes all snapshots from the cache
func (c *ScalewayCache) ClearSnapshots() {
c.Lock.Lock()
defer c.Lock.Unlock()
c.Snapshots = make(map[string][CacheMaxfield]string)
c.Modified = true
}
// InsertVolume registers a volume in the cache
func (c *ScalewayCache) InsertVolume(identifier, region, arch, owner, name string) {
c.Lock.Lock()
defer c.Lock.Unlock()
fields, exists := c.Volumes[identifier]
if !exists || fields[CacheTitle] != name {
c.Volumes[identifier] = [CacheMaxfield]string{region, arch, owner, name}
c.Modified = true
}
}
// RemoveVolume removes a volume from the cache
func (c *ScalewayCache) RemoveVolume(identifier string) {
c.Lock.Lock()
defer c.Lock.Unlock()
delete(c.Volumes, identifier)
c.Modified = true
}
// ClearVolumes removes all volumes from the cache
func (c *ScalewayCache) ClearVolumes() {
c.Lock.Lock()
defer c.Lock.Unlock()
c.Volumes = make(map[string][CacheMaxfield]string)
c.Modified = true
}
// InsertBootscript registers a bootscript in the cache
func (c *ScalewayCache) InsertBootscript(identifier, region, arch, owner, name string) {
c.Lock.Lock()
defer c.Lock.Unlock()
fields, exists := c.Bootscripts[identifier]
if !exists || fields[CacheTitle] != name {
c.Bootscripts[identifier] = [CacheMaxfield]string{region, arch, owner, name}
c.Modified = true
}
}
// RemoveBootscript removes a bootscript from the cache
func (c *ScalewayCache) RemoveBootscript(identifier string) {
c.Lock.Lock()
defer c.Lock.Unlock()
delete(c.Bootscripts, identifier)
c.Modified = true
}
// ClearBootscripts removes all bootscripts from the cache
func (c *ScalewayCache) ClearBootscripts() {
c.Lock.Lock()
defer c.Lock.Unlock()
c.Bootscripts = make(map[string][CacheMaxfield]string)
c.Modified = true
}
// GetNbServers returns the number of servers in the cache
func (c *ScalewayCache) GetNbServers() int {
c.Lock.Lock()
defer c.Lock.Unlock()
return len(c.Servers)
}
// GetNbImages returns the number of images in the cache
func (c *ScalewayCache) GetNbImages() int {
c.Lock.Lock()
defer c.Lock.Unlock()
return len(c.Images)
}
// GetNbSnapshots returns the number of snapshots in the cache
func (c *ScalewayCache) GetNbSnapshots() int {
c.Lock.Lock()
defer c.Lock.Unlock()
return len(c.Snapshots)
}
// GetNbVolumes returns the number of volumes in the cache
func (c *ScalewayCache) GetNbVolumes() int {
c.Lock.Lock()
defer c.Lock.Unlock()
return len(c.Volumes)
}
// GetNbBootscripts returns the number of bootscripts in the cache
func (c *ScalewayCache) GetNbBootscripts() int {
c.Lock.Lock()
defer c.Lock.Unlock()
return len(c.Bootscripts)
}
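// The sketch below is illustrative only and is not part of the vendored
// package: it shows the expected insert/lookup/save cycle. The UUID and the
// server names used here are placeholders.
func exampleCacheUsage() error {
	cache, err := NewScalewayCache(func() {})
	if err != nil {
		return err
	}
	cache.InsertServer("11111111-1111-1111-1111-111111111111", "par1", "x86_64", "me", "web-01")

	// Fuzzy lookup by name fragment; each result carries Identifier, Name,
	// Arch and Region plus a fuzzy RankMatch score.
	results, err := cache.LookUpServers("web", false)
	if err != nil {
		return err
	}
	_ = results

	// Persist the cache atomically to ~/.scw-cache.db.
	return cache.Save()
}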
|
[
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
aocpo_backend/aocpo/asgi.py
|
"""
ASGI config for aocpo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aocpo.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
efi_monitor/_cli.py
|
"""Define the command line iterface."""
import os
import glob
def _file_path():
"""Determine the file path."""
return os.environ.get("EFI_MONITOR_FILE_PATH", "/sys/firmware/efi/efivars/dump*")
def _files():
"""Find the dump_files."""
return glob.glob(_file_path())
def check():
"""Check for efi dump files."""
for a_file in _files():
print(a_file)
def clear():
"""Clear out efi dump files."""
for a_file in _files():
os.unlink(a_file)
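# Illustrative sketch only (not part of the original module): a minimal
# command dispatch wiring the two functions above. The subcommand names are
# placeholders.
if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1 and sys.argv[1] == "clear":
        clear()
    else:
        check()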
|
[] |
[] |
[
"EFI_MONITOR_FILE_PATH"
] |
[]
|
["EFI_MONITOR_FILE_PATH"]
|
python
| 1 | 0 | |
vendor/github.com/elastic/beats/metricbeat/tests/system/test_zookeeper.py
|
import os
import metricbeat
import unittest
from nose.plugins.attrib import attr
ZK_FIELDS = metricbeat.COMMON_FIELDS + ["zookeeper"]
MNTR_FIELDS = ["version", "latency.avg", "latency.max",
"latency.min", "packets.received", "packets.sent",
"outstanding_requests", "server_state", "znode_count",
"watch_count", "ephemerals_count",
"approximate_data_size", "num_alive_connections"]
class ZooKeeperMntrTest(metricbeat.BaseTest):
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
@attr('integration')
def test_output(self):
"""
ZooKeeper mntr module outputs an event.
"""
self.render_config_template(modules=[{
"name": "zookeeper",
"metricsets": ["mntr"],
"hosts": self.get_hosts(),
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
# Ensure no errors or warnings exist in the log.
log = self.get_log()
self.assertNotRegexpMatches(log, "ERR|WARN")
output = self.read_output_json()
self.assertEqual(len(output), 1)
evt = output[0]
self.assertItemsEqual(self.de_dot(ZK_FIELDS), evt.keys())
zk_mntr = evt["zookeeper"]["mntr"]
zk_mntr.pop("pending_syncs", None)
zk_mntr.pop("open_file_descriptor_count", None)
zk_mntr.pop("synced_followers", None)
zk_mntr.pop("max_file_descriptor_count", None)
zk_mntr.pop("followers", None)
self.assertItemsEqual(self.de_dot(MNTR_FIELDS), zk_mntr.keys())
self.assert_fields_are_documented(evt)
def get_hosts(self):
return [os.getenv('ZOOKEEPER_HOST', 'localhost') + ':' +
os.getenv('ZOOKEEPER_PORT', '2181')]
|
[] |
[] |
[
"ZOOKEEPER_HOST",
"ZOOKEEPER_PORT"
] |
[]
|
["ZOOKEEPER_HOST", "ZOOKEEPER_PORT"]
|
python
| 2 | 0 | |
sdk/conversation_test.go
|
package sdk
import (
"os"
"testing"
)
func TestRongCloud_ConversationMute(t *testing.T) {
conversation := NewRongCloud(
os.Getenv("RC_APP_ID"),
os.Getenv("RC_APP_SECRET"),
)
err := conversation.ConversationMute(
PRIVATE,
"u01",
"u02",
)
t.Log(err)
}
func TestRongCloud_ConversationUnmute(t *testing.T) {
conversation := NewRongCloud(
os.Getenv("RC_APP_ID"),
os.Getenv("RC_APP_SECRET"),
)
	err := conversation.ConversationUnmute(
PRIVATE,
"u01",
"u02",
)
t.Log(err)
}
func TestRongCloud_ConversationGet(t *testing.T) {
conversation := NewRongCloud(
os.Getenv("RC_APP_ID"),
os.Getenv("RC_APP_SECRET"),
)
isMuted, err := conversation.ConversationGet(
PRIVATE,
"u01",
"u02")
t.Log(err)
t.Log(isMuted)
}
|
[
"\"RC_APP_ID\"",
"\"RC_APP_SECRET\"",
"\"RC_APP_ID\"",
"\"RC_APP_SECRET\"",
"\"RC_APP_ID\"",
"\"RC_APP_SECRET\""
] |
[] |
[
"RC_APP_ID",
"RC_APP_SECRET"
] |
[]
|
["RC_APP_ID", "RC_APP_SECRET"]
|
go
| 2 | 0 | |
Tests/test_PopGen_GenePop.py
|
# Copyright 2009 by Tiago Antao <[email protected]>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
import os
import unittest
from Bio import MissingExternalDependencyError
from Bio.PopGen.GenePop.Controller import GenePopController
# Tests genepop related code. Note: this case requires genepop
# test_PopGen_GenePop_nodepend tests code that does not require genepop
found = False
for path in os.environ['PATH'].split(os.pathsep):
try:
for filename in os.listdir(path):
if filename.startswith('Genepop'):
found = True
except os.error:
pass # Path doesn't exist - correct to pass
if not found:
raise MissingExternalDependencyError(
"Install GenePop if you want to use Bio.PopGen.GenePop.")
class AppTest(unittest.TestCase):
"""Tests genepop execution via biopython.
"""
def test_allele_genotype_frequencies(self):
"""Test genepop execution on basic allele and genotype frequencies.
"""
ctrl = GenePopController()
pop_iter, locus_iter = ctrl.calc_allele_genotype_freqs("PopGen" + os.sep + "big.gen")
# print("%s %s" % (pop, loci))
# for popc in pop_iter:
# pop_name, loci_content = popc
# print(pop_name)
# for locus in loci_content:
# geno_list, hets, freq_fis = loci_content[locus]
# print(locus)
# print(hets)
# print(freq_fis)
# print(geno_list)
# print("")
def test_calc_diversities_fis_with_identity(self):
"""Test calculations of diversities ...
"""
ctrl = GenePopController()
iter, avg_fis, avg_Qintra = ctrl.calc_diversities_fis_with_identity(
"PopGen" + os.sep + "big.gen")
liter = list(iter)
assert len(liter) == 37
assert liter[0][0] == "Locus1"
assert len(avg_fis) == 10
assert len(avg_Qintra) == 10
def test_estimate_nm(self):
"""Test Nm estimation.
"""
ctrl = GenePopController()
mean_sample_size, mean_priv_alleles, mig10, mig25, mig50, mig_corrected =\
ctrl.estimate_nm("PopGen" + os.sep + "big.gen")
assert (mean_sample_size, mean_priv_alleles, mig10, mig25, mig50, mig_corrected) == \
(28.0, 0.016129, 52.5578, 15.3006, 8.94583, 13.6612)
def test_fst_all(self):
"""Test genepop execution on all fst.
"""
ctrl = GenePopController()
(allFis, allFst, allFit), itr = ctrl.calc_fst_all("PopGen" + os.sep + "c2line.gen")
results = list(itr)
assert (len(results) == 3)
assert (results[0][0] == "136255903")
assert (results[1][3] - 0.33 < 0.01)
def test_haploidy(self):
"""Test haploidy.
"""
ctrl = GenePopController()
(allFis, allFst, allFit), itr = ctrl.calc_fst_all("PopGen" + os.sep + "haplo.gen")
litr = list(itr)
assert not isinstance(allFst, int)
assert len(litr) == 37
assert litr[36][0] == "Locus37"
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
pkg/types/topic.go
|
package types
type Topic struct {
Name string
Partitions []Partition
}
type Partition struct {
ID int
Leader int
Replicas []int
Isrs []int // in-sync replicas
}
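// Illustrative sketch only (not part of the original package): constructing
// a Topic value by hand. The topic name, broker IDs, and replica lists are
// placeholders.
var exampleTopic = Topic{
	Name: "orders",
	Partitions: []Partition{
		{ID: 0, Leader: 1, Replicas: []int{1, 2, 3}, Isrs: []int{1, 2}},
	},
}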
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
config/settings/base.py
|
import os
from pathlib import Path
from django.core.management import utils
import django_heroku
####~GENERAL SETTINGS~##################################################################################################
SITE_ID = 1
ALLOWED_HOSTS = ['0.0.0.0', 'volog.herokuapp.com']
BASE_DIR = Path(__file__).resolve().parent.parent.parent
SECRET_KEY = os.getenv('SECRET_KEY', utils.get_random_secret_key())
WSGI_APPLICATION = 'config.wsgi.application'
ROOT_URLCONF = 'config.urls'
####~LOCALITY & LANGUAGE~###############################################################################################
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#####~STATIC FILES SETTINGS~############################################################################################
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/app'
ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS = True
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'src', 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
#####~DJANGO APPS~######################################################################################################
INSTALLED_APPS = [
# Django Apps
'django.contrib.admin',
'django.contrib.auth',
'whitenoise.runserver_nostatic',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# REST
'rest_framework',
# Volog Apps
'api',
# authentication modules
'auth_backend.modules.common',
'auth_backend.modules.user',
'auth_backend.modules.superAdmin',
# allauth modules
'allauth',
'allauth.account',
'allauth.socialaccount',
# allauth google app
'allauth.socialaccount.providers.google',
]
####~MIDDLEWARE~########################################################################################################
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
"whitenoise.middleware.WhiteNoiseMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'auth_backend.modules.common.middleware.ProfileComplete',
]
#####~AUTH BACKEND SETTINGS~############################################################################################
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
AUTH_USER_MODEL = 'user.BaseVologUser'
#####~AUTH PASSWORD SETTINGS~###########################################################################################
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
#####~TEMPLATE DIRS~####################################################################################################
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'auth_backend', 'templates'), os.path.join(BASE_DIR, 'public')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
#####~DATABASE SETTINGS~################################################################################################
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
#####~REST SETTINGS~####################################################################################################
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 1000
}
####~LOGGING~###########################################################################################################
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
########################################################################################################################
django_heroku.settings(locals())
|
[] |
[] |
[
"SECRET_KEY",
"DJANGO_LOG_LEVEL"
] |
[]
|
["SECRET_KEY", "DJANGO_LOG_LEVEL"]
|
python
| 2 | 0 | |
bin/substrates.py
|
# substrates Tab
import os, math, time
from pathlib import Path
from shutil import copyfile
from ipywidgets import Layout, Label, Text, Checkbox, Button, BoundedIntText, HBox, VBox, Box, \
FloatText, Dropdown, interactive
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle, Ellipse, Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.colors as mplc
from collections import deque
import numpy as np
import scipy.io
import xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html
import glob
import platform
import zipfile
from debug import debug_view
import warnings
hublib_flag = True
if platform.system() != 'Windows':
try:
# print("Trying to import hublib.ui")
from hublib.ui import Download
# from hublib.ui import Download,Upload
except:
hublib_flag = False
else:
hublib_flag = False
#warnings.warn(message, mplDeprecation, stacklevel=1)
warnings.filterwarnings("ignore")
class SubstrateTab(object):
def __init__(self, fury_tab):
self.fury_tab = fury_tab
self.output_dir = '.'
# self.output_dir = 'tmpdir'
self.figsize_width_substrate = 15.0 # allow extra for colormap
self.figsize_height_substrate = 12.5
self.figsize_width_svg = 12.0
self.figsize_height_svg = 12.0
# self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot
self.first_time = True
self.modulo = 1
self.use_defaults = True
self.svg_delta_t = 1
self.substrate_delta_t = 1
self.svg_frame = 1
self.substrate_frame = 1
self.customized_output_freq = False
self.therapy_activation_time = 1000000
self.max_svg_frame_pre_therapy = 1000000
self.max_substrate_frame_pre_therapy = 1000000
self.svg_xmin = 0
# Probably don't want to hardwire these if we allow changing the domain size
# self.svg_xrange = 2000
# self.xmin = -1000.
# self.xmax = 1000.
# self.ymin = -1000.
# self.ymax = 1000.
# self.x_range = 2000.
# self.y_range = 2000.
self.show_nucleus = False
self.show_edge = True
# initial value
self.field_index = 4
# self.field_index = self.mcds_field.value + 4
self.skip_cb = False
# define dummy size of mesh (set in the tool's primary module)
self.numx = 0
self.numy = 0
self.title_str = ''
tab_height = '600px'
tab_height = '500px'
constWidth = '180px'
constWidth2 = '150px'
tab_layout = Layout(width='900px', # border='2px solid black',
height=tab_height, ) #overflow_y='scroll')
max_frames = 1
# self.mcds_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
# self.i_plot = interactive(self.plot_plots, frame=(0, max_frames), continuous_update=False)
self.i_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
# "plot_size" controls the size of the tab height, not the plot (rf. figsize for that)
# NOTE: the Substrates Plot tab has an extra row of widgets at the top of it (cf. Cell Plots tab)
svg_plot_size = '700px'
svg_plot_size = '600px'
svg_plot_size = '700px'
svg_plot_size = '900px'
self.i_plot.layout.width = svg_plot_size
self.i_plot.layout.height = svg_plot_size
self.fontsize = 20
# description='# cell frames',
self.max_frames = BoundedIntText(
min=0, max=99999, value=max_frames,
description='# frames',
layout=Layout(width='160px'),
)
self.max_frames.observe(self.update_max_frames)
# self.field_min_max = {'dummy': [0., 1., False]}
# NOTE: manually setting these for now (vs. parsing them out of data/initial.xml)
self.field_min_max = {'director signal':[0.,1.,False], 'cargo signal':[0.,1.,False] }
# hacky I know, but make a dict that's got (key,value) reversed from the dict in the Dropdown below
# self.field_dict = {0:'dummy'}
self.field_dict = {0:'director signal', 1:'cargo signal'}
self.mcds_field = Dropdown(
options={'director signal': 0, 'cargo signal':1},
disabled=True,
value=0,
# description='Field',
layout=Layout(width=constWidth)
)
# print("substrate __init__: self.mcds_field.value=",self.mcds_field.value)
# self.mcds_field.observe(self.mcds_field_cb)
self.mcds_field.observe(self.mcds_field_changed_cb)
self.field_cmap = Dropdown(
options=['viridis', 'jet', 'YlOrRd'],
value='YlOrRd',
disabled=True,
# description='Field',
layout=Layout(width=constWidth)
)
# self.field_cmap.observe(self.plot_substrate)
self.field_cmap.observe(self.mcds_field_cb)
self.cmap_fixed_toggle = Checkbox(
description='Fix',
disabled=True,
# layout=Layout(width=constWidth2),
)
self.cmap_fixed_toggle.observe(self.mcds_field_cb)
# def cmap_fixed_toggle_cb(b):
# # self.update()
# # self.field_min_max = {'oxygen': [0., 30.,True], 'glucose': [0., 1.,False]}
# field_name = self.field_dict[self.mcds_field.value]
# if (self.cmap_fixed_toggle.value):
# self.field_min_max[field_name][0] = self.cmap_min.value
# self.field_min_max[field_name][1] = self.cmap_max.value
# self.field_min_max[field_name][2] = True
# else:
# # self.field_min_max[field_name][0] = self.cmap_min.value
# # self.field_min_max[field_name][1] = self.cmap_max.value
# self.field_min_max[field_name][2] = False
# self.i_plot.update()
# self.cmap_fixed_toggle.observe(cmap_fixed_toggle_cb)
# self.save_min_max= Button(
# description='Save', #style={'description_width': 'initial'},
# button_style='success', # 'success', 'info', 'warning', 'danger' or ''
# tooltip='Save min/max for this substrate',
# disabled=True,
# layout=Layout(width='90px')
# )
# def save_min_max_cb(b):
# # field_name = self.mcds_field.options[]
# # field_name = next(key for key, value in self.mcds_field.options.items() if value == self.mcds_field.value)
# field_name = self.field_dict[self.mcds_field.value]
# # print(field_name)
# # self.field_min_max = {'oxygen': [0., 30.], 'glucose': [0., 1.], 'H+ ions': [0., 1.], 'ECM': [0., 1.], 'NP1': [0., 1.], 'NP2': [0., 1.]}
# self.field_min_max[field_name][0] = self.cmap_min.value
# self.field_min_max[field_name][1] = self.cmap_max.value
# # print(self.field_min_max)
# self.save_min_max.on_click(save_min_max_cb)
self.cmap_min = FloatText(
description='Min',
value=0,
step = 0.1,
disabled=True,
layout=Layout(width=constWidth2),
)
self.cmap_min.observe(self.mcds_field_cb)
self.cmap_max = FloatText(
description='Max',
value=38,
step = 0.1,
disabled=True,
layout=Layout(width=constWidth2),
)
self.cmap_max.observe(self.mcds_field_cb)
def cmap_fixed_toggle_cb(b):
field_name = self.field_dict[self.mcds_field.value]
# print(self.cmap_fixed_toggle.value)
if (self.cmap_fixed_toggle.value): # toggle on fixed range
self.cmap_min.disabled = False
self.cmap_max.disabled = False
self.field_min_max[field_name][0] = self.cmap_min.value
self.field_min_max[field_name][1] = self.cmap_max.value
self.field_min_max[field_name][2] = True
# self.save_min_max.disabled = False
else: # toggle off fixed range
self.cmap_min.disabled = True
self.cmap_max.disabled = True
self.field_min_max[field_name][2] = False
# self.save_min_max.disabled = True
# self.mcds_field_cb()
self.i_plot.update()
self.cmap_fixed_toggle.observe(cmap_fixed_toggle_cb)
field_cmap_row2 = HBox([self.field_cmap, self.cmap_fixed_toggle])
# field_cmap_row3 = HBox([self.save_min_max, self.cmap_min, self.cmap_max])
items_auto = [
# self.save_min_max, #layout=Layout(flex='3 1 auto', width='auto'),
self.cmap_min,
self.cmap_max,
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='80%')
field_cmap_row3 = Box(children=items_auto, layout=box_layout)
# self.debug_str = Text(
# value='debug info',
# description='Debug:',
# disabled=True,
# layout=Layout(width='600px'), #constWidth = '180px'
# )
#---------------------
self.cell_nucleus_toggle = Checkbox(
description='nuclei',
disabled=False,
value = self.show_nucleus,
# layout=Layout(width=constWidth2),
)
def cell_nucleus_toggle_cb(b):
# self.update()
if (self.cell_nucleus_toggle.value):
self.show_nucleus = True
else:
self.show_nucleus = False
self.i_plot.update()
self.cell_nucleus_toggle.observe(cell_nucleus_toggle_cb)
#----
self.cell_edges_toggle = Checkbox(
description='edges',
disabled=False,
value=self.show_edge,
# layout=Layout(width=constWidth2),
)
def cell_edges_toggle_cb(b):
# self.update()
if (self.cell_edges_toggle.value):
self.show_edge = True
else:
self.show_edge = False
self.i_plot.update()
self.cell_edges_toggle.observe(cell_edges_toggle_cb)
self.cells_toggle = Checkbox(
description='Cells',
disabled=False,
value=True,
# layout=Layout(width=constWidth2),
)
def cells_toggle_cb(b):
# self.update()
self.i_plot.update()
if (self.cells_toggle.value):
self.cell_edges_toggle.disabled = False
self.cell_nucleus_toggle.disabled = False
else:
self.cell_edges_toggle.disabled = True
self.cell_nucleus_toggle.disabled = True
self.cells_toggle.observe(cells_toggle_cb)
#---------------------
self.substrates_toggle = Checkbox(
description='Substrates',
disabled=True,
value=False,
# layout=Layout(width=constWidth2),
)
def substrates_toggle_cb(b):
if (self.substrates_toggle.value): # seems bass-ackwards
self.cmap_fixed_toggle.disabled = False
self.cmap_min.disabled = False
self.cmap_max.disabled = False
self.mcds_field.disabled = False
self.field_cmap.disabled = False
else:
self.cmap_fixed_toggle.disabled = True
self.cmap_min.disabled = True
self.cmap_max.disabled = True
self.mcds_field.disabled = True
self.field_cmap.disabled = True
self.substrates_toggle.observe(substrates_toggle_cb)
self.grid_toggle = Checkbox(
description='grid',
disabled=False,
value=True,
# layout=Layout(width=constWidth2),
)
def grid_toggle_cb(b):
# self.update()
self.i_plot.update()
self.grid_toggle.observe(grid_toggle_cb)
# field_cmap_row3 = Box([self.save_min_max, self.cmap_min, self.cmap_max])
# mcds_tab = widgets.VBox([mcds_dir, mcds_plot, mcds_play], layout=tab_layout)
# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3, self.max_frames]) # mcds_dir
# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3,]) # mcds_dir
# self.tab = HBox([mcds_params, self.mcds_plot], layout=tab_layout)
help_label = Label('select slider: drag or left/right arrows')
# row1 = Box([help_label, Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='0px solid black',
row1a = Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
row1b = Box( [self.cells_toggle, self.cell_nucleus_toggle, self.cell_edges_toggle], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
row1 = HBox( [row1a, Label('.....'), row1b])
row2a = Box([self.cmap_fixed_toggle, self.cmap_min, self.cmap_max], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
# row2b = Box( [self.substrates_toggle, self.grid_toggle], layout=Layout(border='1px solid black',
row2b = Box( [self.substrates_toggle, ], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
# row2 = HBox( [row2a, self.substrates_toggle, self.grid_toggle])
row2 = HBox( [row2a, Label('.....'), row2b])
if (hublib_flag):
self.fury_button= Button(
description="Send current frame's 3D data to Fury", #style={'description_width': 'initial'},
button_style='success', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click to send data to the Fury GPU server',
disabled=False,
layout=Layout(width='280px')
)
self.fury_feedback_str = Label(value='')
self.day13_button= Button(
description="Send day 13 data", #style={'description_width': 'initial'},
button_style='success', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click to send day 13 data to Fury',
disabled=False,
layout=Layout(width='180px')
)
self.day13_feedback_str = Label(value='')
self.fury_reset_button= Button(
description="Reset Fury", #style={'description_width': 'initial'},
button_style='success', # 'success', 'info', 'warning', 'danger' or ''
disabled=False,
layout=Layout(width='180px')
)
def send_to_fury_cb(b):
self.fury_feedback_str.value = "working..."
session_dir = os.getenv('SESSIONDIR')
print('session_dir = ',session_dir)
session_id = os.getenv('SESSION')
print('session_id = ',session_id)
user_id = os.getenv('USER')
print('user_id = ',user_id)
fury_data_path_str = "/data/tools/shared/" + user_id + "/fury/" + session_id
# updated, based on email from Serge (1/19/21)
fury_data_path_str2 = "/srv/nanohub/data/tools/shared/" + user_id + "/fury/" + session_id
# dummy to test locally
# fury_data_path_str = "/tmp/" + user_id + "/fury"
print("fury_data_path_str = ",fury_data_path_str)
print("fury_data_path_str2 = ",fury_data_path_str2)
os.makedirs(fury_data_path_str, exist_ok=True)
# data_file = "output00000001_cells_physicell.mat"
# we need to copy 3(?) files (for any one frame)
mesh_file = "initial_mesh0.mat"
xml_file = "output%08d.xml" % self.svg_frame
data_file = "output%08d_cells_physicell.mat" % self.svg_frame
# from the app's root directory
# print("self.output_dir = ",self.output_dir)
# from_file = "tmpdir/" + data_file
from_file = self.output_dir + "/" + mesh_file
to_file = fury_data_path_str + "/" + mesh_file
copyfile(from_file, to_file)
from_file = self.output_dir + "/" + xml_file
to_file = fury_data_path_str + "/" + xml_file
copyfile(from_file, to_file)
from_file = self.output_dir + "/" + data_file
print("from: ",from_file)
to_file = fury_data_path_str + "/" + data_file
print("to: ",to_file)
copyfile(from_file, to_file)
# time.sleep(3)
file = Path(to_file)
while not file.exists():
time.sleep(2)
# copyfile("tmpdir/" + data_file, fury_data_path_str + "/" + "output00000001_cells_physicell.mat")
# Send signal to Fury that new data is ready: (folder, filename)
self.fury_tab.send_data(fury_data_path_str2, xml_file)
self.fury_feedback_str.value = ""
self.fury_button.on_click(send_to_fury_cb)
def send_day13_to_fury_cb(b):
self.day13_feedback_str.value = "working..."
session_dir = os.getenv('SESSIONDIR')
print('session_dir = ',session_dir)
session_id = os.getenv('SESSION')
print('session_id = ',session_id)
user_id = os.getenv('USER')
print('user_id = ',user_id)
fury_data_path_str = "/data/tools/shared/" + user_id + "/fury/" + session_id
# updated, based on email from Serge (1/19/21)
fury_data_path_str2 = "/srv/nanohub/data/tools/shared/" + user_id + "/fury/" + session_id
# dummy to test locally
# fury_data_path_str = "/tmp/" + user_id + "/fury"
print("fury_data_path_str = ",fury_data_path_str)
print("fury_data_path_str2 = ",fury_data_path_str2)
os.makedirs(fury_data_path_str, exist_ok=True)
# data_file = "output00000001_cells_physicell.mat"
# we need to copy 3(?) files (for any one frame)
mesh_file = "initial_mesh0.mat"
xml_file = "output%08d.xml" % 13
data_file = "output%08d_cells_physicell.mat" % 13
# from the app's root directory
# print("self.output_dir = ",self.output_dir)
# from_file = "tmpdir/" + data_file
# from_file = self.output_dir + "/" + mesh_file
from_file = "data/" + mesh_file
to_file = fury_data_path_str + "/" + mesh_file
copyfile(from_file, to_file)
# from_file = self.output_dir + "/" + xml_file
from_file = "data/" + xml_file
to_file = fury_data_path_str + "/" + xml_file
copyfile(from_file, to_file)
# from_file = self.output_dir + "/" + data_file
from_file = "data/" + data_file
print("from: ",from_file)
to_file = fury_data_path_str + "/" + data_file
print("to: ",to_file)
copyfile(from_file, to_file)
# time.sleep(3)
file = Path(to_file)
while not file.exists():
time.sleep(2)
# copyfile("tmpdir/" + data_file, fury_data_path_str + "/" + "output00000001_cells_physicell.mat")
# Send signal to Fury that new data is ready: (folder, filename)
self.fury_tab.send_data(fury_data_path_str2, xml_file)
self.day13_feedback_str.value = ""
self.day13_button.on_click(send_day13_to_fury_cb)
# self.fury_button.on_click(send_to_fury_cb)
fury_row = HBox([self.fury_button, self.fury_feedback_str])
day13_row = HBox([self.day13_button, self.day13_feedback_str])
#--------
def fury_reset_cb(b):
self.fury_tab.reset()
self.fury_reset_button.on_click(fury_reset_cb)
# self.fury_button = Button(description='random_seed', disabled=True, layout=name_button_layout)
# param_name1.style.button_color = 'lightgreen'
self.download_button = Download('mcds.zip', style='warning', icon='cloud-download',
tooltip='Download data', cb=self.download_cb)
self.download_svg_button = Download('svg.zip', style='warning', icon='cloud-download',
tooltip='You need to allow pop-ups in your browser', cb=self.download_svg_cb)
download_row = HBox([self.download_button.w, self.download_svg_button.w, Label("Download all cell plots (browser must allow pop-ups).")])
# https://hubzero.github.io/hublib/ui.html#file-upload
# self.upload_button = Upload('Single frame upload', style='warning', icon='cloud-upload',
# tooltip='Upload zipped data', maxsize='10M', cb=self.upload_cb)
# upload_row = HBox([self.upload_button.w, Label("Upload zipped data (single frame)")])
# box_layout = Layout(border='0px solid')
controls_box = VBox([row1, row2]) # ,width='50%', layout=box_layout)
# self.tab = VBox([controls_box, self.i_plot, fury_row, self.fury_reset_button, download_row, upload_row])
self.tab = VBox([controls_box, self.i_plot, fury_row, day13_row, self.fury_reset_button, download_row])
# self.tab = VBox([controls_box, self.debug_str, self.i_plot, download_row])
else: # no hublib
# self.tab = VBox([row1, row2])
self.tab = VBox([row1, row2, self.i_plot])
#---------------------------------------------------
def update_dropdown_fields(self, data_dir):
# print('update_dropdown_fields called --------')
self.output_dir = data_dir
tree = None
try:
fname = os.path.join(self.output_dir, "initial.xml")
tree = ET.parse(fname)
xml_root = tree.getroot()
except:
print("Cannot open ",fname," to read info, e.g., names of substrate fields.")
return
xml_root = tree.getroot()
self.field_min_max = {}
self.field_dict = {}
dropdown_options = {}
uep = xml_root.find('.//variables')
comment_str = ""
field_idx = 0
if (uep):
for elm in uep.findall('variable'):
# print("-----> ",elm.attrib['name'])
field_name = elm.attrib['name']
self.field_min_max[field_name] = [0., 1., False]
self.field_dict[field_idx] = field_name
dropdown_options[field_name] = field_idx
self.field_min_max[field_name][0] = 0
self.field_min_max[field_name][1] = 1
# self.field_min_max[field_name][0] = field_idx #rwh: helps debug
# self.field_min_max[field_name][1] = field_idx+1
self.field_min_max[field_name][2] = False
field_idx += 1
# constWidth = '180px'
# print('options=',dropdown_options)
# print(self.field_min_max) # debug
self.mcds_field.value = 0
self.mcds_field.options = dropdown_options
# self.mcds_field = Dropdown(
# # options={'oxygen': 0, 'glucose': 1},
# options=dropdown_options,
# value=0,
# # description='Field',
# layout=Layout(width=constWidth)
# )
# def update_max_frames_expected(self, value): # called when beginning an interactive Run
# self.max_frames.value = value # assumes naming scheme: "snapshot%08d.svg"
# self.mcds_plot.children[0].max = self.max_frames.value
#------------------------------------------------------------------------------
def update_params(self, config_tab, user_params_tab):
# xml_root.find(".//x_min").text = str(self.xmin.value)
# xml_root.find(".//x_max").text = str(self.xmax.value)
# xml_root.find(".//dx").text = str(self.xdelta.value)
# xml_root.find(".//y_min").text = str(self.ymin.value)
# xml_root.find(".//y_max").text = str(self.ymax.value)
# xml_root.find(".//dy").text = str(self.ydelta.value)
# xml_root.find(".//z_min").text = str(self.zmin.value)
# xml_root.find(".//z_max").text = str(self.zmax.value)
# xml_root.find(".//dz").text = str(self.zdelta.value)
self.xmin = config_tab.xmin.value
self.xmax = config_tab.xmax.value
self.x_range = self.xmax - self.xmin
self.svg_xrange = self.xmax - self.xmin
self.ymin = config_tab.ymin.value
self.ymax = config_tab.ymax.value
self.y_range = self.ymax - self.ymin
self.numx = math.ceil( (self.xmax - self.xmin) / config_tab.xdelta.value)
self.numy = math.ceil( (self.ymax - self.ymin) / config_tab.ydelta.value)
if (self.x_range > self.y_range):
ratio = self.y_range / self.x_range
self.figsize_width_substrate = 15.0 # allow extra for colormap
self.figsize_height_substrate = 12.5 * ratio
self.figsize_width_svg = 12.0
self.figsize_height_svg = 12.0 * ratio
else: # x < y
ratio = self.x_range / self.y_range
self.figsize_width_substrate = 15.0 * ratio
self.figsize_height_substrate = 12.5
self.figsize_width_svg = 12.0 * ratio
self.figsize_height_svg = 12.0
self.svg_flag = config_tab.toggle_svg.value
self.substrates_flag = config_tab.toggle_mcds.value
# print("substrates: update_params(): svg_flag, toggle=",self.svg_flag,config_tab.toggle_svg.value)
# print("substrates: update_params(): self.substrates_flag = ",self.substrates_flag)
self.svg_delta_t = config_tab.svg_interval.value
self.substrate_delta_t = config_tab.mcds_interval.value
self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
# print("substrates: update_params(): modulo=",self.modulo)
if self.customized_output_freq:
# self.therapy_activation_time = user_params_tab.therapy_activation_time.value # NOTE: edit for user param name
# print("substrates: update_params(): therapy_activation_time=",self.therapy_activation_time)
self.max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)
self.max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)
#------------------------------------------------------------------------------
# def update(self, rdir):
# Called from driver module (e.g., pc4*.py) (among other places?)
def update(self, rdir=''):
# with debug_view:
# print("substrates: update rdir=", rdir)
# print("substrates: update rdir=", rdir)
if rdir:
self.output_dir = rdir
# print('update(): self.output_dir = ', self.output_dir)
if self.first_time:
# if True:
self.first_time = False
full_xml_filename = Path(os.path.join(self.output_dir, 'config.xml'))
# print("substrates: update(), config.xml = ",full_xml_filename)
# self.num_svgs = len(glob.glob(os.path.join(self.output_dir, 'snap*.svg')))
# self.num_substrates = len(glob.glob(os.path.join(self.output_dir, 'output*.xml')))
# print("substrates: num_svgs,num_substrates =",self.num_svgs,self.num_substrates)
# argh - no! If no files created, then denom = -1
# self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))
# print("substrates: update(): modulo=",self.modulo)
if full_xml_filename.is_file():
tree = ET.parse(str(full_xml_filename)) # this file cannot be overwritten; part of tool distro
xml_root = tree.getroot()
self.svg_delta_t = float(xml_root.find(".//SVG//interval").text)
self.substrate_delta_t = float(xml_root.find(".//full_data//interval").text)
# print("substrates: svg,substrate delta_t values=",self.svg_delta_t,self.substrate_delta_t)
self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
# print("substrates: update(): modulo=",self.modulo)
# all_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml'))) # if the substrates/MCDS
all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snap*.svg'))) # if .svg
if len(all_files) > 0:
last_file = all_files[-1]
self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg"
else:
substrate_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml')))
if len(substrate_files) > 0:
last_file = substrate_files[-1]
self.max_frames.value = int(last_file[-12:-4])
def download_svg_cb(self):
file_str = os.path.join(self.output_dir, '*.svg')
# print('zip up all ',file_str)
with zipfile.ZipFile('svg.zip', 'w') as myzip:
for f in glob.glob(file_str):
myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive
def download_cb(self):
file_xml = os.path.join(self.output_dir, '*.xml')
file_mat = os.path.join(self.output_dir, '*.mat')
# print('zip up all ',file_str)
with zipfile.ZipFile('mcds.zip', 'w') as myzip:
for f in glob.glob(file_xml):
myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive
for f in glob.glob(file_mat):
myzip.write(f, os.path.basename(f))
def upload_cb(self,w,name):
# file_xml = os.path.join(self.output_dir, '*.xml')
# file_mat = os.path.join(self.output_dir, '*.mat')
# print('zip up all ',file_str)
print("%s downloaded" % name)
w.reset()
# with zipfile.ZipFile('mcds.zip', 'w') as myzip:
# for f in glob.glob(file_xml):
# myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive
# for f in glob.glob(file_mat):
# myzip.write(f, os.path.basename(f))
def update_max_frames(self,_b):
self.i_plot.children[0].max = self.max_frames.value
# called if user selected different substrate in dropdown
def mcds_field_changed_cb(self, b):
# print("mcds_field_changed_cb: self.mcds_field.value=",self.mcds_field.value)
if (self.mcds_field.value == None):
return
self.field_index = self.mcds_field.value + 4
field_name = self.field_dict[self.mcds_field.value]
# print('mcds_field_changed_cb: field_name='+ field_name)
# print(self.field_min_max[field_name])
# self.debug_str.value = 'mcds_field_changed_cb: '+ field_name + str(self.field_min_max[field_name])
# self.debug_str.value = 'cb1: '+ str(self.field_min_max)
# BEWARE of these triggering the mcds_field_cb() callback! Hence, the "skip_cb"
self.skip_cb = True
self.cmap_min.value = self.field_min_max[field_name][0]
self.cmap_max.value = self.field_min_max[field_name][1]
self.cmap_fixed_toggle.value = bool(self.field_min_max[field_name][2])
self.skip_cb = False
self.i_plot.update()
# called if user provided different min/max values for colormap, or a different colormap
def mcds_field_cb(self, b):
if self.skip_cb:
return
self.field_index = self.mcds_field.value + 4
field_name = self.field_dict[self.mcds_field.value]
# print('mcds_field_cb: field_name='+ field_name)
# print('mcds_field_cb: '+ field_name)
self.field_min_max[field_name][0] = self.cmap_min.value
self.field_min_max[field_name][1] = self.cmap_max.value
self.field_min_max[field_name][2] = self.cmap_fixed_toggle.value
# print(self.field_min_max[field_name])
# self.debug_str.value = 'mcds_field_cb: ' + field_name + str(self.field_min_max[field_name])
# self.debug_str.value = 'cb2: '+ str(self.field_min_max)
# print('--- cb2: '+ str(self.field_min_max)) #rwh2
# self.cmap_fixed_toggle.value = self.field_min_max[field_name][2]
# field_name = self.mcds_field.options[self.mcds_field.value]
# self.cmap_min.value = self.field_min_max[field_name][0] # oxygen, etc
# self.cmap_max.value = self.field_min_max[field_name][1] # oxygen, etc
# self.field_index = self.mcds_field.value + 4
# print('field_index=',self.field_index)
self.i_plot.update()
#---------------------------------------------------------------------------
def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"""
See https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
        Similar to plt.scatter, but the sizes of the circles are given in data units.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if 'fc' in kwargs:
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if 'ec' in kwargs:
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if 'ls' in kwargs:
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if 'lw' in kwargs:
kwargs.setdefault('linewidth', kwargs.pop('lw'))
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_)
for x_, y_, s_ in zipped]
collection = PatchCollection(patches, **kwargs)
if c is not None:
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
# plt.draw_if_interactive()
if c is not None:
plt.sci(collection)
# return collection
#------------------------------------------------------------
# def plot_svg(self, frame, rdel=''):
def plot_svg(self, frame):
# global current_idx, axes_max
global current_frame
current_frame = frame
fname = "snapshot%08d.svg" % frame
full_fname = os.path.join(self.output_dir, fname)
# with debug_view:
# print("plot_svg:", full_fname)
# print("-- plot_svg:", full_fname)
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.")
return
xlist = deque()
ylist = deque()
rlist = deque()
rgb_list = deque()
# print('\n---- ' + fname + ':')
# tree = ET.parse(fname)
tree = ET.parse(full_fname)
root = tree.getroot()
# print('--- root.tag ---')
# print(root.tag)
# print('--- root.attrib ---')
# print(root.attrib)
# print('--- child.tag, child.attrib ---')
numChildren = 0
for child in root:
# print(child.tag, child.attrib)
# print("keys=",child.attrib.keys())
if self.use_defaults and ('width' in child.attrib.keys()):
self.axes_max = float(child.attrib['width'])
# print("debug> found width --> axes_max =", axes_max)
if child.text and "Current time" in child.text:
svals = child.text.split()
# remove the ".00" on minutes
self.title_str += " cells: " + svals[2] + "d, " + svals[4] + "h, " + svals[7][:-3] + "m"
# self.cell_time_mins = int(svals[2])*1440 + int(svals[4])*60 + int(svals[7][:-3])
# self.title_str += " cells: " + str(self.cell_time_mins) + "m" # rwh
# print("width ",child.attrib['width'])
# print('attrib=',child.attrib)
# if (child.attrib['id'] == 'tissue'):
if ('id' in child.attrib.keys()):
# print('-------- found tissue!!')
tissue_parent = child
break
# print('------ search tissue')
cells_parent = None
for child in tissue_parent:
# print('attrib=',child.attrib)
if (child.attrib['id'] == 'cells'):
# print('-------- found cells, setting cells_parent')
cells_parent = child
break
numChildren += 1
num_cells = 0
# print('------ search cells')
for child in cells_parent:
# print(child.tag, child.attrib)
# print('attrib=',child.attrib)
for circle in child: # two circles in each child: outer + nucleus
# circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'}
# print(' --- cx,cy=',circle.attrib['cx'],circle.attrib['cy'])
xval = float(circle.attrib['cx'])
# map SVG coords into comp domain
# xval = (xval-self.svg_xmin)/self.svg_xrange * self.x_range + self.xmin
xval = xval/self.x_range * self.x_range + self.xmin
s = circle.attrib['fill']
# print("s=",s)
# print("type(s)=",type(s))
if (s[0:3] == "rgb"): # if an rgb string, e.g. "rgb(175,175,80)"
rgb = list(map(int, s[4:-1].split(",")))
rgb[:] = [x / 255. for x in rgb]
else: # otherwise, must be a color name
rgb_tuple = mplc.to_rgb(mplc.cnames[s]) # a tuple
rgb = [x for x in rgb_tuple]
# test for bogus x,y locations (rwh TODO: use max of domain?)
too_large_val = 10000.
if (np.fabs(xval) > too_large_val):
print("bogus xval=", xval)
break
yval = float(circle.attrib['cy'])
# yval = (yval - self.svg_xmin)/self.svg_xrange * self.y_range + self.ymin
yval = yval/self.y_range * self.y_range + self.ymin
if (np.fabs(yval) > too_large_val):
print("bogus xval=", xval)
break
rval = float(circle.attrib['r'])
# if (rgb[0] > rgb[1]):
# print(num_cells,rgb, rval)
xlist.append(xval)
ylist.append(yval)
rlist.append(rval)
rgb_list.append(rgb)
                # For .svg files with cells that *have* a nucleus, there will be a 2nd circle (the nucleus); skip it unless show_nucleus is on
if (not self.show_nucleus):
#if (not self.show_nucleus):
break
num_cells += 1
# if num_cells > 3: # for debugging
# print(fname,': num_cells= ',num_cells," --- debug exit.")
# sys.exit(1)
# break
# print(fname,': num_cells= ',num_cells)
xvals = np.array(xlist)
yvals = np.array(ylist)
rvals = np.array(rlist)
rgbs = np.array(rgb_list)
# print("xvals[0:5]=",xvals[0:5])
# print("rvals[0:5]=",rvals[0:5])
# print("rvals.min, max=",rvals.min(),rvals.max())
# rwh - is this where I change size of render window?? (YES - yipeee!)
# plt.figure(figsize=(6, 6))
# plt.cla()
# if (self.substrates_toggle.value):
self.title_str += " (" + str(num_cells) + " agents)"
# title_str = " (" + str(num_cells) + " agents)"
# else:
# mins= round(int(float(root.find(".//current_time").text))) # TODO: check units = mins
# hrs = int(mins/60)
# days = int(hrs/24)
# title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
plt.title(self.title_str)
plt.xlim(self.xmin, self.xmax)
plt.ylim(self.ymin, self.ymax)
# plt.xlim(axes_min,axes_max)
# plt.ylim(axes_min,axes_max)
# plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs)
# TODO: make figsize a function of plot_size? What about non-square plots?
# self.fig = plt.figure(figsize=(9, 9))
# axx = plt.axes([0, 0.05, 0.9, 0.9]) # left, bottom, width, height
# axx = fig.gca()
# print('fig.dpi=',fig.dpi) # = 72
# im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
# ax.xlim(axes_min,axes_max)
# ax.ylim(axes_min,axes_max)
# convert radii to radii in pixels
# ax2 = self.fig.gca()
# N = len(xvals)
# rr_pix = (ax2.transData.transform(np.vstack([rvals, rvals]).T) -
# ax2.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T))
# rpix, _ = rr_pix.T
# markers_size = (144. * rpix / self.fig.dpi)**2 # = (2*rpix / fig.dpi * 72)**2
# markers_size = markers_size/4000000.
# print('max=',markers_size.max())
#rwh - temp fix - Ah, error only occurs when "edges" is toggled on
if (self.show_edge):
try:
# plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5)
self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
# cell_circles = self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
# plt.sci(cell_circles)
except (ValueError):
pass
else:
# plt.scatter(xvals,yvals, s=markers_size, c=rgbs)
self.circles(xvals,yvals, s=rvals, color=rgbs)
# if (self.show_tracks):
# for key in self.trackd.keys():
# xtracks = self.trackd[key][:,0]
# ytracks = self.trackd[key][:,1]
# plt.plot(xtracks[0:frame],ytracks[0:frame], linewidth=5)
# plt.xlim(self.axes_min, self.axes_max)
# plt.ylim(self.axes_min, self.axes_max)
# ax.grid(False)
# axx.set_title(title_str)
# plt.title(title_str)
#---------------------------------------------------------------------------
# assume "frame" is cell frame #, unless Cells is togggled off, then it's the substrate frame #
# def plot_substrate(self, frame, grid):
def plot_substrate(self, frame):
# global current_idx, axes_max, gFileId, field_index
# print("plot_substrate(): frame*self.substrate_delta_t = ",frame*self.substrate_delta_t)
# print("plot_substrate(): frame*self.svg_delta_t = ",frame*self.svg_delta_t)
self.title_str = ''
# Recall:
# self.svg_delta_t = config_tab.svg_interval.value
# self.substrate_delta_t = config_tab.mcds_interval.value
# self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
# self.therapy_activation_time = user_params_tab.therapy_activation_time.value
# print("plot_substrate(): pre_therapy: max svg, substrate frames = ",max_svg_frame_pre_therapy, max_substrate_frame_pre_therapy)
# Assume: # .svg files >= # substrate files
# if (self.cells_toggle.value):
# if (self.substrates_toggle.value and frame*self.substrate_delta_t <= self.svg_frame*self.svg_delta_t):
# if (self.substrates_toggle.value and (frame % self.modulo == 0)):
if (self.substrates_toggle.value):
# self.fig = plt.figure(figsize=(14, 15.6))
# self.fig = plt.figure(figsize=(15.0, 12.5))
self.fig = plt.figure(figsize=(self.figsize_width_substrate, self.figsize_height_substrate))
# rwh - funky way to figure out substrate frame for pc4cancerbots (due to user-defined "save_interval*")
# self.cell_time_mins
# self.substrate_frame = int(frame / self.modulo)
if (self.customized_output_freq and (frame > self.max_svg_frame_pre_therapy)):
# max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)
# max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)
self.substrate_frame = self.max_substrate_frame_pre_therapy + (frame - self.max_svg_frame_pre_therapy)
else:
self.substrate_frame = int(frame / self.modulo)
# print("plot_substrate(): self.substrate_frame=",self.substrate_frame)
# if (self.substrate_frame > (self.num_substrates-1)):
# self.substrate_frame = self.num_substrates-1
# print('self.substrate_frame = ',self.substrate_frame)
# if (self.cells_toggle.value):
# self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))
# self.substrate_frame = frame % self.modulo
# else:
# self.substrate_frame = frame
fname = "output%08d_microenvironment0.mat" % self.substrate_frame
xml_fname = "output%08d.xml" % self.substrate_frame
# fullname = output_dir_str + fname
# fullname = fname
full_fname = os.path.join(self.output_dir, fname)
# print("--- plot_substrate(): full_fname=",full_fname)
full_xml_fname = os.path.join(self.output_dir, xml_fname)
# self.output_dir = '.'
# if not os.path.isfile(fullname):
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.") # No: output00000000_microenvironment0.mat
return
# tree = ET.parse(xml_fname)
tree = ET.parse(full_xml_fname)
xml_root = tree.getroot()
mins = round(int(float(xml_root.find(".//current_time").text))) # TODO: check units = mins
self.substrate_mins= round(int(float(xml_root.find(".//current_time").text))) # TODO: check units = mins
hrs = int(mins/60)
days = int(hrs/24)
self.title_str = 'substrate: %dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
# self.title_str = 'substrate: %dm' % (mins ) # rwh
info_dict = {}
# scipy.io.loadmat(fullname, info_dict)
scipy.io.loadmat(full_fname, info_dict)
M = info_dict['multiscale_microenvironment']
# global_field_index = int(mcds_field.value)
# print('plot_substrate: field_index =',field_index)
f = M[self.field_index, :] # 4=tumor cells field, 5=blood vessel density, 6=growth substrate
# plt.clf()
# my_plot = plt.imshow(f.reshape(400,400), cmap='jet', extent=[0,20, 0,20])
# self.fig = plt.figure(figsize=(18.0,15)) # this strange figsize results in a ~square contour plot
# plt.subplot(grid[0:1, 0:1])
# main_ax = self.fig.add_subplot(grid[0:1, 0:1]) # works, but tiny upper-left region
#main_ax = self.fig.add_subplot(grid[0:2, 0:2])
# main_ax = self.fig.add_subplot(grid[0:, 0:2])
#main_ax = self.fig.add_subplot(grid[:-1, 0:]) # nrows, ncols
#main_ax = self.fig.add_subplot(grid[0:, 0:]) # nrows, ncols
#main_ax = self.fig.add_subplot(grid[0:4, 0:]) # nrows, ncols
# main_ax = self.fig.add_subplot(grid[0:3, 0:]) # nrows, ncols
# main_ax = self.fig.add_subplot(111) # nrows, ncols
# plt.rc('font', size=10) # TODO: does this affect the Cell plots fonts too? YES. Not what we want.
# fig.set_tight_layout(True)
# ax = plt.axes([0, 0.05, 0.9, 0.9 ]) #left, bottom, width, height
# ax = plt.axes([0, 0.0, 1, 1 ])
# cmap = plt.cm.viridis # Blues, YlOrBr, ...
# im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
# ax.grid(False)
# print("substrates.py: ------- numx, numy = ", self.numx, self.numy )
# if (self.numx == 0): # need to parse vals from the config.xml
# # print("--- plot_substrate(): full_fname=",full_fname)
# fname = os.path.join(self.output_dir, "config.xml")
# tree = ET.parse(fname)
# xml_root = tree.getroot()
# self.xmin = float(xml_root.find(".//x_min").text)
# self.xmax = float(xml_root.find(".//x_max").text)
# dx = float(xml_root.find(".//dx").text)
# self.ymin = float(xml_root.find(".//y_min").text)
# self.ymax = float(xml_root.find(".//y_max").text)
# dy = float(xml_root.find(".//dy").text)
# self.numx = math.ceil( (self.xmax - self.xmin) / dx)
# self.numy = math.ceil( (self.ymax - self.ymin) / dy)
try:
xgrid = M[0, :].reshape(self.numy, self.numx)
ygrid = M[1, :].reshape(self.numy, self.numx)
except:
print("substrates.py: mismatched mesh size for reshape: numx,numy=",self.numx, self.numy)
pass
# xgrid = M[0, :].reshape(self.numy, self.numx)
# ygrid = M[1, :].reshape(self.numy, self.numx)
num_contours = 15
levels = MaxNLocator(nbins=num_contours).tick_values(self.cmap_min.value, self.cmap_max.value)
contour_ok = True
if (self.cmap_fixed_toggle.value):
try:
# substrate_plot = main_ax.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value, fontsize=self.fontsize)
substrate_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value, fontsize=self.fontsize)
except:
contour_ok = False
# print('got error on contourf 1.')
else:
try:
# substrate_plot = main_ax.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)
substrate_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)
except:
contour_ok = False
# print('got error on contourf 2.')
if (contour_ok):
# main_ax.set_title(self.title_str, fontsize=self.fontsize)
plt.title(self.title_str, fontsize=self.fontsize)
# main_ax.tick_params(labelsize=self.fontsize)
# cbar = plt.colorbar(my_plot)
# cbar = self.fig.colorbar(substrate_plot, ax=main_ax)
cbar = self.fig.colorbar(substrate_plot)
cbar.ax.tick_params(labelsize=self.fontsize)
# cbar = main_ax.colorbar(my_plot)
# cbar.ax.tick_params(labelsize=self.fontsize)
# axes_min = 0
# axes_max = 2000
# main_ax.set_xlim([self.xmin, self.xmax])
# main_ax.set_ylim([self.ymin, self.ymax])
plt.xlim(self.xmin, self.xmax)
plt.ylim(self.ymin, self.ymax)
# if (frame == 0): # maybe allow substrate grid display later
# xs = np.linspace(self.xmin,self.xmax,self.numx)
# ys = np.linspace(self.ymin,self.ymax,self.numy)
# hlines = np.column_stack(np.broadcast_arrays(xs[0], ys, xs[-1], ys))
# vlines = np.column_stack(np.broadcast_arrays(xs, ys[0], xs, ys[-1]))
# grid_lines = np.concatenate([hlines, vlines]).reshape(-1, 2, 2)
# line_collection = LineCollection(grid_lines, color="gray", linewidths=0.5)
# # ax = main_ax.gca()
# main_ax.add_collection(line_collection)
# # ax.set_xlim(xs[0], xs[-1])
# # ax.set_ylim(ys[0], ys[-1])
# Now plot the cells (possibly on top of the substrate)
if (self.cells_toggle.value):
if (not self.substrates_toggle.value):
# self.fig = plt.figure(figsize=(12, 12))
self.fig = plt.figure(figsize=(self.figsize_width_svg, self.figsize_height_svg))
# self.plot_svg(frame)
self.svg_frame = frame
# print('plot_svg with frame=',self.svg_frame)
self.plot_svg(self.svg_frame)
# plt.subplot(grid[2, 0])
# oxy_ax = self.fig.add_subplot(grid[2:, 0:1])
#oxy_ax = self.fig.add_subplot(grid[:2, 2:])
#oxy_ax = self.fig.add_subplot(grid[:-1, 0:2]) # nrows, ncols
#oxy_ax = self.fig.add_subplot(grid[2:3, 0:1]) # nrows, ncols
# oxy_ax = self.fig.add_subplot(grid[4:4, 0:1]) # invalid
# main_ax = self.fig.add_subplot(grid[0:1, 0:1])
# experiment with small plot of oxygen (or whatever)
# oxy_ax = self.fig.add_subplot(grid[3:4, 0:1]) # nrows, ncols
# x = np.linspace(0, 500)
# oxy_ax.plot(x, 300*np.sin(x))
#---------------------------------------------------------------------------
# def plot_plots(self, frame):
# # if (self.first_time):
# # self.svg_delta_t = 1
# # self.substrate_delta_t = 1
# # self.first_time = False
# if (self.substrates_toggle.value):
# self.fig = plt.figure(figsize=(14, 15.6))
# else: # only cells being displayed (maybe)
# self.fig = plt.figure(figsize=(12, 12))
# # grid = plt.GridSpec(4, 3, wspace=0.10, hspace=0.2) # (nrows, ncols)
# # self.plot_substrate(frame, grid)
# self.plot_substrate(frame)
# # self.plot_svg(frame)
|
[] |
[] |
[
"USER",
"SESSIONDIR",
"SESSION"
] |
[]
|
["USER", "SESSIONDIR", "SESSION"]
|
python
| 3 | 0 | |
src/hebphonics/controllers/hebphonics.py
|
#!/usr/bin/env python
# coding: utf-8
"""Controllers for HebPhonics."""
# native
import os
from inspect import cleandoc
# lib
from flask import jsonify, render_template, request
from sqlalchemy.sql import and_, or_
from sqlalchemy.sql.expression import desc, false, func
from markdown import markdown
# pkg
from .. import app, tokens as T, rules, __version__, __pubdate__
from ..grammar import ItemList, Cluster, Parser
from ..models import Book, Word, Freq
from .jsend import JSend
VERSION = __version__ if not __pubdate__ else f"{__version__} ({__pubdate__})"
GOOGLE_ANALYTICS_ID = os.getenv("GOOGLE_ANALYTICS_ID")
def get_color(count):
"""Simple color scale."""
color = ""
if count == 0:
color = "is-danger"
elif count < 20:
color = "is-warning"
elif count < 300:
color = "is-primary"
elif count < 1000:
color = "is-success"
else:
color = "is-black"
return color
@app.route("/")
def index():
"""Home page."""
return render_template(
"index.html",
books=[b.name for b in Book.query.order_by(Book.id).all()],
symbols=[x for x in list(T.SYMBOLS) if x not in ["dagesh", "holam"]],
rules=rules.RULES,
version=VERSION,
GOOGLE_ANALYTICS_ID=GOOGLE_ANALYTICS_ID,
)
@app.route("/rules")
def list_rules():
"""Get rules list from the database."""
all_rules = {}
all_symbols = {}
see_also = rules.__doc__[rules.__doc__.find("See also:") + 10 :]
for rule, fn in rules.RULES.items():
count = (
Word.query.filter(Word.rules.like(f"%'{rule}'%"))
.with_entities(func.count())
.scalar()
)
parts = cleandoc(fn.__doc__ or "").split("\n")
stmt = f"- **Rule**: {parts[0]}"
rest = (
"\n".join(parts[1:])
.replace("Example:", "- **Example**:")
.replace("Examples:", "- **Examples**:")
.replace("Requires:", "- **Requires**:")
.replace("See also:", "- **See also**:")
.replace("Source:", "- **Source**:")
.replace("Sources:", "- **Sources**:")
)
doc = markdown(f"{rest}\n\n{stmt}\n\n{see_also}")
all_rules[rule] = dict(doc=doc, count=f"{count:,}", color=get_color(count))
for symbol in T.SYMBOLS:
count = (
Word.query.filter(Word.parsed.like(f"%'{symbol}'%"))
.with_entities(func.count())
.scalar()
)
all_symbols[symbol] = dict(count=f"{count:,}", color=get_color(count))
return render_template(
"rules.html",
rules=all_rules,
symbols=all_symbols,
version=VERSION,
GOOGLE_ANALYTICS_ID=GOOGLE_ANALYTICS_ID,
)
@app.route("/words", methods=["GET", "POST"])
def list_word():
"""Search database."""
args = request.json
query = search(**args)
words = [{"h": w.hebrew, "f": w.freq, "r": w.ref} for w in query.all()]
sql = str(query.statement.compile(compile_kwargs={"literal_binds": True}))
return jsonify(JSend(data=words, query=sql))
def _block_letter(base, bclasses, btool, full, fclasses, ftool, dtool, rules_=""):
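    """Build the HTML for one displayed letter: a base glyph (with an optional tooltip) plus its full form/vowel span."""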
if rules_:
rules_ = f"[{rules_}]"
tool = "\n".join([x for x in [btool, dtool, ftool, rules_] if x]).strip()
if tool:
tool = f' data-tooltip="{tool}"'
return f"""
<span class="letter">
<span class="base {' '.join(bclasses)} has-tooltip-left"{tool}>{base}</span>
<span class="full {' '.join(fclasses)}">{full}</span>
</span>"""
@app.route("/display", methods=["POST"])
def display_word():
"""Typographical hints for words."""
word = request.json["word"]
query = Word.query.filter(Word.hebrew == word).first()
if query:
syls = ItemList([ItemList([Cluster(**t) for t in s]) for s in query.syls])
else:
parser = Parser()
parsed = parser.parse(word)
syls = parser.syl(parsed)
result = ""
for num, syl in enumerate(syls):
if num > 0:
result += _block_letter(" | ", ["syllable"], "", "", ["syllable"], "", "")
for sym in syl:
lett = f"{T.SYMBOLS.get(sym.letter, '')}{T.SYMBOLS.get(sym.dagesh, '')}"
vow = T.SYMBOLS.get(sym.vowel, "")
if sym.vowel in [T.NAME_HOLAM_MALE_VAV, T.NAME_SHURUQ]:
vow = ""
lett_show = [T.POINT_HOLAM, T.LETTER_FINAL_KAF]
lett_hide = [T.LETTER_AYIN]
result += _block_letter(
lett,
[f"letter-{sym.letter}", sym.dagesh],
sym.letter,
f"{lett if (lett in lett_show or vow in lett_show) and not (lett in lett_hide or vow in lett_hide) else ' '}{vow}",
[f"vowel-{sym.vowel}"],
sym.vowel,
sym.dagesh,
", ".join(sym.rules),
)
if sym.vowel in [T.NAME_HOLAM_MALE_VAV, T.NAME_SHURUQ]:
lett = T.SYMBOLS.get("vav", "")
vow = T.SYMBOLS.get(sym.vowel, "")
result += _block_letter(
lett,
["letter-vav"],
"",
vow,
[f"vowel-{sym.vowel}"],
sym.vowel,
sym.dagesh,
", ".join(sym.rules),
)
return jsonify(
JSend(
display=f'<div class="hebrew" dir="rtl">{result}</div>',
syllables=str([x.flat() for x in syls]),
rules=str(syls.rules.flat()),
)
)
def _list(key: str, vals: dict) -> list:
"""Get a key from a dictionary of values and ensure it is a list."""
result = vals.get(key, [])
if not isinstance(result, list):
result = [result]
return result
def search(limit=1000, shemot=False, **kwargs):
"""Return a list of Words that match the search criteria.
Kwargs:
books (list): names of the books to search
shemot (bool): if True, allow shemot to be displayed (default: False)
limit (int): maximum number of results (default: 1000)
Character Filters:
- find_any (list): at least one of these characters must appear
- find_all (list): all of these characters must appear
    - find_none (list): none of these characters may appear
- find_seq (list): all of these characters in this relative order must
appear in each word
- find_pat (list): all of these characters in this precise order must
appear in each word
Integer Filters:
- gematria (list[int]): only include words equal to these values
- syllen (list[int]): only include words with these syllable lengths
- frequency (list[int]): only include words with these frequencies
(0=rare, 5=extremely common)
Returns:
list<Word>. Words that match the criteria.
"""
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
query = Word.query.join(Freq)
if not shemot:
query = query.filter(Word.shemot == false())
# Books
books_all = _list("books_all", kwargs)
if books_all:
query = query.join(Book).filter(Book.name.in_(books_all))
# Symbols
find_any = _list("find_any", kwargs)
find_all = _list("find_all", kwargs)
find_none = _list("find_none", kwargs)
find_seq = _list("find_seq", kwargs)
find_pat = _list("find_pat", kwargs)
if find_any:
condition = [Word.parsed.like(f"%'{letter}'%") for letter in find_any]
query = query.filter(or_(*condition))
if find_all:
condition = [Word.parsed.like(f"%'{letter}'%") for letter in find_all]
query = query.filter(and_(*condition))
if find_none:
condition = [~Word.parsed.like(f"%'{letter}'%") for letter in find_none]
query = query.filter(and_(*condition))
if find_seq:
quoted = [f"'{x}'" for x in find_seq]
condition = f"%{'%'.join(quoted)}%"
query = query.filter(Word.parsed.like(condition))
if find_pat:
quoted = [f"'{x}'" for x in find_pat]
condition = ", ".join(quoted)
query = query.filter(Word.parsed.like(f"%{condition}%"))
# Rules
rule_any = _list("rule_any", kwargs)
rule_all = _list("rule_all", kwargs)
rule_none = _list("rule_none", kwargs)
if rule_any:
condition = [Word.rules.like(f"%'{rule}'%") for rule in rule_any]
query = query.filter(or_(*condition))
if rule_all:
condition = [Word.rules.like(f"%'{rule}'%") for rule in rule_all]
query = query.filter(and_(*condition))
if rule_none:
condition = [~Word.rules.like(f"%'{rule}'%") for rule in rule_none]
query = query.filter(and_(*condition))
# Filters
gematria = _list("gematria", kwargs)
syllen = _list("syllen", kwargs)
freq = _list("freq", kwargs)
if gematria:
query = query.filter(Word.gematria.in_(gematria))
if syllen:
query = query.filter(Word.syllen.in_(syllen))
freq_col = func.sum(Freq.freq).label("freq")
if freq:
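        # each requested band n (0 = rare ... 5 = extremely common) keeps words whose
        # summed frequency falls in [5**n, 5**(n + 1)]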
condition = [freq_col.between(5 ** n, 5 ** (n + 1)) for n in freq]
query = query.having(or_(*condition))
# Order
order = kwargs.get("order", "alpha")
if order == "random":
query = query.order_by(func.random())
elif order == "freq":
query = query.order_by(desc("freq"), Freq.book_id, Word.id)
elif order == "alpha":
query = query.order_by(Word.hebrew, Freq.book_id, Word.id)
elif order == "source":
query = query.order_by(Freq.book_id, Word.id)
query = query.add_columns(
# Word.id, Word.hebrew, Word.parsed, Word.rules, freq_col, Freq.ref
Word.id,
Word.hebrew,
freq_col,
Freq.ref,
).group_by(Word.hebrew)
# Limits
if limit:
query = query.limit(limit)
return query
|
[] |
[] |
[
"GOOGLE_ANALYTICS_ID"
] |
[]
|
["GOOGLE_ANALYTICS_ID"]
|
python
| 1 | 0 | |
openstack/auth_env.go
|
package openstack
import (
"os"
"github.com/chnsz/golangsdk"
)
var nilOptions = golangsdk.AuthOptions{}
/*
AuthOptionsFromEnv fills out an identity.AuthOptions structure with the
settings found on the various OpenStack OS_* environment variables.
The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME,
OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME.
Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings,
or an error will result. OS_TENANT_ID, OS_TENANT_NAME, OS_PROJECT_ID, and
OS_PROJECT_NAME are optional.
OS_TENANT_ID and OS_TENANT_NAME are mutually exclusive to OS_PROJECT_ID and
OS_PROJECT_NAME. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will
still be referred to as "tenant" in Gophercloud.
To use this function, first set the OS_* environment variables (for example,
by sourcing an `openrc` file), then:
opts, err := openstack.AuthOptionsFromEnv()
provider, err := openstack.AuthenticatedClient(opts)
*/
func AuthOptionsFromEnv() (golangsdk.AuthOptions, error) {
authURL := os.Getenv("OS_AUTH_URL")
username := os.Getenv("OS_USERNAME")
userID := os.Getenv("OS_USERID")
password := os.Getenv("OS_PASSWORD")
tenantID := os.Getenv("OS_TENANT_ID")
tenantName := os.Getenv("OS_TENANT_NAME")
domainID := os.Getenv("OS_DOMAIN_ID")
domainName := os.Getenv("OS_DOMAIN_NAME")
// If OS_PROJECT_ID is set, overwrite tenantID with the value.
if v := os.Getenv("OS_PROJECT_ID"); v != "" {
tenantID = v
}
// If OS_PROJECT_NAME is set, overwrite tenantName with the value.
if v := os.Getenv("OS_PROJECT_NAME"); v != "" {
tenantName = v
}
if authURL == "" {
err := golangsdk.ErrMissingInput{Argument: "authURL"}
return nilOptions, err
}
if username == "" && userID == "" {
err := golangsdk.ErrMissingInput{Argument: "username"}
return nilOptions, err
}
if password == "" {
err := golangsdk.ErrMissingInput{Argument: "password"}
return nilOptions, err
}
ao := golangsdk.AuthOptions{
IdentityEndpoint: authURL,
UserID: userID,
Username: username,
Password: password,
TenantID: tenantID,
TenantName: tenantName,
DomainID: domainID,
DomainName: domainName,
}
return ao, nil
}
|
[
"\"OS_AUTH_URL\"",
"\"OS_USERNAME\"",
"\"OS_USERID\"",
"\"OS_PASSWORD\"",
"\"OS_TENANT_ID\"",
"\"OS_TENANT_NAME\"",
"\"OS_DOMAIN_ID\"",
"\"OS_DOMAIN_NAME\"",
"\"OS_PROJECT_ID\"",
"\"OS_PROJECT_NAME\""
] |
[] |
[
"OS_PROJECT_NAME",
"OS_PROJECT_ID",
"OS_AUTH_URL",
"OS_PASSWORD",
"OS_TENANT_ID",
"OS_USERNAME",
"OS_TENANT_NAME",
"OS_DOMAIN_ID",
"OS_DOMAIN_NAME",
"OS_USERID"
] |
[]
|
["OS_PROJECT_NAME", "OS_PROJECT_ID", "OS_AUTH_URL", "OS_PASSWORD", "OS_TENANT_ID", "OS_USERNAME", "OS_TENANT_NAME", "OS_DOMAIN_ID", "OS_DOMAIN_NAME", "OS_USERID"]
|
go
| 10 | 0 | |
platform/Stepik/Algorithms/14-Longest-non-strictly-increasing-subsequence/longest_non_strictly_decreasing_subsequence.go
|
package main
import (
"bufio"
"fmt"
"math/bits"
"os"
"sort"
)
const (
maxInt int = (1<<bits.UintSize)/2 - 1
minInt int = (1 << bits.UintSize) / -2
)
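// longestNonStrictlyDecreasingSubsequence returns the 1-based indices of one longest
// non-strictly (weakly) decreasing subsequence of arr, using an O(n log n)
// binary-search variant of the classic LIS algorithm.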
func longestNonStrictlyDecreasingSubsequence(arr []int) []int {
n := len(arr)
dp := make([]int, n+1)
position := make([]int, n+1)
previous := make([]int, n)
dp[0] = maxInt
for i := 1; i <= n; i++ {
dp[i] = minInt
}
position[0] = -1
length := 0
for i, num := range arr {
j := sort.Search(n, func(i int) bool { return num > dp[i] })
if num >= dp[j] {
dp[j] = num
position[j] = i
previous[i] = position[j-1]
if j > length {
length = j
}
}
}
result := make([]int, length)
for i, j := position[length], length-1; i != -1; i = previous[i] {
result[j] = i + 1
j--
}
return result
}
func main() {
stdin, err := os.Open(os.Getenv("INPUT_PATH"))
if err != nil {
stdin = os.Stdin
}
defer stdin.Close()
stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
if err != nil {
stdout = os.Stdout
}
defer stdout.Close()
reader := bufio.NewReaderSize(stdin, 1024*1024)
writer := bufio.NewWriterSize(stdout, 1024*1024)
var n int
_, err = fmt.Fscanln(reader, &n)
checkError(err)
arr := make([]int, n)
for i := 0; i < n; i++ {
_, err = fmt.Fscan(reader, &arr[i])
checkError(err)
}
result := longestNonStrictlyDecreasingSubsequence(arr)
fmt.Fprintln(writer, len(result))
for i := range result {
fmt.Fprintf(writer, "%d ", result[i])
}
writer.Flush()
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
|
[
"\"INPUT_PATH\"",
"\"OUTPUT_PATH\""
] |
[] |
[
"INPUT_PATH",
"OUTPUT_PATH"
] |
[]
|
["INPUT_PATH", "OUTPUT_PATH"]
|
go
| 2 | 0 | |
examples/imagenet_d/main.py
|
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from map_files import *
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
                    ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
#new
parser.add_argument('--use-train-statistics', action='store_true')
def use_train_statistics(module):
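    """Switch BatchNorm2d layers back to train mode so evaluation uses batch statistics instead of the stored running estimates."""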
if isinstance(module, nn.BatchNorm2d):
module.train()
best_acc1 = 0
def main():
args = parser.parse_args()
print(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
mapping_vector, _, _ = create_symlinks_and_get_imagenet_visda_mapping(args.data, map_dict)
valdir = './visda_symlinks/' + args.data.split('/')[-2] + '/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
print('Number of classes: ', len(val_dataset.classes))
print('Number of images: ', len(val_loader.dataset))
if args.evaluate:
validate(val_loader, model, criterion, args, mapping_vector)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args)
# evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, mapping_vector)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def validate(val_loader, model, criterion, args, mapping_vector):
batch_time = AverageMeter('Time', ':6.3f')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
if args.use_train_statistics:
model.apply(use_train_statistics)
nr_of_accepted_images = 0
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
# measure accuracy
# images are mapped in the accuracy function
acc = accuracy(output, target, topk=(1, 5), mapping_vector=mapping_vector)
top1.update(acc[0].item(), images.size(0))
top5.update(acc[1].item(), images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
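    # e.g. with the default lr of 0.1: epochs 0-29 -> 0.1, 30-59 -> 0.01, 60-89 -> 0.001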
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,), mapping_vector=[]):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
# map imagenet predictions for the top5 labels to visda classes
pred_label_visda = torch.zeros(pred.shape)
if torch.cuda.is_available():
pred_label_visda = torch.zeros(pred.shape).cuda()
for k in range(maxk):
pred_label_visda[k] = map_imagenet_class_to_visda_class(pred[k], mapping_vector)
correct = pred_label_visda.eq(target.view(1, -1).expand_as(pred_label_visda))
res = []
for k in topk:
correct_k, _ = correct[:k].float().max(dim=0)
correct_k = correct_k.sum()
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
[] |
[] |
[
"RANK",
"WORLD_SIZE"
] |
[]
|
["RANK", "WORLD_SIZE"]
|
python
| 2 | 0 | |
webapp/scripts/environment.py
|
import os,sys
proj_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.environ["DJANGO_SETTINGS_MODULE"] = "zooniverse.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zooniverse.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
web/server.py
|
"""This module contains basic functions to instantiate the BigchainDB API.
The application is implemented in Flask and runs using Gunicorn.
"""
import os
import copy
import multiprocessing
import gunicorn.app.base
from flask import Flask
from flask_cors import CORS
from web.views.info import info_views
from web.views.users import user_views
from web.views.manifestations import manifestation_views
from web.views.rights import right_views
from web.views.works import work_views
class StandaloneApplication(gunicorn.app.base.BaseApplication):
"""Run a **wsgi** app wrapping it in a Gunicorn Base Application.
Adapted from:
- http://docs.gunicorn.org/en/latest/custom.html
"""
def __init__(self, app, options=None):
'''Initialize a new standalone application.
Args:
app: A wsgi Python application.
options (dict): the configuration.
'''
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
config = dict((key, value) for key, value in self.options.items()
if key in self.cfg.settings and value is not None)
for key, value in config.items():
# not sure if we need the `key.lower` here, will just keep
# keep it for now.
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def create_app(settings):
"""Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
"""
app = Flask(__name__)
if not settings.get('cors_protection', True):
CORS(app)
app.debug = settings.get('debug', False)
app.register_blueprint(info_views, url_prefix='/api/v1')
app.register_blueprint(user_views, url_prefix='/api/v1')
app.register_blueprint(manifestation_views, url_prefix='/api/v1')
app.register_blueprint(right_views, url_prefix='/api/v1')
app.register_blueprint(work_views, url_prefix='/api/v1')
return app
def create_server(settings):
"""Wrap and return an application ready to be run.
Args:
settings (dict): a dictionary containing the settings, more info
here http://docs.gunicorn.org/en/latest/settings.html
Return:
an initialized instance of the application.
"""
settings = copy.deepcopy(settings)
if not settings.get('workers'):
settings['workers'] = (multiprocessing.cpu_count() * 2) + 1
if not settings.get('threads'):
settings['threads'] = (multiprocessing.cpu_count() * 2) + 1
app = create_app(settings)
standalone = StandaloneApplication(app, settings)
return standalone
if __name__ == '__main__':
# Double check in case the environment variable is sent via Docker,
# which will send empty strings for missing environment variables
hostname = os.environ.get('API_HOST', None)
if not hostname:
hostname = 'localhost'
port = os.environ.get('API_PORT', None)
if not port:
port = '3000'
cors_protection = os.environ.get('CORS_PROTECTION', 'True') == 'True'
# start the web api
settings = {
'bind': '{hostname}:{port}'.format(hostname=hostname, port=port),
'cors_protection': cors_protection,
'workers': 1,
'threads': 1
}
app_server = create_server(settings)
app_server.run()
|
[] |
[] |
[
"API_PORT",
"API_HOST",
"CORS_PROTECTION"
] |
[]
|
["API_PORT", "API_HOST", "CORS_PROTECTION"]
|
python
| 3 | 0 | |
vendor/github.com/cloudfoundry/libbuildpack/logger.go
|
package libbuildpack
import (
"fmt"
"io"
"os"
"strings"
)
type Logger struct {
w io.Writer
}
const (
msgPrefix = " "
redPrefix = "\033[31;1m"
bluePrefix = "\033[34;1m"
colorSuffix = "\033[0m"
msgError = msgPrefix + redPrefix + "**ERROR**" + colorSuffix
msgWarning = msgPrefix + redPrefix + "**WARNING**" + colorSuffix
msgProtip = msgPrefix + bluePrefix + "PRO TIP:" + colorSuffix
msgDebug = msgPrefix + bluePrefix + "DEBUG:" + colorSuffix
)
func NewLogger(w io.Writer) *Logger {
return &Logger{w: w}
}
func (l *Logger) Info(format string, args ...interface{}) {
l.printWithHeader(" ", format, args...)
}
func (l *Logger) Warning(format string, args ...interface{}) {
l.printWithHeader(msgWarning, format, args...)
}
func (l *Logger) Error(format string, args ...interface{}) {
l.printWithHeader(msgError, format, args...)
}
func (l *Logger) Debug(format string, args ...interface{}) {
if os.Getenv("BP_DEBUG") != "" {
l.printWithHeader(msgDebug, format, args...)
}
}
func (l *Logger) BeginStep(format string, args ...interface{}) {
l.printWithHeader("----->", format, args...)
}
func (l *Logger) Protip(tip string, helpURL string) {
l.printWithHeader(msgProtip, "%s", tip)
l.printWithHeader(msgPrefix+"Visit", "%s", helpURL)
}
func (l *Logger) printWithHeader(header string, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
msg = strings.Replace(msg, "\n", "\n ", -1)
fmt.Fprintf(l.w, "%s %s\n", header, msg)
}
func (l *Logger) Output() io.Writer {
return l.w
}
|
[
"\"BP_DEBUG\""
] |
[] |
[
"BP_DEBUG"
] |
[]
|
["BP_DEBUG"]
|
go
| 1 | 0 | |
examples/mtls/client.py
|
import os
import ssl
import asyncio
import logging
from pathlib import Path
from grpclib.client import Channel
from grpclib.health.v1.health_pb2 import HealthCheckRequest
from grpclib.health.v1.health_grpc import HealthStub
DIR = Path(__file__).parent.joinpath('keys')
SPY_MODE = 'SPY_MODE' in os.environ
SERVER_CERT = DIR.joinpath('mccoy.pem')
CLIENT_CERT = DIR.joinpath('spock-imposter.pem' if SPY_MODE else 'spock.pem')
CLIENT_KEY = DIR.joinpath('spock-imposter.key' if SPY_MODE else 'spock.key')
def create_secure_context(
client_cert: Path, client_key: Path, *, trusted: Path,
) -> ssl.SSLContext:
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(str(client_cert), str(client_key))
ctx.load_verify_locations(str(trusted))
ctx.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
ctx.set_ciphers('ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20')
ctx.set_alpn_protocols(['h2'])
try:
ctx.set_npn_protocols(['h2'])
except NotImplementedError:
pass
return ctx
async def main(*, host: str = 'localhost', port: int = 50051) -> None:
ssl_context = create_secure_context(
CLIENT_CERT, CLIENT_KEY, trusted=SERVER_CERT,
)
async with Channel(host, port, ssl=ssl_context) as channel:
stub = HealthStub(channel)
response = await stub.Check(HealthCheckRequest())
print(response)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.run(main())
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
repositories_test.go
|
package main
import (
"fmt"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"testing"
"github.com/google/go-github/v34/github"
"github.com/ktrysmt/go-bitbucket"
gitlab "github.com/xanzy/go-gitlab"
)
var (
GitHubClient *github.Client
GitLabClient *gitlab.Client
BitbucketClient *bitbucket.Client
mux *http.ServeMux
server *httptest.Server
)
func setup() {
os.Setenv("GITHUB_TOKEN", "$$$randome")
os.Setenv("GITLAB_TOKEN", "$$$randome")
os.Setenv("BITBUCKET_USERNAME", "bbuser")
os.Setenv("BITBUCKET_PASSWORD", "$$$randomp")
// test server
mux = http.NewServeMux()
server = httptest.NewServer(mux)
base, _ := url.Parse(server.URL)
// Add a trailing slash because GitHub SDK expects it
u, err := url.Parse("/")
if err != nil {
log.Fatal(err)
}
url := base.ResolveReference(u)
// github client configured to use test server
GitHubClient = github.NewClient(nil)
GitHubClient.BaseURL = url
// github client configured to use test server
GitLabClient, err = gitlab.NewClient("", gitlab.WithBaseURL(url.String()))
BitbucketClient = bitbucket.NewBasicAuth(os.Getenv("BITBUCKET_USERNAME"), os.Getenv("BITBUCKET_USERNAME"))
BitbucketClient.SetApiBaseURL(url.String())
}
func teardown() {
os.Unsetenv("GITHUB_TOKEN")
os.Unsetenv("GITLAB_TOKEN")
os.Unsetenv("BITBUCKET_USERNAME")
os.Unsetenv("BITBUCKET_PASSWORD")
server.Close()
}
func TestGetPublicGitHubRepositories(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `[{"full_name": "test/r1", "id":1, "ssh_url": "https://github.com/u/r1", "name": "r1", "private": false, "fork": false}]`)
})
repos, err := getRepositories(GitHubClient, "github", "all", "", "", false)
if err != nil {
t.Fatalf("%v", err)
}
var expected []*Repository
expected = append(expected, &Repository{Namespace: "test", CloneURL: "https://github.com/u/r1", Name: "r1", Private: false})
if !reflect.DeepEqual(repos, expected) {
t.Errorf("Expected %+v, Got %+v", expected, repos)
}
}
func TestGetPrivateGitHubRepositories(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `[{"full_name": "test/r1", "id":1, "ssh_url": "https://github.com/u/r1", "name": "r1", "private": true, "fork": false}]`)
})
repos, err := getRepositories(GitHubClient, "github", "all", "", "", false)
if err != nil {
t.Fatalf("%v", err)
}
var expected []*Repository
expected = append(expected, &Repository{Namespace: "test", CloneURL: "https://github.com/u/r1", Name: "r1", Private: true})
if !reflect.DeepEqual(repos, expected) {
t.Errorf("Expected %+v, Got %+v", expected, repos)
}
}
func TestGetStarredGitHubRepositories(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/user/starred", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `[{"repo":{"full_name": "test/r1", "id":1, "ssh_url": "https://github.com/u/r1", "name": "r1", "private": true, "fork": false}}]`)
})
repos, err := getRepositories(GitHubClient, "github", "starred", "", "", false)
if err != nil {
t.Fatalf("%v", err)
}
var expected []*Repository
expected = append(expected, &Repository{Namespace: "test", CloneURL: "https://github.com/u/r1", Name: "r1", Private: true})
if !reflect.DeepEqual(repos, expected) {
t.Errorf("Expected %+v, Got %+v", expected, repos)
}
}
func TestGetGitLabRepositories(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/api/v4/projects", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `[{"path_with_namespace": "test/r1", "id":1, "ssh_url_to_repo": "https://gitlab.com/u/r1", "name": "r1"}]`)
})
repos, err := getRepositories(GitLabClient, "gitlab", "internal", "", "", false)
if err != nil {
t.Fatalf("%v", err)
}
var expected []*Repository
expected = append(expected, &Repository{Namespace: "test", CloneURL: "https://gitlab.com/u/r1", Name: "r1"})
if !reflect.DeepEqual(repos, expected) {
for i := 0; i < len(repos); i++ {
t.Errorf("Expected %+v, Got %+v", expected[i], repos[i])
}
}
}
func TestGetStarredGitLabRepositories(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/api/v4/projects", func(w http.ResponseWriter, r *http.Request) {
log.Printf("%#v\n", r.URL.Query())
if len(r.URL.Query().Get("starred")) != 0 {
fmt.Fprint(w, `[{"path_with_namespace": "test/starred-repo-r1", "id":1, "ssh_url_to_repo": "https://gitlab.com/u/r1", "name": "starred-repo-r1"}]`)
return
}
fmt.Fprintf(w, `[]`)
})
repos, err := getRepositories(GitLabClient, "gitlab", "", "", "starred", false)
if err != nil {
t.Fatalf("%v", err)
}
var expected []*Repository
expected = append(expected, &Repository{Namespace: "test", CloneURL: "https://gitlab.com/u/r1", Name: "starred-repo-r1"})
if !reflect.DeepEqual(repos, expected) {
if len(repos) != len(expected) {
t.Fatalf("Expected: %#v, Got: %v", expected, repos)
}
for i := 0; i < len(expected); i++ {
t.Errorf("Expected %+v, Got %+v", expected[i], repos[i])
}
}
}
func TestGetBitbucketRepositories(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/workspaces", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `{"pagelen": 10, "page": 1, "size": 1, "values": [{"slug": "abc"}]}`)
})
mux.HandleFunc("/repositories/abc", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, `{"pagelen": 10, "page": 1, "size": 1, "values": [{"full_name":"abc/def", "slug":"def", "is_private":true, "links":{"clone":[{"name":"https", "href":"https://[email protected]/abc/def.git"}, {"name":"ssh", "href":"[email protected]:abc/def.git"}]}}]}`)
})
repos, err := getRepositories(BitbucketClient, "bitbucket", "", "", "", false)
if err != nil {
t.Fatalf("%v", err)
}
var expected []*Repository
expected = append(expected, &Repository{Namespace: "abc", CloneURL: "[email protected]:abc/def.git", Name: "def", Private: true})
if !reflect.DeepEqual(repos, expected) {
for i := 0; i < len(repos); i++ {
t.Errorf("Expected %+v, Got %+v", expected[i], repos[i])
}
}
}
|
[
"\"BITBUCKET_USERNAME\"",
"\"BITBUCKET_USERNAME\""
] |
[] |
[
"BITBUCKET_USERNAME"
] |
[]
|
["BITBUCKET_USERNAME"]
|
go
| 2 | 0 | |
Godeps/_workspace/src/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"fmt"
"io"
"math/rand"
"strings"
"time"
"github.com/hashicorp/golang-lru"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
func init() {
admission.RegisterPlugin("ResourceQuota",
func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
registry := install.NewRegistry(client)
return NewResourceQuota(client, registry)
})
}
// quotaAdmission implements an admission controller that can enforce quota constraints
type quotaAdmission struct {
*admission.Handler
// must be able to read/write ResourceQuota
client clientset.Interface
// indexer that holds quota objects by namespace
indexer cache.Indexer
// registry that knows how to measure usage for objects
registry quota.Registry
// liveLookups holds the last few live lookups we've done to help amortize cost on repeated lookup failures.
// This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results.
// We track the lookup result here so that for repeated requests, we don't look it up very often.
liveLookupCache *lru.Cache
liveTTL time.Duration
}
type liveLookupEntry struct {
expiry time.Time
items []*api.ResourceQuota
}
// NewResourceQuota configures an admission controller that can enforce quota constraints
// using the provided registry. The registry must have the capability to handle group/kinds that
// are persisted by the server this admission controller is intercepting
func NewResourceQuota(client clientset.Interface, registry quota.Registry) (admission.Interface, error) {
liveLookupCache, err := lru.New(100)
if err != nil {
return nil, err
}
lw := &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().ResourceQuotas(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return client.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
},
}
indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
reflector.Run()
return "aAdmission{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: client,
indexer: indexer,
registry: registry,
liveLookupCache: liveLookupCache,
liveTTL: time.Duration(30 * time.Second),
}, nil
}
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
// ignore all operations that correspond to sub-resource actions
if a.GetSubresource() != "" {
return nil
}
// if we do not know how to evaluate use for this kind, just ignore
evaluators := q.registry.Evaluators()
evaluator, found := evaluators[a.GetKind()]
if !found {
return nil
}
// for this kind, check if the operation could mutate any quota resources
// if no resources tracked by quota are impacted, then just return
op := a.GetOperation()
operationResources := evaluator.OperationResources(op)
if len(operationResources) == 0 {
return nil
}
// determine if there are any quotas in this namespace
// if there are no quotas, we don't need to do anything
namespace, name := a.GetNamespace(), a.GetName()
items, err := q.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: ""}})
if err != nil {
return admission.NewForbidden(a, fmt.Errorf("Error resolving quota."))
}
// if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it.
if len(items) == 0 {
lruItemObj, ok := q.liveLookupCache.Get(a.GetNamespace())
if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
// TODO: If there are multiple operations at the same time and cache has just expired,
// this may cause multiple List operations being issued at the same time.
// If there is already in-flight List() for a given namespace, we should wait until
// it is finished and cache is updated instead of doing the same, also to avoid
// throttling - see #22422 for details.
liveList, err := q.client.Core().ResourceQuotas(namespace).List(api.ListOptions{})
if err != nil {
return admission.NewForbidden(a, err)
}
newEntry := liveLookupEntry{expiry: time.Now().Add(q.liveTTL)}
for i := range liveList.Items {
newEntry.items = append(newEntry.items, &liveList.Items[i])
}
q.liveLookupCache.Add(a.GetNamespace(), newEntry)
lruItemObj = newEntry
}
lruEntry := lruItemObj.(liveLookupEntry)
for i := range lruEntry.items {
items = append(items, lruEntry.items[i])
}
}
// if there are still no items, we can return
if len(items) == 0 {
return nil
}
// find the set of quotas that are pertinent to this request
// reject if we match the quota, but usage is not calculated yet
// reject if the input object does not satisfy quota constraints
// if there are no pertinent quotas, we can just return
inputObject := a.GetObject()
resourceQuotas := []*api.ResourceQuota{}
for i := range items {
resourceQuota := items[i].(*api.ResourceQuota)
match := evaluator.Matches(resourceQuota, inputObject)
if !match {
continue
}
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
evaluatorResources := evaluator.MatchesResources()
requiredResources := quota.Intersection(hardResources, evaluatorResources)
err := evaluator.Constraints(requiredResources, inputObject)
if err != nil {
return admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
}
if !hasUsageStats(resourceQuota) {
return admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
}
resourceQuotas = append(resourceQuotas, resourceQuota)
}
if len(resourceQuotas) == 0 {
return nil
}
// there is at least one quota that definitely matches our object
// as a result, we need to measure the usage of this object for quota
// on updates, we need to subtract the previous measured usage
// if usage shows no change, just return since it has no impact on quota
deltaUsage := evaluator.Usage(inputObject)
if admission.Update == op {
prevItem, err := evaluator.Get(namespace, name)
if err != nil {
return admission.NewForbidden(a, fmt.Errorf("Unable to get previous: %v", err))
}
prevUsage := evaluator.Usage(prevItem)
deltaUsage = quota.Subtract(deltaUsage, prevUsage)
}
if quota.IsZero(deltaUsage) {
return nil
}
// TODO: Move to a bucketing work queue
// If we guaranteed that we processed the request in the order it was received by the server, we would reduce quota conflicts.
// Until we have the bucketing work queue, we jitter requests and retry on conflict.
numRetries := 10
interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond
// seed the retry loop with the initial set of quotas to process (should reduce each iteration)
resourceQuotasToProcess := resourceQuotas
for retry := 1; retry <= numRetries; retry++ {
// the list of quotas we will try again if there is a version conflict
tryAgain := []*api.ResourceQuota{}
// check that we pass all remaining quotas so we do not prematurely charge
// for each quota, mask the usage to the set of resources tracked by the quota
// if request + used > hard, return an error describing the failure
updatedUsage := map[string]api.ResourceList{}
for _, resourceQuota := range resourceQuotasToProcess {
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
requestedUsage := quota.Mask(deltaUsage, hardResources)
newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
if allowed, exceeded := quota.LessThanOrEqual(newUsage, resourceQuota.Status.Hard); !allowed {
failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
return admission.NewForbidden(a,
fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
resourceQuota.Name,
prettyPrint(failedRequestedUsage),
prettyPrint(failedUsed),
prettyPrint(failedHard)))
}
updatedUsage[resourceQuota.Name] = newUsage
}
// update the status for each quota with its new usage
// if we get a conflict, get updated quota, and enqueue
for i, resourceQuota := range resourceQuotasToProcess {
newUsage := updatedUsage[resourceQuota.Name]
quotaToUpdate := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
Name: resourceQuota.Name,
Namespace: resourceQuota.Namespace,
ResourceVersion: resourceQuota.ResourceVersion,
},
Status: api.ResourceQuotaStatus{
Hard: quota.Add(api.ResourceList{}, resourceQuota.Status.Hard),
Used: newUsage,
},
}
_, err = q.client.Core().ResourceQuotas(quotaToUpdate.Namespace).UpdateStatus(quotaToUpdate)
if err != nil {
if !errors.IsConflict(err) {
return admission.NewForbidden(a, fmt.Errorf("Unable to update quota status: %s %v", resourceQuota.Name, err))
}
// if we get a conflict, we get the latest copy of the quota documents that were not yet modified so we retry all with latest state.
for fetchIndex := i; fetchIndex < len(resourceQuotasToProcess); fetchIndex++ {
latestQuota, err := q.client.Core().ResourceQuotas(namespace).Get(resourceQuotasToProcess[fetchIndex].Name)
if err != nil {
return admission.NewForbidden(a, fmt.Errorf("Unable to get quota: %s %v", resourceQuotasToProcess[fetchIndex].Name, err))
}
tryAgain = append(tryAgain, latestQuota)
}
break
}
}
// all quotas were updated, so we can return
if len(tryAgain) == 0 {
return nil
}
// we have concurrent requests to update quota, so look to retry if needed
// next iteration, we need to process the items that have to try again
// pause the specified interval to encourage jitter
if retry == numRetries {
names := []string{}
for _, quota := range tryAgain {
names = append(names, quota.Name)
}
return admission.NewForbidden(a, fmt.Errorf("Unable to update status for quota: %s, ", strings.Join(names, ",")))
}
resourceQuotasToProcess = tryAgain
time.Sleep(interval)
}
return nil
}
// prettyPrint formats a resource list for usage in errors
func prettyPrint(item api.ResourceList) string {
parts := []string{}
for key, value := range item {
constraint := string(key) + "=" + value.String()
parts = append(parts, constraint)
}
return strings.Join(parts, ",")
}
// hasUsageStats returns true if for each hard constraint there is a value for its current usage
func hasUsageStats(resourceQuota *api.ResourceQuota) bool {
for resourceName := range resourceQuota.Status.Hard {
if _, found := resourceQuota.Status.Used[resourceName]; !found {
return false
}
}
return true
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
core/security/workspace.go
|
package security
import (
"context"
appleError "core/error"
"core/user"
"core/workspace"
wsCollaborator "core/workspace_collaborator"
"os"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func (security *Security) validateWorkspaceCollaborator(ctx context.Context,
database *mongo.Database, workspaceID, userID string) error {
filter := bson.M{"workspaceID": workspaceID, "userID": userID}
store := wsCollaborator.NewStore(security.Context, database)
option := options.Find().SetLimit(1)
option.SetProjection(bson.M{"_id": 1})
workspaceCollaborators, err := store.List(filter, option)
if err != nil {
return appleError.New(appleError.InternalServerErrorCode, appleError.InternalServerError,
appleError.InternalServerError)
}
if len(workspaceCollaborators) == 0 {
return appleError.New(appleError.ForbiddenErrorCode, appleError.ForbiddenError,
appleError.InvalidWorkspaceCollaborator)
}
return nil
}
func (security *Security) validateWorkspaceAdmin(ctx context.Context,
database *mongo.Database, workspaceID, userID string) error {
filter := bson.M{"workspaceID": workspaceID, "userID": userID,
"roleID": workspace.WorkspaceAdmin}
store := wsCollaborator.NewStore(security.Context, database)
option := options.Find().SetLimit(1)
option.SetProjection(bson.M{"_id": 1})
workspaceCollaborators, err := store.List(filter, option)
if err != nil {
return appleError.New(appleError.InternalServerErrorCode, appleError.InternalServerError,
appleError.InternalServerError)
}
if len(workspaceCollaborators) == 0 {
return appleError.New(appleError.ForbiddenErrorCode, appleError.ForbiddenError,
appleError.NotAnWorkspaceAdmin)
}
return nil
}
//GetWorkspaces security check
func (security *Security) GetWorkspaces(userID string) error {
database := security.getDatabase(os.Getenv("MONGO_DB_NAME"))
store := user.NewStore(security.Context, database)
_, err := store.Get(userID, options.FindOne().SetProjection(bson.M{"_id": 1}))
if err != nil {
if err == mongo.ErrNoDocuments {
return appleError.New(appleError.ForbiddenErrorCode, appleError.ForbiddenError,
appleError.InvalidWorkspaceOperation)
}
return appleError.New(appleError.InternalServerErrorCode, appleError.InternalServerError,
appleError.InternalServerError)
}
return nil
}
//CreateWorkspace security check
func (security *Security) CreateWorkspace(userID string, emailVerified bool) error {
if !emailVerified {
return appleError.New(appleError.ForbiddenErrorCode, appleError.ForbiddenError,
appleError.UnverifiedEmail)
}
database := security.getDatabase(os.Getenv("MONGO_DB_NAME"))
store := user.NewStore(security.Context, database)
_, err := store.Get(userID, options.FindOne().SetProjection(bson.M{"_id": 1}))
if err != nil {
if err == mongo.ErrNoDocuments {
return appleError.New(appleError.ForbiddenErrorCode, appleError.ForbiddenError,
appleError.InvalidWorkspaceOperation)
}
return appleError.New(appleError.InternalServerErrorCode, appleError.InternalServerError,
appleError.InternalServerError)
}
return nil
}
//GetWorkspace security check
func (security *Security) GetWorkspace(workspaceID, userID string) error {
database := security.getDatabase(os.Getenv("MONGO_DB_NAME"))
return security.validateWorkspaceCollaborator(security.Context, database, workspaceID,
userID)
}
//UpdateWorkspace security check
func (security *Security) UpdateWorkspace(workspaceID, userID string) error {
database := security.getDatabase(os.Getenv("MONGO_DB_NAME"))
return security.validateWorkspaceAdmin(security.Context, database, workspaceID, userID)
}
//DeleteWorkspace security check
func (security *Security) DeleteWorkspace(workspaceID, userID string) error {
database := security.getDatabase(os.Getenv("MONGO_DB_NAME"))
return security.validateWorkspaceAdmin(security.Context, database, workspaceID, userID)
}
//GetWorkspaceByAdmin security check
func (security *Security) GetWorkspaceByAdmin(workspaceID, userID string) error {
database := security.getDatabase(os.Getenv("MONGO_DB_NAME"))
return security.validateWorkspaceAdmin(security.Context, database, workspaceID, userID)
}
|
[
"\"MONGO_DB_NAME\"",
"\"MONGO_DB_NAME\"",
"\"MONGO_DB_NAME\"",
"\"MONGO_DB_NAME\"",
"\"MONGO_DB_NAME\"",
"\"MONGO_DB_NAME\""
] |
[] |
[
"MONGO_DB_NAME"
] |
[]
|
["MONGO_DB_NAME"]
|
go
| 1 | 0 | |
pkg/agent/main.go
|
// +build !windows
package main
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"time"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/rancher/rancher/pkg/agent/cluster"
"github.com/rancher/rancher/pkg/agent/node"
"github.com/rancher/rancher/pkg/logserver"
"github.com/rancher/rancher/pkg/remotedialer"
"github.com/rancher/rancher/pkg/rkenodeconfigclient"
"github.com/sirupsen/logrus"
)
var (
VERSION = "dev"
)
const (
Token = "X-API-Tunnel-Token"
Params = "X-API-Tunnel-Params"
)
func main() {
logserver.StartServerWithDefaults()
if os.Getenv("CATTLE_DEBUG") == "true" || os.Getenv("RANCHER_DEBUG") == "true" {
logrus.SetLevel(logrus.DebugLevel)
}
if err := run(); err != nil {
log.Fatal(err)
}
}
func isCluster() bool {
return os.Getenv("CATTLE_CLUSTER") == "true"
}
func getParams() (map[string]interface{}, error) {
if isCluster() {
return cluster.Params()
}
return node.Params(), nil
}
func getTokenAndURL() (string, string, error) {
token, url, err := node.TokenAndURL()
if err != nil {
return "", "", err
}
if token == "" {
return cluster.TokenAndURL()
}
return token, url, nil
}
func isConnect() bool {
if os.Getenv("CATTLE_AGENT_CONNECT") == "true" {
return true
}
_, err := os.Stat("connected")
return err == nil
}
func connected() {
f, err := os.Create("connected")
if err == nil {
f.Close()
}
}
func cleanup(ctx context.Context) error {
if os.Getenv("CATTLE_K8S_MANAGED") != "true" {
return nil
}
c, err := client.NewEnvClient()
if err != nil {
return err
}
defer c.Close()
args := filters.NewArgs()
args.Add("label", "io.cattle.agent=true")
containers, err := c.ContainerList(ctx, types.ContainerListOptions{
All: true,
Filters: args,
})
if err != nil {
return err
}
for _, container := range containers {
if _, ok := container.Labels["io.kubernetes.pod.namespace"]; ok {
continue
}
if strings.Contains(container.Names[0], "share-mnt") {
continue
}
container := container
go func() {
time.Sleep(15 * time.Second)
logrus.Infof("Removing unmanaged agent %s(%s)", container.Names[0], container.ID)
c.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
Force: true,
})
}()
}
return nil
}
func run() error {
logrus.Infof("Rancher agent version %s is starting", VERSION)
params, err := getParams()
if err != nil {
return err
}
writeCertsOnly := os.Getenv("CATTLE_WRITE_CERT_ONLY") == "true"
bytes, err := json.Marshal(params)
if err != nil {
return err
}
token, server, err := getTokenAndURL()
if err != nil {
return err
}
headers := map[string][]string{
Token: {token},
Params: {base64.StdEncoding.EncodeToString(bytes)},
}
serverURL, err := url.Parse(server)
if err != nil {
return err
}
onConnect := func(ctx context.Context) error {
connected()
connectConfig := fmt.Sprintf("https://%s/v3/connect/config", serverURL.Host)
if err := rkenodeconfigclient.ConfigClient(ctx, connectConfig, headers, writeCertsOnly); err != nil {
return err
}
if isCluster() {
return nil
}
if err := cleanup(context.Background()); err != nil {
return err
}
go func() {
logrus.Infof("Starting plan monitor")
for {
select {
case <-time.After(2 * time.Minute):
err := rkenodeconfigclient.ConfigClient(ctx, connectConfig, headers, writeCertsOnly)
if err != nil {
logrus.Errorf("failed to check plan: %v", err)
}
case <-ctx.Done():
return
}
}
}()
return nil
}
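// Reconnect loop: keep dialing the server's websocket endpoint and retry every 5 seconds.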
for {
wsURL := fmt.Sprintf("wss://%s/v3/connect", serverURL.Host)
if !isConnect() {
wsURL += "/register"
}
logrus.Infof("Connecting to %s with token %s", wsURL, token)
remotedialer.ClientConnect(wsURL, http.Header(headers), nil, func(proto, address string) bool {
switch proto {
case "tcp":
return true
case "unix":
return address == "/var/run/docker.sock"
}
return false
}, onConnect)
time.Sleep(5 * time.Second)
}
}
|
[
"\"CATTLE_DEBUG\"",
"\"RANCHER_DEBUG\"",
"\"CATTLE_CLUSTER\"",
"\"CATTLE_AGENT_CONNECT\"",
"\"CATTLE_K8S_MANAGED\"",
"\"CATTLE_WRITE_CERT_ONLY\""
] |
[] |
[
"RANCHER_DEBUG",
"CATTLE_DEBUG",
"CATTLE_CLUSTER",
"CATTLE_WRITE_CERT_ONLY",
"CATTLE_K8S_MANAGED",
"CATTLE_AGENT_CONNECT"
] |
[]
|
["RANCHER_DEBUG", "CATTLE_DEBUG", "CATTLE_CLUSTER", "CATTLE_WRITE_CERT_ONLY", "CATTLE_K8S_MANAGED", "CATTLE_AGENT_CONNECT"]
|
go
| 6 | 0 | |
src/cmd/dist/buildtool.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Build toolchain using Go 1.4.
//
// The general strategy is to copy the source files we need into
// a new GOPATH workspace, adjust import paths appropriately,
// invoke the Go 1.4 go command to build those sources,
// and then copy the binaries back.
package main
import (
"os"
"strings"
)
// bootstrapDirs is a list of directories holding code that must be
// compiled with a Go 1.4 toolchain to produce the bootstrapTargets.
// All directories in this list are relative to and must be below $GOROOT/src.
//
// The list has two kinds of entries: names beginning with cmd/ with
// no other slashes, which are commands, and other paths, which are packages
// supporting the commands. Packages in the standard library can be listed
// if a newer copy needs to be substituted for the Go 1.4 copy when used
// by the command packages.
// These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big.
var bootstrapDirs = []string{
"cmd/asm",
"cmd/asm/internal/arch",
"cmd/asm/internal/asm",
"cmd/asm/internal/flags",
"cmd/asm/internal/lex",
"cmd/compile",
"cmd/compile/internal/amd64",
"cmd/compile/internal/arm",
"cmd/compile/internal/arm64",
"cmd/compile/internal/gc",
"cmd/compile/internal/mips64",
"cmd/compile/internal/ppc64",
"cmd/compile/internal/s390x",
"cmd/compile/internal/ssa",
"cmd/compile/internal/syntax",
"cmd/compile/internal/x86",
"cmd/internal/bio",
"cmd/internal/gcprog",
"cmd/internal/dwarf",
"cmd/internal/obj",
"cmd/internal/obj/arm",
"cmd/internal/obj/arm64",
"cmd/internal/obj/mips",
"cmd/internal/obj/ppc64",
"cmd/internal/obj/s390x",
"cmd/internal/obj/x86",
"cmd/internal/sys",
"cmd/link",
"cmd/link/internal/amd64",
"cmd/link/internal/arm",
"cmd/link/internal/arm64",
"cmd/link/internal/ld",
"cmd/link/internal/mips64",
"cmd/link/internal/ppc64",
"cmd/link/internal/s390x",
"cmd/link/internal/x86",
"debug/pe",
"math/big",
}
// File suffixes that use build tags introduced since Go 1.4.
// These must not be copied into the bootstrap build directory.
var ignoreSuffixes = []string{
"_arm64.s",
"_arm64.go",
}
func bootstrapBuildTools() {
goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP")
if goroot_bootstrap == "" {
goroot_bootstrap = pathf("%s/go1.4", os.Getenv("HOME"))
}
xprintf("##### Building Go toolchain using %s.\n", goroot_bootstrap)
mkzbootstrap(pathf("%s/src/cmd/internal/obj/zbootstrap.go", goroot))
// Use $GOROOT/pkg/bootstrap as the bootstrap workspace root.
// We use a subdirectory of $GOROOT/pkg because that's the
// space within $GOROOT where we store all generated objects.
// We could use a temporary directory outside $GOROOT instead,
// but it is easier to debug on failure if the files are in a known location.
workspace := pathf("%s/pkg/bootstrap", goroot)
xremoveall(workspace)
base := pathf("%s/src/bootstrap", workspace)
xmkdirall(base)
// Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths.
for _, dir := range bootstrapDirs {
src := pathf("%s/src/%s", goroot, dir)
dst := pathf("%s/%s", base, dir)
xmkdirall(dst)
Dir:
for _, name := range xreaddirfiles(src) {
for _, suf := range ignoreSuffixes {
if strings.HasSuffix(name, suf) {
continue Dir
}
}
srcFile := pathf("%s/%s", src, name)
text := readfile(srcFile)
text = bootstrapFixImports(text, srcFile)
writefile(text, pathf("%s/%s", dst, name), 0)
}
}
// Set up environment for invoking Go 1.4 go command.
// GOROOT points at Go 1.4 GOROOT,
// GOPATH points at our bootstrap workspace,
// GOBIN is empty, so that binaries are installed to GOPATH/bin,
// and GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH are empty,
// so that Go 1.4 builds whatever kind of binary it knows how to build.
// Restore GOROOT, GOPATH, and GOBIN when done.
// Don't bother with GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH,
// because setup will take care of those when bootstrapBuildTools returns.
defer os.Setenv("GOROOT", os.Getenv("GOROOT"))
os.Setenv("GOROOT", goroot_bootstrap)
defer os.Setenv("GOPATH", os.Getenv("GOPATH"))
os.Setenv("GOPATH", workspace)
defer os.Setenv("GOBIN", os.Getenv("GOBIN"))
os.Setenv("GOBIN", "")
os.Setenv("GOOS", "")
os.Setenv("GOHOSTOS", "")
os.Setenv("GOARCH", "")
os.Setenv("GOHOSTARCH", "")
// Run Go 1.4 to build binaries. Use -gcflags=-l to disable inlining to
// workaround bugs in Go 1.4's compiler. See discussion thread:
// https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ
// Use the math_big_pure_go build tag to disable the assembly in math/big
// which may contain unsupported instructions.
run(workspace, ShowOutput|CheckExit, pathf("%s/bin/go", goroot_bootstrap), "install", "-gcflags=-l", "-tags=math_big_pure_go", "-v", "bootstrap/cmd/...")
// Copy binaries into tool binary directory.
for _, name := range bootstrapDirs {
if !strings.HasPrefix(name, "cmd/") {
continue
}
name = name[len("cmd/"):]
if !strings.Contains(name, "/") {
copyfile(pathf("%s/%s%s", tooldir, name, exe), pathf("%s/bin/%s%s", workspace, name, exe), writeExec)
}
}
xprintf("\n")
}
func bootstrapFixImports(text, srcFile string) string {
lines := strings.SplitAfter(text, "\n")
inBlock := false
for i, line := range lines {
if strings.HasPrefix(line, "import (") {
inBlock = true
continue
}
if inBlock && strings.HasPrefix(line, ")") {
inBlock = false
continue
}
if strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`) ||
inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")) {
line = strings.Replace(line, `"cmd/`, `"bootstrap/cmd/`, -1)
for _, dir := range bootstrapDirs {
if strings.HasPrefix(dir, "cmd/") {
continue
}
line = strings.Replace(line, `"`+dir+`"`, `"bootstrap/`+dir+`"`, -1)
}
lines[i] = line
}
}
lines[0] = "// Do not edit. Bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0]
return strings.Join(lines, "")
}
|
[
"\"GOROOT_BOOTSTRAP\"",
"\"HOME\"",
"\"GOROOT\"",
"\"GOPATH\"",
"\"GOBIN\""
] |
[] |
[
"GOBIN",
"GOROOT",
"GOPATH",
"GOROOT_BOOTSTRAP",
"HOME"
] |
[]
|
["GOBIN", "GOROOT", "GOPATH", "GOROOT_BOOTSTRAP", "HOME"]
|
go
| 5 | 0 | |
gunicorn.py
|
# Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configure the gunicorn server."""
import os
import gevent.monkey
# Ensure gevent is monkeypatched before ssl is imported (gunicorn does this too
# late). This is only necessary when `preload_app` is True. The gevent warning
# is still printed, but testing shows that recursion errors do not occur (e.g. on
# use of `requests`) when monkey-patching here.
# See also https://github.com/gevent/gevent/issues/1016 and
# https://github.com/benoitc/gunicorn/issues/1566
gevent.monkey.patch_all()
_config = os.environ["ENVIRONMENT"]
bind = "0.0.0.0:8000"
worker_class = "gevent"
timeout = 20
accesslog = "-"
access_log_format = '''%(t)s "%(r)s" %(s)s %(b)s %(L)s "%(f)s"'''
if _config == "production":
workers = os.cpu_count() * 2 + 1
preload_app = True
else:
# FIXME: The number of workers is up for debate. At least for testing more
# than one worker could make sense.
workers = 1
reload = True
|
[] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
python
| 1 | 0 | |
test_tensorflow.py
|
import os
import cv2
import numpy as np
import tensorflow as tf
import time
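# Make only the first GPU visible to TensorFlow; must be set before TF initializes CUDA.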
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
gpus = tf.config.list_physical_devices('GPU')
print("Devices: ", gpus)
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, False)
img = cv2.imread("imgs/test-img.jpeg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (480, 480)).transpose(2, 0, 1)
imgs = np.expand_dims(img, axis=0)
with tf.device("/gpu:0"):
imported = tf.saved_model.load("weights/test2.pb")
inference_func = imported.signatures["serving_default"]
imgs = tf.convert_to_tensor(imgs, dtype=tf.float32)
for i in range(100):
start_time = time.time()
inference_func(input=imgs)
print(time.time() - start_time)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
messaging/message_test.go
|
package messaging
import (
"bytes"
"encoding/json"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"log"
"net/http"
"net/http/httptest"
"os"
)
type TestArgsData struct {
Token string `json:"token"`
Topic string `json:"topic"`
Title string `json:"title"`
Body string `json:"body"`
Icon string `json:"icon"`
Data interface{} `json:"data"`
}
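// The Firebase server key is read from the environment and later injected as SERVER_KEY for the success-path tests.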
var serverKey = os.Getenv("FIREBASE_SERVER_KEY")
var _ = Describe("Firebase cloud messaging without server key", func() {
testmessage := TestArgsData{
Token: "eNstKTcV5Dg:APA91bEGvEHaP6-UdcLBfgaib1lOPUZgrP1QYDAOUoZc_ZQNNlGO1afiR5lGYqbuJTc4YQ0yn3Xogjuj1GeryvvgkcutItfu0kjMwCTIN2CNdp9oiQBPm2394FxHjWMyW8ZgsL1p4xHo",
Title: "Test cases",
Body: "Hello body from cli"}
requestBody := new(bytes.Buffer)
errr := json.NewEncoder(requestBody).Encode(testmessage)
if errr != nil {
log.Fatal(errr)
}
req, err := http.NewRequest("POST", "/send-message-by-token", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMessageByToken)
handler.ServeHTTP(recorder, req)
Describe("Send message by token", func() {
Context("SendMessageByToken", func() {
It("Should result http.StatusBadRequest", func() {
Expect(recorder.Code).To(Equal(http.StatusBadRequest))
})
})
})
})
var _ = Describe("Firebase cloud messaging negative testing without enviroment variables", func() {
testmessage := TestArgsData{
Token: "eNstKTcV5Dg:APA91bEGvEHaP6-UdcLBfgaasasassaswOUoZc_ZQNNlGO1afiR5lGYqbuJTc4YQ0yn3Xogjuj1GeryvvgkcutItfu0kjMwCTIN2CNdp9oiQBPm2394FxHjWMyW8ZgsL1p4xHo",
Topic: "news",
Title: "Test cases",
Body: "Hello body from cli"}
requestBody := new(bytes.Buffer)
errr := json.NewEncoder(requestBody).Encode(testmessage)
if errr != nil {
log.Fatal(errr)
}
req, err := http.NewRequest("POST", "/send-message-by-topic", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMessageByTopic)
handler.ServeHTTP(recorder, req)
Describe("Send message by topic", func() {
Context("SendMessageByTopic", func() {
It("Should result http.StatusBadRequest", func() {
Expect(recorder.Code).To(Equal(http.StatusBadRequest))
})
})
})
})
var _ = Describe("Firebase cloud messaging negative testing for token", func() {
testmessage := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
errr := json.NewEncoder(requestBody).Encode(testmessage)
if errr != nil {
log.Fatal(errr)
}
req, err := http.NewRequest("POST", "/send-message-by-token", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMessageByToken)
handler.ServeHTTP(recorder, req)
Describe("Send message by token", func() {
Context("SendMessageByToken", func() {
It("Should result http.StatusBadRequest", func() {
Expect(recorder.Code).To(Equal(http.StatusBadRequest))
})
})
})
})
var _ = Describe("Firebase cloud messaging by token", func() {
os.Setenv("SERVER_KEY", serverKey)
testmessage := TestArgsData{
Token: "eNstKTcV5Dg:APA91bEGvEHaP6-UdcLBfgaib1lOPUZgrP1QYDAOUoZc_ZQNNlGO1afiR5lGYqbuJTc4YQ0yn3Xogjuj1GeryvvgkcutItfu0kjMwCTIN2CNdp9oiQBPm2394FxHjWMyW8ZgsL1p4xHo",
Title: "Test cases",
Body: "Hello body from cli"}
requestBody := new(bytes.Buffer)
errr := json.NewEncoder(requestBody).Encode(testmessage)
if errr != nil {
log.Fatal(errr)
}
req, err := http.NewRequest("POST", "/send-message-by-token", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMessageByToken)
handler.ServeHTTP(recorder, req)
Describe("Send message by token", func() {
Context("SendMessageByToken", func() {
It("Should result http.StatusOK", func() {
Expect(recorder.Code).To(Equal(http.StatusOK))
})
})
})
})
var _ = Describe("Firebase cloud messaging negative testing with args", func() {
os.Setenv("SERVER_KEY", serverKey)
testmessage := []byte(`{"status":false}`)
requestBody := new(bytes.Buffer)
errr := json.NewEncoder(requestBody).Encode(testmessage)
if errr != nil {
log.Fatal(errr)
}
req, err := http.NewRequest("POST", "/send-message-by-topic", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMessageByTopic)
handler.ServeHTTP(recorder, req)
Describe("Send message by topic", func() {
Context("SendMessageByTopic", func() {
It("Should result http.StatusBadRequest", func() {
Expect(recorder.Code).To(Equal(http.StatusBadRequest))
})
})
})
})
var _ = Describe("Firebase cloud messaging negative testing for topic", func() {
os.Setenv("SERVER_KEY", serverKey)
testmessage := TestArgsData{
Token: "eNstKTcV5Dg:APA91bEGvEHaP6-UdcLBfgaasasassaswOUoZc_ZQNNlGO1afiR5lGYqbuJTc4YQ0yn3Xogjuj1GeryvvgkcutItfu0kjMwCTIN2CNdp9oiQBPm2394FxHjWMyW8ZgsL1p4xHo",
Topic: "news",
Title: "Test cases",
Body: "Hello body from cli"}
requestBody := new(bytes.Buffer)
errr := json.NewEncoder(requestBody).Encode(testmessage)
if errr != nil {
log.Fatal(errr)
}
req, err := http.NewRequest("POST", "/send-message-by-topic", requestBody)
if err != nil {
log.Fatal(err)
}
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(SendMessageByTopic)
handler.ServeHTTP(recorder, req)
Describe("Send message by topic", func() {
Context("SendMessageByTopic", func() {
It("Should result http.StatusOK", func() {
Expect(recorder.Code).To(Equal(http.StatusOK))
})
})
})
})
|
[
"\"FIREBASE_SERVER_KEY\""
] |
[] |
[
"FIREBASE_SERVER_KEY"
] |
[]
|
["FIREBASE_SERVER_KEY"]
|
go
| 1 | 0 | |
space_agency/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'space_agency.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
models/attachment.go
|
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"fmt"
"io"
"mime/multipart"
"os"
"path"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
api "code.gitea.io/sdk/gitea"
"github.com/go-xorm/xorm"
gouuid "github.com/satori/go.uuid"
)
// Attachment represents an attachment of an issue/comment/release.
type Attachment struct {
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
IssueID int64 `xorm:"INDEX"`
ReleaseID int64 `xorm:"INDEX"`
CommentID int64
Name string
DownloadCount int64 `xorm:"DEFAULT 0"`
Size int64 `xorm:"DEFAULT 0"`
CreatedUnix util.TimeStamp `xorm:"created"`
}
// IncreaseDownloadCount increases the download count by 1
func (a *Attachment) IncreaseDownloadCount() error {
// Update download count.
if _, err := x.Exec("UPDATE `attachment` SET download_count=download_count+1 WHERE id=?", a.ID); err != nil {
return fmt.Errorf("increase attachment count: %v", err)
}
return nil
}
// APIFormat converts models.Attachment to api.Attachment
func (a *Attachment) APIFormat() *api.Attachment {
return &api.Attachment{
ID: a.ID,
Name: a.Name,
Created: a.CreatedUnix.AsTime(),
DownloadCount: a.DownloadCount,
Size: a.Size,
UUID: a.UUID,
DownloadURL: a.DownloadURL(),
}
}
// AttachmentLocalPath returns where attachment is stored in local file
// system based on given UUID.
func AttachmentLocalPath(uuid string) string {
return path.Join(setting.AttachmentPath, uuid[0:1], uuid[1:2], uuid)
}
// LocalPath returns where attachment is stored in local file system.
func (a *Attachment) LocalPath() string {
return AttachmentLocalPath(a.UUID)
}
// DownloadURL returns the download url of the attached file
func (a *Attachment) DownloadURL() string {
return fmt.Sprintf("%sattachments/%s", setting.AppURL, a.UUID)
}
// NewAttachment creates a new attachment object.
func NewAttachment(name string, buf []byte, file multipart.File) (_ *Attachment, err error) {
attach := &Attachment{
UUID: gouuid.NewV4().String(),
Name: name,
}
localPath := attach.LocalPath()
if err = os.MkdirAll(path.Dir(localPath), os.ModePerm); err != nil {
return nil, fmt.Errorf("MkdirAll: %v", err)
}
fw, err := os.Create(localPath)
if err != nil {
return nil, fmt.Errorf("Create: %v", err)
}
defer fw.Close()
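// Write the bytes already buffered in buf, then stream the rest of the uploaded file.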
if _, err = fw.Write(buf); err != nil {
return nil, fmt.Errorf("Write: %v", err)
} else if _, err = io.Copy(fw, file); err != nil {
return nil, fmt.Errorf("Copy: %v", err)
}
// Update file size
var fi os.FileInfo
if fi, err = fw.Stat(); err != nil {
return nil, fmt.Errorf("file size: %v", err)
}
attach.Size = fi.Size()
if _, err := x.Insert(attach); err != nil {
return nil, err
}
return attach, nil
}
// GetAttachmentByID returns attachment by given id
func GetAttachmentByID(id int64) (*Attachment, error) {
return getAttachmentByID(x, id)
}
func getAttachmentByID(e Engine, id int64) (*Attachment, error) {
attach := &Attachment{ID: id}
if has, err := e.Get(attach); err != nil {
return nil, err
} else if !has {
return nil, ErrAttachmentNotExist{ID: id, UUID: ""}
}
return attach, nil
}
func getAttachmentByUUID(e Engine, uuid string) (*Attachment, error) {
attach := &Attachment{UUID: uuid}
has, err := e.Get(attach)
if err != nil {
return nil, err
} else if !has {
return nil, ErrAttachmentNotExist{0, uuid}
}
return attach, nil
}
func getAttachmentsByUUIDs(e Engine, uuids []string) ([]*Attachment, error) {
if len(uuids) == 0 {
return []*Attachment{}, nil
}
// Silently drop invalid uuids.
attachments := make([]*Attachment, 0, len(uuids))
return attachments, e.In("uuid", uuids).Find(&attachments)
}
// GetAttachmentByUUID returns attachment by given UUID.
func GetAttachmentByUUID(uuid string) (*Attachment, error) {
return getAttachmentByUUID(x, uuid)
}
func getAttachmentsByIssueID(e Engine, issueID int64) ([]*Attachment, error) {
attachments := make([]*Attachment, 0, 10)
return attachments, e.Where("issue_id = ? AND comment_id = 0", issueID).Find(&attachments)
}
// GetAttachmentsByIssueID returns all attachments of an issue.
func GetAttachmentsByIssueID(issueID int64) ([]*Attachment, error) {
return getAttachmentsByIssueID(x, issueID)
}
// GetAttachmentsByCommentID returns all attachments of the comment with the given ID.
func GetAttachmentsByCommentID(commentID int64) ([]*Attachment, error) {
return getAttachmentsByCommentID(x, commentID)
}
func getAttachmentsByCommentID(e Engine, commentID int64) ([]*Attachment, error) {
attachments := make([]*Attachment, 0, 10)
return attachments, x.Where("comment_id=?", commentID).Find(&attachments)
}
// DeleteAttachment deletes the given attachment and optionally the associated file.
func DeleteAttachment(a *Attachment, remove bool) error {
_, err := DeleteAttachments([]*Attachment{a}, remove)
return err
}
// DeleteAttachments deletes the given attachments and optionally the associated files.
func DeleteAttachments(attachments []*Attachment, remove bool) (int, error) {
if len(attachments) == 0 {
return 0, nil
}
var ids = make([]int64, 0, len(attachments))
for _, a := range attachments {
ids = append(ids, a.ID)
}
cnt, err := x.In("id", ids).NoAutoCondition().Delete(attachments[0])
if err != nil {
return 0, err
}
if remove {
for i, a := range attachments {
if err := os.Remove(a.LocalPath()); err != nil {
return i, err
}
}
}
return int(cnt), nil
}
// DeleteAttachmentsByIssue deletes all attachments associated with the given issue.
func DeleteAttachmentsByIssue(issueID int64, remove bool) (int, error) {
attachments, err := GetAttachmentsByIssueID(issueID)
if err != nil {
return 0, err
}
return DeleteAttachments(attachments, remove)
}
// DeleteAttachmentsByComment deletes all attachments associated with the given comment.
func DeleteAttachmentsByComment(commentID int64, remove bool) (int, error) {
attachments, err := GetAttachmentsByCommentID(commentID)
if err != nil {
return 0, err
}
return DeleteAttachments(attachments, remove)
}
// UpdateAttachment updates the given attachment in database
func UpdateAttachment(atta *Attachment) error {
return updateAttachment(x, atta)
}
func updateAttachment(e Engine, atta *Attachment) error {
var sess *xorm.Session
if atta.ID != 0 && atta.UUID == "" {
sess = e.ID(atta.ID)
} else {
// Use uuid only if id is not set and uuid is set
sess = e.Where("uuid = ?", atta.UUID)
}
_, err := sess.Cols("name", "issue_id", "release_id", "comment_id", "download_count").Update(atta)
return err
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
tools/v8_presubmit.py
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
try:
import hashlib
md5er = hashlib.md5
except ImportError as e:
import md5
md5er = md5.new
import json
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# We now run our own header guard check in PRESUBMIT.py.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
LINT_RULES = """
-build/header_guard
-build/include_what_you_use
-readability/fn_size
-readability/multiline_comment
-runtime/references
-whitespace/comments
""".split()
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--opt[^-].*\n")
ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-opt.*\n")
TOOLS_PATH = dirname(abspath(__file__))
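# Runs a single cpplint process over one file and returns the number of lint errors reported.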
def CppLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = -1
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
if error_count == -1:
print("Failed to process %s" % command.pop())
return 1
break
m = LINT_OUTPUT_PATTERN.match(out_line)
if m:
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running cpplint.py. Please make sure you have depot_tools' +
' in your $PATH. Lint check skipped.')
process.kill()
def TorqueLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = 0
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
break
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
if error_count != 0:
sys.stdout.write(
"warning: formatting and overwriting unformatted Torque files\n")
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running format-torque.py')
process.kill()
class FileContentsCache(object):
def __init__(self, sums_file_name):
self.sums = {}
self.sums_file_name = sums_file_name
def Load(self):
try:
sums_file = None
try:
sums_file = open(self.sums_file_name, 'r')
self.sums = pickle.load(sums_file)
except:
# Cannot parse pickle for any reason. Not much we can do about it.
pass
finally:
if sums_file:
sums_file.close()
def Save(self):
try:
sums_file = open(self.sums_file_name, 'w')
pickle.dump(self.sums, sums_file)
except:
# Failed to write pickle. Try to clean-up behind us.
if sums_file:
sums_file.close()
try:
os.unlink(self.sums_file_name)
except:
pass
finally:
sums_file.close()
def FilterUnchangedFiles(self, files):
changed_or_new = []
for file in files:
try:
handle = open(file, "r")
file_sum = md5er(handle.read()).digest()
if not file in self.sums or self.sums[file] != file_sum:
changed_or_new.append(file)
self.sums[file] = file_sum
finally:
handle.close()
return changed_or_new
def RemoveFile(self, file):
if file in self.sums:
self.sums.pop(file)
class SourceFileProcessor(object):
"""
Utility class that can run through a directory structure, find all relevant
files and invoke a custom check on the files.
"""
def RunOnPath(self, path):
"""Runs processor on all files under the given path."""
all_files = []
for file in self.GetPathsToSearch():
all_files += self.FindFilesIn(join(path, file))
return self.ProcessFiles(all_files)
def RunOnFiles(self, files):
"""Runs processor only on affected files."""
# Helper for getting directory pieces.
dirs = lambda f: dirname(f).split(os.sep)
# Path offsets where to look (to be in sync with RunOnPath).
# Normalize '.' to check for it with str.startswith.
search_paths = [('' if p == '.' else p) for p in self.GetPathsToSearch()]
all_files = [
f.AbsoluteLocalPath()
for f in files
if (not self.IgnoreFile(f.LocalPath()) and
self.IsRelevant(f.LocalPath()) and
all(not self.IgnoreDir(d) for d in dirs(f.LocalPath())) and
any(map(f.LocalPath().startswith, search_paths)))
]
return self.ProcessFiles(all_files)
def IgnoreDir(self, name):
return (name.startswith('.') or
name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
'octane', 'sunspider', 'traces-arm64'))
def IgnoreFile(self, name):
return name.startswith('.')
def FindFilesIn(self, path):
result = []
for (root, dirs, files) in os.walk(path):
for ignored in [x for x in dirs if self.IgnoreDir(x)]:
dirs.remove(ignored)
for file in files:
if not self.IgnoreFile(file) and self.IsRelevant(file):
result.append(join(root, file))
return result
class CacheableSourceFileProcessor(SourceFileProcessor):
"""Utility class that allows caching ProcessFiles() method calls.
In order to use it, create a ProcessFilesWithoutCaching method that returns
the files requiring intervention after processing the source files.
"""
def __init__(self, use_cache, cache_file_path, file_type):
self.use_cache = use_cache
self.cache_file_path = cache_file_path
self.file_type = file_type
def GetProcessorWorker(self):
"""Expected to return the worker function to run the formatter."""
raise NotImplementedError
def GetProcessorScript(self):
"""Expected to return a tuple
(path to the format processor script, list of arguments)."""
raise NotImplementedError
def GetProcessorCommand(self):
format_processor, options = self.GetProcessorScript()
if not format_processor:
print('Could not find the formatter for %s files' % self.file_type)
sys.exit(1)
command = [sys.executable, format_processor]
command.extend(options)
return command
def ProcessFiles(self, files):
if self.use_cache:
cache = FileContentsCache(self.cache_file_path)
cache.Load()
files = cache.FilterUnchangedFiles(files)
if len(files) == 0:
print('No changes in %s files detected. Skipping check' % self.file_type)
return True
files_requiring_changes = self.DetectFilesToChange(files)
print (
'Total %s files found that require formatting: %d' %
(self.file_type, len(files_requiring_changes)))
if self.use_cache:
for file in files_requiring_changes:
cache.RemoveFile(file)
cache.Save()
return files_requiring_changes == []
def DetectFilesToChange(self, files):
command = self.GetProcessorCommand()
worker = self.GetProcessorWorker()
commands = [command + [file] for file in files]
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(count)
try:
results = pool.map_async(worker, commands).get(timeout=240)
except KeyboardInterrupt:
print("\nCaught KeyboardInterrupt, terminating workers.")
pool.terminate()
pool.join()
sys.exit(1)
unformatted_files = []
for index, errors in enumerate(results):
if errors > 0:
unformatted_files.append(files[index])
return unformatted_files
class CppLintProcessor(CacheableSourceFileProcessor):
"""
Lint files to check that they follow the google code style.
"""
def __init__(self, use_cache=True):
super(CppLintProcessor, self).__init__(
use_cache=use_cache, cache_file_path='.cpplint-cache', file_type='C/C++')
def IsRelevant(self, name):
return name.endswith('.cc') or name.endswith('.h')
def IgnoreDir(self, name):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
IGNORE_LINT = ['export-template.h', 'flag-definitions.h']
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
dirs = ['include', 'samples', 'src']
test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests']
return dirs + [join('test', dir) for dir in test_dirs]
def GetProcessorWorker(self):
return CppLintWorker
def GetProcessorScript(self):
filters = ','.join([n for n in LINT_RULES])
arguments = ['--filter', filters]
for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
cpplint = os.path.join(path, 'cpplint.py')
if os.path.isfile(cpplint):
return cpplint, arguments
return None, arguments
class TorqueLintProcessor(CacheableSourceFileProcessor):
"""
Check .tq files to verify they follow the Torque style guide.
"""
def __init__(self, use_cache=True):
super(TorqueLintProcessor, self).__init__(
use_cache=use_cache, cache_file_path='.torquelint-cache',
file_type='Torque')
def IsRelevant(self, name):
return name.endswith('.tq')
def GetPathsToSearch(self):
dirs = ['third_party', 'src']
test_dirs = ['torque']
return dirs + [join('test', dir) for dir in test_dirs]
def GetProcessorWorker(self):
return TorqueLintWorker
def GetProcessorScript(self):
torque_tools = os.path.join(TOOLS_PATH, "torque")
torque_path = os.path.join(torque_tools, "format-torque.py")
arguments = ["-il"]
if os.path.isfile(torque_path):
return torque_path, arguments
return None, arguments
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
"""
Check that all files include a copyright notice and no trailing whitespaces.
"""
RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status', '.tq', '.g4']
def __init__(self):
self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
def CreateRuntimeFunctionCallMatcher(self):
runtime_h_path = join(dirname(TOOLS_PATH), 'src/runtime/runtime.h')
pattern = re.compile(r'\s+F\(([^,]*),.*\)')
runtime_functions = []
with open(runtime_h_path) as f:
for line in f.readlines():
m = pattern.match(line)
if m:
runtime_functions.append(m.group(1))
if len(runtime_functions) < 250:
print("Runtime functions list is suspiciously short. "
"Consider updating the presubmit script.")
sys.exit(1)
pattern_string = r'(\%\s+(' + '|'.join(runtime_functions) + r'))[\s\(]'
return re.compile(pattern_string)
# Overwriting the one in the parent class.
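# When the path is a git checkout, the candidate list comes from
# `git ls-files --full-name` (still honoring IgnoreDir, IgnoreFile and
# IsRelevant); otherwise, or if the git call fails, the recursive directory
# walk of the parent class is used as a fallback.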
def FindFilesIn(self, path):
if os.path.exists(path+'/.git'):
output = subprocess.Popen('git ls-files --full-name',
stdout=PIPE, cwd=path, shell=True)
result = []
for file in output.stdout.read().split():
for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
else:
if (self.IsRelevant(file) and os.path.exists(file)
and not self.IgnoreFile(file)):
result.append(join(path, file))
if output.wait() == 0:
return result
return super(SourceProcessor, self).FindFilesIn(path)
def IsRelevant(self, name):
for ext in SourceProcessor.RELEVANT_EXTENSIONS:
if name.endswith(ext):
return True
return False
def GetPathsToSearch(self):
return ['.']
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name) or
name in ('third_party', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
'copy.js',
'corrections.js',
'crypto.js',
'daemon.py',
'earley-boyer.js',
'fannkuch.js',
'fasta.js',
'injected-script.cc',
'injected-script.h',
'libraries.cc',
'libraries-empty.cc',
'lua_binarytrees.js',
'meta-123.js',
'memops.js',
'poppler.js',
'primes.js',
'raytrace.js',
'regexp-pcre.js',
'resources-123.js',
'sqlite.js',
'sqlite-change-heap.js',
'sqlite-pointer-masking.js',
'sqlite-safe-heap.js',
'v8-debugger-script.h',
'v8-inspector-impl.cc',
'v8-inspector-impl.h',
'v8-runtime-agent-impl.cc',
'v8-runtime-agent-impl.h',
'gnuplot-4.6.3-emscripten.js',
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
IGNORE_COPYRIGHTS_DIRECTORY = "test/test262/local-tests"
def EndOfDeclaration(self, line):
return line == "}" or line == "};"
def StartOfDeclaration(self, line):
return line.find("//") == 0 or \
line.find("/*") == 0 or \
line.find(") {") != -1
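# ProcessContents performs the per-file content checks: no tab characters
# (unless listed in IGNORE_TABS), a recognizable copyright header, no trailing
# whitespace, exactly one trailing newline, flag comments in mjsunit/debugger
# tests using dashes rather than underscores, --opt / --no-always-opt present
# when assertOptimized() / assertUnoptimized() are used, and no whitespace
# between '%' and the function name in natives-syntax runtime calls.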
def ProcessContents(self, name, contents):
result = True
base = basename(name)
if base not in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
print("%s contains tabs" % name)
result = False
if base not in SourceProcessor.IGNORE_COPYRIGHTS and \
SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY not in name:
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print("%s is missing a correct copyright header." % name)
result = False
if ' \n' in contents or contents.endswith(' '):
line = 0
lines = []
parts = contents.split(' \n')
if not contents.endswith(' '):
parts.pop()
for part in parts:
line += part.count('\n') + 1
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
print("%s has trailing whitespaces in lines %s." % (name, linenumbers))
else:
print("%s has trailing whitespaces in line %s." % (name, linenumbers))
result = False
if not contents.endswith('\n') or contents.endswith('\n\n'):
print("%s does not end with a single new line." % name)
result = False
# Sanitize flags for fuzzer.
if ".js" in name and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
print("%s Flags should use '-' (not '_')" % name)
result = False
if (not "mjsunit/mjsunit.js" in name and
not "mjsunit/mjsunit_numfuzz.js" in name):
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
not FLAGS_ENABLE_OPT.search(contents):
print("%s Flag --opt should be set if " \
"assertOptimized() is used" % name)
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
not FLAGS_NO_ALWAYS_OPT.search(contents):
print("%s Flag --no-always-opt should be set if " \
"assertUnoptimized() is used" % name)
result = False
match = self.runtime_function_call_pattern.search(contents)
if match:
print("%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1)))
result = False
return result
def ProcessFiles(self, files):
success = True
violations = 0
for file in files:
handle = open(file)
try:
contents = handle.read()
if len(contents) > 0 and not self.ProcessContents(file, contents):
success = False
violations += 1
finally:
handle.close()
print("Total violating files: %s" % violations)
return success
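# _CheckStatusFileForDuplicateKeys normalizes a .status file (written as a
# Python-literal-like structure with comments and trailing commas) into strict
# JSON and re-parses it with an object_pairs_hook, since json.loads would
# otherwise silently keep only the last occurrence of a duplicated key.
# For example, a hypothetical section such as
#   ['mode == debug', { 'foo/bar': [SKIP], 'foo/bar': [FAIL] }]
# would be reported because 'foo/bar' appears twice in the same block.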
def _CheckStatusFileForDuplicateKeys(filepath):
comma_space_bracket = re.compile(", *]")
lines = []
with open(filepath) as f:
for line in f.readlines():
# Skip all-comment lines.
if line.lstrip().startswith("#"): continue
# Strip away comments at the end of the line.
comment_start = line.find("#")
if comment_start != -1:
line = line[:comment_start]
line = line.strip()
# Strip away trailing commas within the line.
line = comma_space_bracket.sub("]", line)
if len(line) > 0:
lines.append(line)
# Strip away trailing commas at line ends. Ugh.
for i in range(len(lines) - 1):
if (lines[i].endswith(",") and len(lines[i + 1]) > 0 and
lines[i + 1][0] in ("}", "]")):
lines[i] = lines[i][:-1]
contents = "\n".join(lines)
# JSON wants double-quotes.
contents = contents.replace("'", '"')
# Fill in keywords (like PASS, SKIP).
for key in statusfile.KEYWORDS:
contents = re.sub(r"\b%s\b" % key, "\"%s\"" % key, contents)
status = {"success": True}
def check_pairs(pairs):
keys = {}
for key, value in pairs:
if key in keys:
print("%s: Error: duplicate key %s" % (filepath, key))
status["success"] = False
keys[key] = True
json.loads(contents, object_pairs_hook=check_pairs)
return status["success"]
class StatusFilesProcessor(SourceFileProcessor):
"""Checks status files for incorrect syntax and duplicate keys."""
def IsRelevant(self, name):
# Several changes to files under the test directories could impact status
# files.
return True
def GetPathsToSearch(self):
return ['test', 'tools/testrunner']
def ProcessFiles(self, files):
success = True
for status_file_path in sorted(self._GetStatusFiles(files)):
success &= statusfile.PresubmitCheck(status_file_path)
success &= _CheckStatusFileForDuplicateKeys(status_file_path)
return success
def _GetStatusFiles(self, files):
test_path = join(dirname(TOOLS_PATH), 'test')
testrunner_path = join(TOOLS_PATH, 'testrunner')
status_files = set()
for file_path in files:
if file_path.startswith(testrunner_path):
for suitepath in os.listdir(test_path):
suitename = os.path.basename(suitepath)
status_file = os.path.join(
test_path, suitename, suitename + ".status")
if os.path.exists(status_file):
status_files.add(status_file)
return status_files
for file_path in files:
if file_path.startswith(test_path):
# Strip off absolute path prefix pointing to test suites.
pieces = file_path[len(test_path):].lstrip(os.sep).split(os.sep)
if pieces:
# Infer affected status file name. Only care for existing status
# files. Some directories under "test" don't have any.
if not os.path.isdir(join(test_path, pieces[0])):
continue
status_file = join(test_path, pieces[0], pieces[0] + ".status")
if not os.path.exists(status_file):
continue
status_files.add(status_file)
return status_files
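# CheckDeps and PyTests are thin wrappers around existing tooling: CheckDeps
# runs buildtools/checkdeps/checkdeps.py over the workspace, and PyTests
# executes a fixed list of tool unit tests, folding their exit statuses into a
# single boolean result.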
def CheckDeps(workspace):
checkdeps_py = join(workspace, 'buildtools', 'checkdeps', 'checkdeps.py')
return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
def PyTests(workspace):
result = True
for script in [
join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
join(workspace, 'tools', 'release', 'test_scripts.py'),
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
]:
print('Running ' + script)
result &= subprocess.call(
[sys.executable, script], stdout=subprocess.PIPE) == 0
return result
def GetOptions():
result = optparse.OptionParser()
result.add_option('--no-lint', help="Do not run cpplint", default=False,
action="store_true")
result.add_option('--no-linter-cache', help="Do not cache linter results",
default=False, action="store_true")
return result
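# Typical usage, assuming the script is invoked from a checkout (the exact
# script name is not spelled out here):
#   python tools/<this-script>.py                   # run every check
#   python tools/<this-script>.py --no-lint         # skip the cpplint pass
#   python tools/<this-script>.py --no-linter-cache # ignore linter caches
# Main() returns 0 only when all checks pass, so the process exit code can be
# consumed directly by wrapper scripts or CI.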
def Main():
workspace = abspath(join(dirname(sys.argv[0]), '..'))
parser = GetOptions()
(options, args) = parser.parse_args()
success = True
print("Running checkdeps...")
success &= CheckDeps(workspace)
use_linter_cache = not options.no_linter_cache
if not options.no_lint:
print("Running C++ lint check...")
success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
print("Running Torque formatting check...")
success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
workspace)
print("Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check...")
success &= SourceProcessor().RunOnPath(workspace)
print("Running status-files check...")
success &= StatusFilesProcessor().RunOnPath(workspace)
print("Running python tests...")
success &= PyTests(workspace)
if success:
return 0
else:
return 1
if __name__ == '__main__':
sys.exit(Main())
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 |